id (stringlengths 3-8) | content (stringlengths 100-981k)
|---|---|
92650
|
from armulator.armv6.opcodes.abstract_opcodes.bkpt import Bkpt
from armulator.armv6.opcodes.opcode import Opcode
from bitstring import BitArray
class BkptA1(Bkpt, Opcode):
def __init__(self, instruction):
Opcode.__init__(self, instruction)
Bkpt.__init__(self)
def is_pc_changing_opcode(self):
return False
@staticmethod
def from_bitarray(instr, processor):
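# A1 encoding of BKPT: bits[0:4] hold the condition field, which must be AL
# (0b1110) or the instruction is UNPREDICTABLE; imm32 is the 16-bit imm12:imm4
# value zero-extended to 32 bits.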
imm32 = BitArray(bin="0000000000000000" + instr.bin[12:24] + instr.bin[-4:])
if instr.bin[0:4] != "1110":
print "unpredictable"
else:
return BkptA1(instr)
|
92663
|
from RecoTauTag.RecoTau.pfRecoTauProducerDef_cfi import pfRecoTauProducerDef
pfRecoTauProducer = pfRecoTauProducerDef.clone()
|
92679
|
from django.dispatch import Signal
task_cleanup_signal = Signal(providing_args=['apiview', 'result', 'task_id', 'status', 'obj'])
|
92690
|
from datetime import date
from decimal import Decimal
from openroboadvisor.ledger import Ledger
from openroboadvisor.ledger.account import AccountType
from openroboadvisor.ledger.asset import Currency, Security
from openroboadvisor.ledger.entry import OpenAccount
from openroboadvisor.portfolio.account import Account, EXTERNAL_BANK_ID
from pytest import raises
def test_get_balances_before_account_open() -> None:
account_id = 'test'
ledger = Ledger()
account = Account(
account_id=account_id,
ledger=ledger,
)
with raises(AssertionError, match=r"No ledger account found.*"):
account.get_balances()
def test_get_balances_empty() -> None:
account_id = 'test'
ledger = Ledger()
account = Account(
account_id=account_id,
ledger=ledger,
)
ledger.record(OpenAccount(
account_id=account_id,
account_type=AccountType.BROKERAGE,
entry_date=date(2022, 1, 1)
))
balances = account.get_balances()
assert balances.subaccounts == {}, "Expected empty subaccounts"
assert balances.cash == {}, "Expected no cash"
assert balances.securities == {}, "Expected no securities"
def test_basic_account_functionality() -> None:
account_id = 'test'
ledger = Ledger()
account = Account(
account_id=account_id,
ledger=ledger,
)
ledger.record(OpenAccount(
account_id=account_id,
account_type=AccountType.BROKERAGE,
entry_date=date(2022, 1, 1)
))
ledger.record(OpenAccount(
account_id=EXTERNAL_BANK_ID,
account_type=AccountType.BROKERAGE,
entry_date=date(2022, 1, 1)
))
account.deposit(1000)
balances = account.get_balances()
assert balances.cash == {
Currency('USD'): Decimal(1000),
}
assert balances.securities == {}
assert account.get_fees() == {}
account.buy(
symbol='AAPL',
shares=1,
amount=Decimal('151.32'),
fees=Decimal('9.95'),
)
balances = account.get_balances()
assert balances.cash == {
Currency('USD'): Decimal('838.73'),
}
assert balances.securities == {
Security('AAPL', ): Decimal(1),
}
assert account.get_fees() == {
Currency('USD'): Decimal('9.95'),
}
account.sell(
symbol='AAPL',
shares=1,
amount=Decimal(200),
fees=10,
)
balances = account.get_balances()
assert balances.cash == {
Currency('USD'): Decimal('1028.73'),
}
assert balances.securities == {
Security('AAPL', ): Decimal(0),
}
assert account.get_fees() == {
Currency('USD'): Decimal('19.95'),
}
account.withdraw(
amount=Decimal('1028.73'),
)
balances = account.get_balances()
assert balances.cash == {
Currency('USD'): Decimal(0),
}
assert balances.securities == {
Security('AAPL', ): Decimal(0),
}
assert account.get_fees() == {
Currency('USD'): Decimal('19.95'),
}
|
92763
|
from detectron2.structures import BoxMode
from pathlib import Path
import json
import cv2
SAL_THR = 0.5
def get_assr_dicts(root, mode):
root = Path(root)
json_file = root / f"obj_seg_data_{mode}.json"
list_file = root / f"{mode}_images.txt"
with open(json_file) as f:
imgs_anns = json.load(f)
dataset_dicts = []
for idx, anno in enumerate(imgs_anns):
record = {}
filename = str(root / 'images' / mode / (anno['img'] + '.jpg'))
height, width = cv2.imread(filename).shape[:2]
record["file_name"] = filename
record["image_id"] = idx
record["height"] = height
record["width"] = width
with open(root / 'rank_order' / mode / (anno['img'] + '.json')) as f:
ranker_order = json.load(f)['rank_order']
objs = []
assert len(ranker_order) == len(
anno["object_data"]), "Every box should correspond a rank order"
for rank, obj_anno in zip(ranker_order, anno["object_data"]):
# Filter out boxes with rank <= 0.5 here
if rank > SAL_THR:
obj = {
"bbox": obj_anno['bbox'],
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": obj_anno['segmentation'],
"category_id": 0,
"gt_rank": int(rank * 10 - 6) # map 0.5~1.0 to 0,1,2,3,4
}
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
|
92800
|
from django import forms
from .utils import get_coins_list
class ChooseCoinToPayForm(forms.Form):
currency = forms.ChoiceField(choices=get_coins_list(),
widget=forms.RadioSelect(),
label='',
required=True)
|
92836
|
from typing import Any, Dict, Set, cast
from functools import cached_property
from opensanctions.core.dataset import Dataset
from opensanctions.core.source import Source
class Collection(Dataset):
"""A grouping of individual data sources. Data sources are bundled in order
to be more useful for list use."""
TYPE = "collection"
def __init__(self, file_path, config):
super().__init__(self.TYPE, file_path, config)
@cached_property
def datasets(self) -> Set[Dataset]:
datasets: Set[Dataset] = set([self])
for dataset in Dataset.all():
if self.name in dataset.collections:
datasets.update(dataset.datasets)
return datasets
@cached_property
def sources(self) -> Set[Source]:
return set([cast(Source, t) for t in self.datasets if t.TYPE == Source.TYPE])
def to_dict(self) -> Dict[str, Any]:
data = super().to_dict()
data["sources"] = [s.name for s in self.sources]
return data
|
92846
|
from .. import config
config.setup_examples()
import infermedica_api
if __name__ == "__main__":
api: infermedica_api.APIv3Connector = infermedica_api.get_api()
# Prepare the diagnosis request object
request = config.get_example_request_data()
# call suggest method
response = api.suggest(**request)
print("\n\n", response)
# Set different suggest_method
request["suggest_method"] = "risk_factors"
# call suggest method
response = api.suggest(**request)
print("\n\n", response)
# Set different suggest_method
request["suggest_method"] = "red_flags"
# call suggest method
response = api.suggest(**request)
print("\n\n", response)
|
92898
|
import copy
import errno
import os
import logging
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
from .helper import TensorBoardWriter
from .linear_eval import iter_eval_epoch, linear_eval_online, linear_eval_offline
from data import get_loaders_for_trainer
from models import Backbone
from models.heads import SingleLayerLinearHead, TwoLayerLinearHead
from optim import get_optimizer_and_scheduler
import utils
log = logging.getLogger('main')
C = utils.Colorer.instance()
def _unwrap(wrapped_module):
if isinstance(wrapped_module, DistributedDataParallel):
module = wrapped_module.module
else:
module = wrapped_module
return module
def _regression_loss(x, y):
# eps = 1e-6 if torch.is_autocast_enabled() else 1e-12
x = F.normalize(x, p=2, dim=1) #, eps=eps)
y = F.normalize(y, p=2, dim=1) #, eps=eps)
return (2 - 2 * (x * y).sum(dim=1)).view(-1)
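# Note on _regression_loss: for unit-norm x and y, 2 - 2 * (x . y) equals
# ||x - y||**2, i.e. the squared Euclidean distance between the normalized
# vectors, which is BYOL's regression objective.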
class BYOLBasedTrainer:
"""This trainer supports BYOL-like training framework that can be subclassed
by other task-specific trainer classes. To specify a detailed algorithm,
the user should implement Traniner.run().
"""
def __init__(self, cfg, online_network, target_network,
predictor=None, evaluator=None,
train_loader=None, eval_loader=None):
if cfg.train.enabled:
assert train_loader is not None
assert predictor is not None
if cfg.train.enabled and cfg.train.online_eval:
assert eval_loader is not None
assert evaluator is not None
self._modules = {}
self._saving_targets = {}
self.cfg = cfg
self.device = cfg.device
self.online_network = online_network
self.target_network = target_network
self.predictor = predictor
self.evaluator = evaluator
self.xent_loss = nn.CrossEntropyLoss()
self.train_loader = train_loader
self.eval_loader = eval_loader
self._setup_device_and_distributed_parallel(cfg.device)
self.cur_epoch = 0
self.max_epochs = 0
self.max_eval_score = 0.
self.max_eval_epoch = 0
if self.cfg.train.enabled:
self.m_base = self.m = cfg.train.m
self.max_epochs = cfg.train.max_epochs
self.total_global_step = len(train_loader) * cfg.train.max_epochs
self.optimizer, self.scheduler = get_optimizer_and_scheduler(
cfg=self.cfg, mode='train', modules=self._modules, loader=train_loader,
exclude_from_lars=True, module_black_list=['target_network'])
self.scaler = torch.cuda.amp.GradScaler() #init_scale=2**14)
# default init_scale 2**16 will yield invalid gradient in the first iteration
self.tb_writer = TensorBoardWriter.init_for_train_from_config(cfg)
else:
self.optimizer, self.scheduler, self.scaler = None, None, None
def __setattr__(self, name, value):
if hasattr(value, 'state_dict') and callable(value.state_dict):
self._saving_targets[name] = value # including optimizers & schedulers
if isinstance(value, nn.Module):
self._modules[name] = value
object.__setattr__(self, name, value)
def run(self):
"""Main training algorithm should be implemented in this method."""
raise NotImplementedError()
@classmethod
def init_from_config(cls, cfg):
train_loader, eval_loader, num_classes = get_loaders_for_trainer(cfg)
online_network = Backbone.init_from_config(cfg)
target_network, predictor, evaluator = None, None, None
if cfg.train.enabled:
target_network = Backbone.init_from_config(cfg)
predictor = TwoLayerLinearHead.init_predictor_from_config(cfg)
evaluator = SingleLayerLinearHead.init_evaluator_from_config(
cfg, num_classes)
return cls(
cfg=cfg,
train_loader=train_loader,
eval_loader=eval_loader,
online_network=online_network,
target_network=target_network,
predictor=predictor,
evaluator=evaluator,
)
def _setup_device_and_distributed_parallel(self, device):
for name, module in self._modules.items():
module = module.to(device)
module = utils.wrap_if_distributed(module, device)
self._modules[name] = module
object.__setattr__(self, name, module)
@torch.no_grad()
def _update_target_network_parameters(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.online_network.parameters(),
self.target_network.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)
def _decay_ema_momentum(self, step):
self.m = (1 - (1 - self.m_base) *
(math.cos(math.pi * step / self.total_global_step) + 1) / 2)
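# _decay_ema_momentum follows BYOL's cosine schedule: m equals m_base at step 0
# and approaches 1.0 as step reaches total_global_step, so the target network
# is updated ever more slowly as training progresses.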
@staticmethod
def _criterion(p_online, p_target):
"""Regression loss used in BYOL."""
p_online_v1, p_online_v2 = p_online.chunk(2)
p_target_v1, p_target_v2 = p_target.chunk(2)
assert p_online_v1.size(0) == p_online_v2.size(0)
assert p_target_v1.size(0) == p_target_v2.size(0)
assert p_online_v1.size(0) == p_target_v1.size(0)
# symmetric loss
loss = _regression_loss(p_online_v1, p_target_v2)
loss += _regression_loss(p_online_v2, p_target_v1)
return loss.mean()
def _initialize_target_network(self, from_online):
# init momentum network as encoder net
for param_q, param_k in zip(self.online_network.parameters(),
self.target_network.parameters()):
if from_online:
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
def _save_checkpoint(self, tag):
save_path = f"{self.cfg.save_dir}/checkpoint_" + str(tag) + ".pth"
state_dict = {
'tag': str(tag),
'epoch': self.cur_epoch,
'max_eval_score': self.max_eval_score,
'max_eval_epoch': self.max_eval_epoch,
}
for key, target in self._saving_targets.items():
if self.cfg.fake_checkpoint:
target = "fake_state_dict"
else:
target = utils.unwrap_if_distributed(target)
target = target.state_dict()
state_dict[f"{key}_state_dict"] = target
torch.save(state_dict, save_path)
suffix = (C.debug(" (fake_checkpoint)")
if self.cfg.fake_checkpoint else "")
return save_path + suffix
def save_checkpoint(self, epoch):
save_path = self._save_checkpoint(str(epoch))
log.info(f"[Save] restore the model's checkpoint: {save_path}")
return save_path
def save_best_checkpoint(self):
save_path = self._save_checkpoint('best')
log.info(f"[Save] restore the best model's checkpoint: {save_path}")
return save_path
def symlink_checkpoint_with_tag(self, epoch, tag):
save_path = f"{self.cfg.save_dir}/checkpoint_{epoch}.pth"
symlink_path = f"{self.cfg.save_dir}/checkpoint_{tag}.pth"
if not os.path.exists(save_path):
self._save_checkpoint(epoch)
try:
os.symlink(os.path.abspath(save_path), symlink_path)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(symlink_path)
os.symlink(os.path.abspath(save_path), symlink_path)
else:
raise e
finally:
log.info(f"[Save] make a symlink of the current model: "
f"{symlink_path}")
return symlink_path
def load_checkpoint_if_available(self, tag='last'):
if self.cfg.overwrite:
assert not self.cfg.load_dir, \
"Mutually exclusive aruguements: overwrite, load_dir."
log.warning("Overwrite checkpoints in save_dir.")
return False
try:
load_dir = self.cfg.load_dir or self.cfg.save_dir
load_path = f"{load_dir}/checkpoint_{tag}.pth"
state_dict = torch.load(load_path)
except FileNotFoundError:
if self.cfg.load_dir:
raise FileNotFoundError(f"Can't find checkpoint at {load_dir}")
else:
log.warning(f'No checkpoint to resume from {load_dir}.')
return False
self.cur_epoch = state_dict['epoch']
self.max_eval_score = state_dict['max_eval_score']
self.max_eval_epoch = state_dict['max_eval_epoch']
state_dict = {k[:-len('_state_dict')]: v for k, v in state_dict.items()
if k.endswith('_state_dict')}
log.info(f"[Resume] Loaded chekpoint (epoch: {self.cur_epoch}) "
f"from: {load_path}")
missing_keys = set(self._saving_targets.keys()) - set(state_dict.keys())
unexpected_keys = set(state_dict.keys()) - set(self._saving_targets.keys())
assert len(missing_keys) == 0, "Missing keys!"
log.info("[Resume] Redundant keys: "
f"{list(unexpected_keys) if unexpected_keys else 'None'}")
for key, target in self._saving_targets.items():
if state_dict[key] == 'fake_state_dict':
log.info(f"[Resume] Loaded {key}: {C.debug('(fake_chekpoint)')}")
else:
kwargs = {'strict': False} if isinstance(target, nn.Module) else {}
loaded = _unwrap(target).load_state_dict(state_dict[key], **kwargs)
if isinstance(target, nn.Module):
assert len(loaded.missing_keys) == 0
if isinstance(target, Backbone):
# the projector is ignored in evaluation-only cases
assert all([key.startswith('projector.')
for key in loaded.unexpected_keys])
log.info(f"[Resume] Loaded {key}")
return True
|
92963
|
from typing import Optional, List
from seedwork.domain.entities import Aggregate
from seedwork.domain.value_objects import UUID
from modules.iam.domain.value_objects import Session
ANONYMOUS_ID = UUID("00000000-0000-0000-0000-000000000000")
class User(Aggregate):
id: UUID
username: str
email: str = ""
hashed_password: str = ""
first_name: Optional[str] = ""
last_name: Optional[str] = ""
def change_main_attributes(
self,
username: Optional[str] = None,
first_name: Optional[str] = None,
last_name: Optional[str] = None,
email: Optional[str] = None,
):
if username:
self.username = username
if first_name:
self.first_name = first_name
if last_name:
self.last_name = last_name
if email:
self.email = email
def activate(self):
# TODO: maybe later
...
def deactivate(self):
# TODO: maybe later
...
def is_disabled(self):
return False
def is_anonymous(self):
return self.id == ANONYMOUS_ID
def is_active(self):
return not self.is_anonymous() and not self.is_disabled()
@classmethod
def Anonymous(cls):
return User(
id=ANONYMOUS_ID,
username="anonymous",
)
|
92981
|
from django.dispatch import Signal
handler_add = Signal(providing_args=["user"])
view_init = Signal(providing_args=["user"])
|
92983
|
from torchtext.data.datasets_utils import (
_RawTextIterableDataset,
_wrap_split_argument,
_add_docstring_header,
_download_extract_validate,
_create_dataset_directory,
_create_data_from_iob,
)
import os
import logging
URL = {
'train': "https://www.clips.uantwerpen.be/conll2000/chunking/train.txt.gz",
'test': "https://www.clips.uantwerpen.be/conll2000/chunking/test.txt.gz",
}
MD5 = {
'train': "6969c2903a1f19a83569db643e43dcc8",
'test': "a916e1c2d83eb3004b38fc6fcd628939",
}
NUM_LINES = {
'train': 8936,
'test': 2012,
}
_EXTRACTED_FILES = {
'train': 'train.txt',
'test': 'test.txt'
}
_EXTRACTED_FILES_MD5 = {
'train': "2e2f24e90e20fcb910ab2251b5ed8cd0",
'test': "56944df34be553b72a2a634e539a0951"
}
DATASET_NAME = "CoNLL2000Chunking"
@_add_docstring_header(num_lines=NUM_LINES)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(('train', 'test'))
def CoNLL2000Chunking(root, split):
# Create a dataset specific subfolder to deal with generic download filenames
root = os.path.join(root, 'conll2000chunking')
path = os.path.join(root, split + ".txt.gz")
data_filename = _download_extract_validate(root, URL[split], MD5[split], path, os.path.join(root, _EXTRACTED_FILES[split]),
_EXTRACTED_FILES_MD5[split], hash_type="md5")
logging.info('Creating {} data'.format(split))
return _RawTextIterableDataset(DATASET_NAME, NUM_LINES[split],
_create_data_from_iob(data_filename, " "))
|
92990
|
class Solution:
def minSteps(self, n: int) -> int:
res, m = 0, 2
while n > 1:
while n % m == 0:
res += m
n //= m
m += 1
return res
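# Assuming this is the classic "2 Keys Keyboard" problem (the usual setting for
# minSteps): making p copies of the current buffer costs one Copy All plus
# (p - 1) Pastes, i.e. p operations, so the answer is the sum of n's prime
# factors. A few hypothetical spot checks:
#   Solution().minSteps(1)   # -> 0
#   Solution().minSteps(9)   # -> 6  (3 + 3)
#   Solution().minSteps(12)  # -> 7  (2 + 2 + 3)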
|
93050
|
import pytest
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework.test import APIClient
from tests.test_data import DEFAULT_PASSWORD
def test_valid_rest_login(client: APIClient, user: User):
res = client.post(
reverse("rest_login"), {"email": user.email, "password": <PASSWORD>_PASSWORD}
)
assert res.status_code == 200
assert res.data["token"] is not None
returned_user = res.data["user"]
assert returned_user["username"] == user.email
assert returned_user["email"] == user.email
assert returned_user["first_name"] == user.first_name
assert returned_user["last_name"] == user.last_name
assert returned_user["zipcode"] == user.profile.zipcode
# Commented out until email confirmation is required again
#
# @pytest.mark.django_db
# def test_unverified_email_rest_login(client: test.Client, user: User):
# EmailAddress.objects.filter(email=user.email).update(verified=False)
#
# res = client.post(
# reverse("rest_login"), {"email": user.email, "password": <PASSWORD>}
# )
#
# assert res.status_code == 400
# assert "Email has not been verified" in res.data["error"]
def test_invalid_pass_rest_login(client: APIClient, user: User):
res = client.post(
reverse("rest_login"), {"email": user.email, "password": "<PASSWORD>"}
)
assert res.status_code == 400
assert res.data["error"] == "The email or password you entered is incorrect!"
def test_invalid_username_rest_login(client: APIClient, user: User):
res = client.post(
reverse("rest_login"), {"email": "<EMAIL>", "password": user.username}
)
assert res.status_code == 400
assert res.data["error"] == "The email or password you entered is incorrect!"
|
93072
|
from django.forms import widgets
from django.core.validators import RegexValidator
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from orchestra.api.serializers import SetPasswordHyperlinkedSerializer
from orchestra.contrib.accounts.serializers import AccountSerializerMixin
from orchestra.core import validators
from .models import SaaS
class SaaSSerializer(AccountSerializerMixin, SetPasswordHyperlinkedSerializer):
data = serializers.DictField(required=False)
password = serializers.CharField(write_only=True, required=False,
style={'widget': widgets.PasswordInput},
validators=[
validators.validate_password,
RegexValidator(r'^[^"\'\\]+$',
_('Enter a valid password. '
'This value may contain any ascii character except for '
' \'/"/\\/ characters.'), 'invalid'),
])
class Meta:
model = SaaS
fields = ('url', 'id', 'name', 'service', 'is_active', 'data', 'password')
postonly_fields = ('name', 'service', 'password')
|
93079
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
import glob # for iterating through directories
import sys
import os
path_training_items = '/home/amilan/ownCloud/ARChallenge2017/acrv_apc_2017_data/data/items/Training_items/'
#path_training_items = '/home/amilan/ownCloud/ARChallenge2017/acrv_apc_2017_data/data/items/Unseen_items/'
#img = cv2.imread('ballons.jpg')
#img = cv2.imread('/home/amilan/ownCloud/ARChallenge2017/acrv_apc_2017_data/data/items/Training_items/Balloons/Balloons_Bottom-Side_01.png');
#img = cv2.imread('/home/amilan/ownCloud/ARChallenge2017/acrv_apc_2017_data/data/items/Training_items/Bath_Sponge/Bath_Sponge_Bottom-Side_01.png');
#img = cv2.imread('/home/amilan/ownCloud/ARChallenge2017/acrv_apc_2017_data/data/items/Training_items/Flashlight/Flashlight_Bottom-Side_01.png');
#img = cv2.imread('/home/amilan/ownCloud/ARChallenge2017/acrv_apc_2017_data/data/items/Unseen_items/Bunny_Book/Bunny_Book_Bottom-Side_01.png');
for dirname in glob.iglob(os.path.join(path_training_items,'*')):
#for filename in glob.iglob(os.path.join(dirname
item_name = os.path.split(dirname)[-1]
#print os.path.join(dirname,item_name +'*.png')
for filename in glob.iglob(os.path.join(dirname,item_name +'*.png')):
print("Reading %s" % filename)
basefilename = os.path.basename(filename)
img = cv2.imread(filename);
height, width = img.shape[:2]
# downsize 10x for faster processing
img = cv2.resize(img, (int(0.1*width), int(0.1*height)), interpolation = cv2.INTER_CUBIC)
mask = np.zeros(img.shape[:2],np.uint8)
mask[:] = 255
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
height_small, width_small = img.shape[:2]
brd = 5 # border width for background
rect = (brd,brd,width_small-2*brd,height_small-2*brd) # leave 10 px border
print "Running GrabCut..."
# mask is filled with
# 0 - an obvious background pixels
# 1 - an obvious foreground (object) pixel
# 2 - a possible background pixel
# 3 - a possible foreground pixel
cv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)
print "Done!"
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask2[:,:,np.newaxis]
# img = np.where((mask==2)|(mask==0),128,255)
mask_object = np.where((mask==2)|(mask==0),0,255).astype('uint8')
mask_object = cv2.resize(mask_object, (width, height), interpolation = cv2.INTER_NEAREST )
writefile = os.path.join(dirname, 'mask_' + basefilename)
cv2.imwrite(writefile,mask_object)
print("Writing %s" % writefile)
#plt.imshow(img),plt.colorbar(),plt.show()
|
93100
|
import ctypes
import json
import html2text
import logging
import pprint
import random
import requests
import sys
import time
import traceback
from DDPClient import DDPClient
from multiprocessing import Manager
from multiprocessing.dummy import Process
from six.moves.urllib import parse
from will import settings
from will.abstractions import Event, Message, Person, Channel
from will.mixins import SleepMixin, StorageMixin
from will.utils import Bunch, UNSURE_REPLIES, clean_for_pickling
from .base import IOBackend
class RocketChatBackend(IOBackend, StorageMixin):
friendly_name = "RocketChat"
internal_name = "will.backends.io_adapters.rocketchat"
required_settings = [
{
"name": "ROCKETCHAT_USERNAME",
"obtain_at": """1. Go to your rocket.chat instance (i.e. your-name.rocket.chat)
2. Create a new normal account for Will.
3. Set this value to the username, just like you'd use to log in with it.""",
},
{
"name": "ROCKETCHAT_PASSWORD",
"obtain_at": """1. Go to your rocket.chat instance (i.e. your-name.rocket.chat)
2. Create a new normal account for Will, and note the password you use.
3. Set this value to that password, just like you'd use to log in with it.""",
},
{
"name": "ROCKETCHAT_URL",
"obtain_at": (
"This is your rocket.chat url - typically either your-name.rocket.chat for "
"Rocket.Chat cloud, or something like http://localhost:3000 for local installations."
),
},
]
pp = pprint.PrettyPrinter(indent=4)
def normalize_incoming_event(self, event):
logging.info('Normalizing incoming Rocket.Chat event')
logging.debug('event: {}'.format(self.pp.pformat(event)))
if event["type"] == "message":
# Were we mentioned?
will_is_mentioned = False
for mention in event['mentions']:
if mention['username'] == self.me.handle:
will_is_mentioned = True
break
# Handle direct messages, which in Rocket.Chat are a rid
# made up of both users' _ids.
is_private_chat = False
if self.me.id in event["rid"]:
is_private_chat = True
# Create a "Channel" to align with Rocket.Chat DM
# paradigm. There might well be a better way of doing
# this. See TODO in _rest_channels_list.
sender_id = event['u']['_id']
ids = [sender_id, self.me.id]
ids.sort()
channel_id = '{}{}'.format(*ids)
sender = self.people[sender_id]
channel_members = {}
channel_members[sender_id] = sender
channel_members[self.me.id] = self.me
channel = Channel(
id=channel_id,
name=channel_id,
source=clean_for_pickling(channel_id),
members=channel_members
)
else:
if "rid" in event and event["rid"] in self.channels:
channel = clean_for_pickling(self.channels[event["rid"]])
else:
# Private channel, unknown members. Just do our best and try to route it.
if "rid" in event:
channel = Channel(
id=event["rid"],
name=event["rid"],
source=clean_for_pickling(event["rid"]),
members={}
)
logging.debug('channel: {}'.format(channel))
# Set various variables depending on whether @handle was
# part of the message.
interpolated_handle = "@%s " % self.handle
logging.debug('interpolated_handle: {}'
.format(interpolated_handle))
is_direct = False
if is_private_chat or event['msg'].startswith(interpolated_handle):
is_direct = True
# Strip my handle from the start. NB Won't strip it from
# elsewhere in the text, and won't strip other mentions.
# This will stop regexes from working, not sure if it's a
# feature or a bug.
if event['msg'].startswith(interpolated_handle):
event['msg'] = event['msg'][len(interpolated_handle):].strip()
if interpolated_handle in event['msg']:
will_is_mentioned = True
# Determine if Will said it.
logging.debug('self.people: {}'.format(self.pp.pformat(self.people)))
sender = self.people[event['u']['_id']]
logging.debug('sender: {}'.format(sender))
if sender['handle'] == self.me.handle:
logging.debug('Will said it')
will_said_it = True
else:
logging.debug('Will didnt say it')
will_said_it = False
m = Message(
content=event['msg'],
type=event.type,
is_direct=is_direct,
is_private_chat=is_private_chat,
is_group_chat=not is_private_chat,
backend=self.internal_name,
sender=sender,
channel=channel,
will_is_mentioned=will_is_mentioned,
will_said_it=will_said_it,
backend_supports_acl=True,
original_incoming_event=clean_for_pickling(event)
)
return m
else:
logging.debug("Passing, I don't know how to normalize this event of type {}".format(event["type"]))
pass
def handle_outgoing_event(self, event):
# Print any replies.
logging.info('Handling outgoing Rocket.Chat event')
logging.debug('event: {}'.format(self.pp.pformat(event)))
if event.type in ["say", "reply"]:
if "kwargs" in event and "html" in event.kwargs and event.kwargs["html"]:
event.content = html2text.html2text(event.content)
self.send_message(event)
if hasattr(event, "source_message") and event.source_message:
pass
else:
# Backend needs to provide ways to handle and properly route:
# 1. 1-1 messages
# 2. Group (channel) messages
# 3. Ad-hoc group messages (if they exist)
# 4. Messages that have a channel/room explicitly specified that's different than
# where they came from.
# 5. Messages without a channel (Fallback to ROCKETCHAT_DEFAULT_CHANNEL) (messages that don't have a room )
kwargs = {}
if "kwargs" in event:
kwargs.update(**event.kwargs)
if event.type in ["topic_change", ]:
self.set_topic(event.content)
elif (
event.type == "message.no_response" and
event.data.is_direct and
event.data.will_said_it is False
):
event.content = random.choice(UNSURE_REPLIES)
self.send_message(event)
def set_topic(self, event):
logging.warn("Rocket.Chat doesn't support topics yet: https://github.com/RocketChat/Rocket.Chat/issues/328")
event.content("Hm. Looks like Rocket.Chat doesn't support topics yet: https://github.com/RocketChat/Rocket.Chat/issues/328")
self.send_message(event)
def send_message(self, event):
logging.info('Sending message to Rocket.Chat')
logging.debug('event: {}'.format(self.pp.pformat(event)))
data = {}
if hasattr(event, "kwargs"):
logging.debug('event.kwargs: {}'.format(event.kwargs))
data.update(event.kwargs)
# TODO: Go through the possible attachment parameters at
# https://rocket.chat/docs/developer-guides/rest-api/chat/postmessage
# - this is a bare minimum inspired by slack.py
if 'color' in event.kwargs:
data.update({
"attachments": [
{
'color': event.kwargs["color"],
'text': event.content,
}
],
})
else:
data.update({
'text': event.content,
})
else:
# I haven't seen this yet, not sure when it's relevant.
# 'text' was wrongly set to 'msg' and nothing blew up. ;)
logging.debug("event doesn't have kwargs")
data.update({
'text': event.content,
})
if "source_message" in event:
if hasattr(event.source_message, "data"):
data['roomId'] = event.source_message.data.channel.id
else:
data['roomId'] = event.source_message.channel.id
else:
data['roomId'] = event.data['source'].data.channel.id
self._rest_post_message(data)
def _get_rest_metadata(self):
self._rest_users_list()
self._rest_channels_list()
def _get_realtime_metadata(self):
self._realtime_get_rooms()
# REST API functions, documented at
# https://rocket.chat/docs/developer-guides/rest-api/
def _rest_login(self):
params = {'username': settings.ROCKETCHAT_USERNAME,
'password': settings.ROCKETCHAT_PASSWORD}
r = requests.post('{}login'.format(self.rocketchat_api_url),
data=params)
resp_json = r.json()
self._token = resp_json['data']['authToken']
self.save("WILL_ROCKETCHAT_TOKEN", self._token)
self._userid = resp_json['data']['userId']
self.save("WILL_ROCKETCHAT_USERID", self._userid)
def _rest_users_list(self):
logging.debug('Getting users list from Rocket.Chat')
# Remember to paginate. ;)
count = 50
passes = 0
headers = {'X-Auth-Token': self.token,
'X-User-Id': self.userid}
fetched = 0
total = 0
self.handle = settings.ROCKETCHAT_USERNAME
self.mention_handle = "@%s" % settings.ROCKETCHAT_USERNAME
people = {}
while fetched <= total:
params = {'count': count,
'offset': fetched}
r = requests.get('{}users.list'.format(self.rocketchat_api_url),
headers=headers,
params=params)
resp_json = r.json()
if resp_json['success'] is False:
logging.exception('resp_json: {}'.format(resp_json))
total = resp_json['total']
for user in resp_json['users']:
# TODO: Unlike slack.py, no timezone support at present.
# RC returns utcOffset, but this isn't enough to
# determine timezone.
# TODO: Pickle error if timezone set to UTC, and I didn't
# have a chance to report it. Using GMT as a poor substitute.
person = Person(
id=user['_id'],
handle=user['username'],
mention_handle="@%s" % user["username"],
source=clean_for_pickling(user)['username'],
name=user['name'],
timezone='GMT'
)
people[user['_id']] = person
if user['username'] == self.handle:
self.me = person
passes += 1
fetched = count * passes
self.people = people
def _get_userid_from_username(self, username):
if username is None:
raise TypeError("No username given")
for id, data in self.people.items():
if data['handle'] == username:
return id
def _rest_channels_list(self):
logging.debug('Getting channel list from Rocket.Chat')
# Remember to paginate. ;)
count = 50
passes = 0
headers = {'X-Auth-Token': self.token,
'X-User-Id': self.userid}
fetched = 0
total = 0
channels = {}
while fetched <= total:
params = {'count': count,
'offset': fetched}
r = requests.get('{}channels.list'.format(self.rocketchat_api_url),
headers=headers,
params=params)
resp_json = r.json()
total = resp_json['total']
for channel in resp_json['channels']:
members = {}
for username in channel['usernames']:
userid = self._get_userid_from_username(username)
members[userid] = self.people[userid]
channels[channel['_id']] = Channel(
id=channel['_id'],
name=channel['name'],
source=clean_for_pickling(channel),
members=members
)
passes += 1
fetched = count * passes
self.channels = channels
def _rest_post_message(self, data):
logging.info('Posting message to Rocket.Chat REST API')
logging.debug('data: {}'.format(data))
headers = {
'X-Auth-Token': self.token,
'X-User-Id': self.userid
}
logging.debug('headers: {}'.format(headers))
r = requests.post(
'{}chat.postMessage'.format(self.rocketchat_api_url),
headers=headers,
data=data,
)
resp_json = r.json()
# TODO: Necessary / useful to check return codes?
if 'success' not in resp_json:
logging.debug('resp_json: {}'.format(resp_json))
assert resp_json['success']
# Realtime API functions, documented at
# https://rocket.chat/docs/developer-guides/realtime-api/
def _start_connect(self):
up = parse.urlparse(settings.ROCKETCHAT_URL)
if up.scheme == 'http':
ws_proto = 'ws'
else:
ws_proto = 'wss'
self.rc = DDPClient('{}://{}/websocket'.format(ws_proto, up.netloc), auto_reconnect=True, auto_reconnect_timeout=1)
self.rc.on('connected', self._realtime_login)
self.rc.on('changed', self._changed_callback)
self.rc.connect()
def _realtime_login(self):
params = [{'user': {'username': settings.ROCKETCHAT_USERNAME}, 'password': settings.ROCKETCHAT_PASSWORD}]
self.rc.call('login', params, self._login_callback)
def _login_callback(self, error, result):
logging.debug('_login_callback')
if error:
logging.exception('error: {}'.format(error))
return
logging.debug('result: {}'.format(result))
logging.debug('self.token: {}'.format(self.token))
logging.debug('self.userid: {}'.format(self.userid))
# Use dummy to make it a Thread, otherwise DDP events don't
# get back to the right place. If there is a real need to make
# it a real Process, it is probably just a matter of using
# multiprocessing.Value(s) in the right place(s).
# TODO: Could this be the reason for the 100% CPU usage?
# Have asked in #development.
self.update_thread = Process(target=self._get_updates)
self.update_thread.start()
def _changed_callback(self, collection, _id, fields, cleared):
logging.debug('_changed_callback')
logging.debug('collection: {}'.format(collection))
logging.debug('id: {}'.format(_id))
logging.debug('fields: {}'.format(self.pp.pformat(fields)))
logging.debug('cleared: {}'.format(cleared))
event = Event(type='message', version=1, **fields['args'][0])
self.handle_incoming_event(event)
def _stream_room_message_callback(self, error, event):
logging.debug('_stream_room_message_callback')
if error:
logging.exception('error: {}'.format(error))
return
@property
def token(self):
if not hasattr(self, "_token") or not self._token:
self._token = self.load("WILL_ROCKETCHAT_TOKEN", None)
if not self._token:
self._rest_login()
return self._token
@property
def userid(self):
if not hasattr(self, "_userid") or not self._userid:
self._userid = self.load("WILL_ROCKETCHAT_USERID", None)
if not self._userid:
self._rest_login()
return self._userid
@property
def rocketchat_api_url(self):
if settings.ROCKETCHAT_URL.endswith("/"):
return settings.ROCKETCHAT_URL + 'api/v1/'
else:
return settings.ROCKETCHAT_URL + '/api/v1/'
# Gets updates from REST and Realtime APIs.
def _get_updates(self):
try:
polling_interval_seconds = 5
self._get_rest_metadata()
while True:
# Update channels/people/me/etc.
self._get_rest_metadata()
self._get_realtime_metadata()
time.sleep(polling_interval_seconds)
except (KeyboardInterrupt, SystemExit):
pass
except:
logging.critical("Error in watching RocketChat API: \n%s" % traceback.format_exc())
# Use this to get a list of all rooms that we are in.
# https://rocket.chat/docs/developer-guides/realtime-api/the-room-object
def _realtime_get_rooms(self):
params = [{'$date': 0}]
self.rc.call('rooms/get', params, self._get_rooms_callback)
def _get_rooms_callback(self, error, result):
logging.debug('_get_rooms_callback')
if error:
logging.exception('_get_rooms_callback error: {}'.format(error))
return
# TODO: When we leave a room, we don't delete it from
# self.subscribed_rooms. Not a problem in practice -
# subscriptions to the room won't fire, but messy.
for room in result:
logging.debug('room: {}'.format(room))
if room['_id'] not in self.subscribed_rooms:
self.rc.subscribe('stream-room-messages', [room['_id']],
self._stream_room_message_callback)
self.subscribed_rooms[room['_id']] = True
def bootstrap(self):
# Bootstrap must provide a way to have:
# a) self.normalize_incoming_event fired, or incoming events put into self.incoming_queue
# b) any necessary threads running for a)
# c) self.me (Person) defined, with Will's info
# d) self.people (dict of People) defined, with everyone in an organization/backend
# e) self.channels (dict of Channels) defined, with all available channels/rooms.
# Note that Channel asks for users, a list of People.
# f) A way for self.handle, self.me, self.people, and self.channels to be kept accurate,
# with a maximum lag of 60 seconds.
self.subscribed_rooms = {}
# Gets and stores token and ID.
self._rest_login()
# Kicks off listeners and REST room polling.
self._start_connect()
|
93147
|
import base64
from typing import Optional
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from fidesops.core.config import config
from fidesops.util.cryptographic_util import bytes_to_b64_str
def encrypt_to_bytes_verify_secrets_length(
plain_value: Optional[str], key: bytes, nonce: bytes
) -> bytes:
"""Encrypts the value using the AES GCM Algorithm. Note that provided nonce must be 12 bytes.
Returns encrypted value in bytes"""
verify_nonce(nonce)
verify_encryption_key(key)
return _encrypt_to_bytes(plain_value, key, nonce)
def _encrypt_to_bytes(plain_value: Optional[str], key: bytes, nonce: bytes) -> bytes:
"""Encrypts the value using the AES GCM Algorithm. Note that provided nonce must be 12 bytes.
Returns encrypted value in bytes"""
if plain_value is None:
raise ValueError("plain_value cannot be null")
gcm = AESGCM(key)
value_bytes = plain_value.encode(config.security.ENCODING)
encrypted_bytes = gcm.encrypt(nonce, value_bytes, nonce)
return encrypted_bytes
def encrypt_verify_secret_length(
plain_value: Optional[str], key: bytes, nonce: bytes
) -> str:
"""Encrypts the value using the AES GCM Algorithm, with secret length verification.
Returns encrypted value as a string"""
encrypted: bytes = encrypt_to_bytes_verify_secrets_length(plain_value, key, nonce)
return bytes_to_b64_str(encrypted)
def encrypt(plain_value: Optional[str], key: bytes, nonce: bytes) -> str:
"""Encrypts the value using the AES GCM Algorithm, without secret length verification.
Returns encrypted value as a string"""
encrypted: bytes = _encrypt_to_bytes(plain_value, key, nonce)
return bytes_to_b64_str(encrypted)
def decrypt_combined_nonce_and_message(encrypted_value: str, key: bytes) -> str:
"""Decrypts a message when the nonce has been packaged together with the message"""
verify_encryption_key(key)
gcm = AESGCM(key)
encrypted_combined: bytes = base64.b64decode(encrypted_value)
# Separate the nonce out as the first 12 characters of the combined message
nonce: bytes = encrypted_combined[0 : config.security.AES_GCM_NONCE_LENGTH]
encrypted_message: bytes = encrypted_combined[
config.security.AES_GCM_NONCE_LENGTH :
]
decrypted_bytes: bytes = gcm.decrypt(nonce, encrypted_message, nonce)
decrypted_str = decrypted_bytes.decode(config.security.ENCODING)
return decrypted_str
def decrypt(encrypted_value: str, key: bytes, nonce: bytes) -> str:
"""Decrypts the value using the AES GCM Algorithm"""
verify_encryption_key(key)
verify_nonce(nonce)
gcm = AESGCM(key)
encrypted_bytes = base64.b64decode(encrypted_value)
decrypted_bytes = gcm.decrypt(nonce, encrypted_bytes, nonce)
decrypted_str = decrypted_bytes.decode(config.security.ENCODING)
return decrypted_str
def verify_nonce(nonce: bytes) -> None:
if len(nonce) != config.security.AES_GCM_NONCE_LENGTH:
raise ValueError(
f"Nonce must be {config.security.AES_GCM_NONCE_LENGTH} bytes long"
)
def verify_encryption_key(key: bytes) -> None:
if len(key) != config.security.AES_ENCRYPTION_KEY_LENGTH:
raise ValueError(
f"Encryption key must be {config.security.AES_ENCRYPTION_KEY_LENGTH} bytes long"
)
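# A minimal round-trip sketch (hypothetical values; the real sizes come from
# config.security.AES_ENCRYPTION_KEY_LENGTH and AES_GCM_NONCE_LENGTH, which for
# AES-128-GCM would typically be 16 and 12 bytes):
#
#   from os import urandom
#   key, nonce = urandom(16), urandom(12)
#   token = encrypt("hello", key, nonce)          # base64-encoded ciphertext
#   assert decrypt(token, key, nonce) == "hello"  # same key + nonce recovers it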
|
93166
|
import torch
import unittest
from qtorch.quant import *
from qtorch import FixedPoint, BlockFloatingPoint, FloatingPoint
DEBUG = False
log = lambda m: print(m) if DEBUG else False
class TestStochastic(unittest.TestCase):
"""
invariant: quantized numbers cannot be greater than the maximum representable number
or lower than the minimum representable number
"""
def test_fixed(self):
"""test fixed point clamping"""
for d in ["cpu", "cuda"]:
for r in ["stochastic", "nearest"]:
wl = 5
fl = 4
t_min = -(2 ** (wl - fl - 1))
t_max = 2 ** (wl - fl - 1) - 2 ** (-fl)
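# e.g. wl=5, fl=4 gives t_min = -(2**0) = -1.0 and t_max = 2**0 - 2**-4 = 0.9375,
# so clamped values must stay within [-1.0, 0.9375]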
a = torch.linspace(-2, 2, steps=100, device=d)
clamp_a = fixed_point_quantize(a, wl=wl, fl=fl, clamp=True, rounding=r)
self.assertEqual(t_max, clamp_a.max().item())
self.assertEqual(t_min, clamp_a.min().item())
a = torch.linspace(-2, 2, steps=100, device=d)
no_clamp_a = fixed_point_quantize(a, wl=wl, fl=fl, clamp=False, rounding=r)
self.assertLess(t_max, no_clamp_a.max().item())
self.assertGreater(t_min, no_clamp_a.min().item())
def test_float(self):
"""test floating point quantization"""
formats = [(2,2),(2,3),(3,2)]
for exp, man in formats:
for d in ["cpu", "cuda"]:
for r in ["stochastic", "nearest"]:
a_max = 2 ** (2 ** (exp - 1)) * (1 - 2 ** (-man - 1))
a_min = 2 ** (-(2 ** (exp - 1)) + 1)
max_exp=int((2**exp)/2)
min_exp=-(max_exp-2)
mantissa_step=2**(-man)
min_mantissa=mantissa_step # When denormalized
max_mantissa=2-mantissa_step # When normalized, mantissa goes from 1 to 2-mantissa_step
a_min = 2**min_exp*min_mantissa
a_max = 2**max_exp*max_mantissa
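# Worked example (hand-checked): with exp=2 and man=2 the exponent runs from
# min_exp=0 to max_exp=2, so the smallest positive (denormalized) value is
# 2**0 * 0.25 = 0.25 and the largest is 2**2 * 1.75 = 7.0.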
expected_vals=[]
log(f"With {exp} exponent bits, our exponent goes from {min_exp} to {max_exp}")
log(f"With {man} mantissa bits, our mantissa goes from {min_mantissa} (denormalized) to {max_mantissa}")
log(f"With {man} mantissa bits and {exp} exponent bits, we can go from {a_min} to {a_max}")
representable_normalized =[]
for sign in [1,-1]:
for e in range(0,2**exp):
for m in range(0,2**man):
if e==0:
val = sign*(2**(e+min_exp)*m*2**(-man))
log(f"{0 if sign==1 else 1} {e:0{exp}b} {m:0{man}b} = {sign} * 2^{e+min_exp} * {m*2**(-man)} \t= {val} (denormalized)")
else:
val = sign*(2**(e+min_exp-1)*(1+(m*2**(-man))))
log(f"{0 if sign==1 else 1} {e:0{exp}b} {m:0{man}b} = {sign} * 2^{e+min_exp-1} * {1+(m*2**(-man))} \t= {val}")
if val not in expected_vals:
expected_vals.append(val)
expected_vals.sort()
# Black box test to get representable numbers
import numpy as np
quant_vals=[]
for i in np.arange(-30,30,.01):
a = torch.Tensor([i]).to(device=d)
quant_a = float_quantize(a, exp=exp, man=man, rounding=r)
if quant_a[0] not in quant_vals:
quant_vals.append(quant_a[0].item())
log("Values representable in QPytorch")
log(quant_vals)
self.assertEqual(quant_vals, expected_vals)
if __name__ == "__main__":
unittest.main()
|
93173
|
import random
from dateutil import parser
from django.utils import timezone
from django.core import signing
from .primes import PRIMES
SURVEY_TOKEN_SALT = 'salary:survey'
SURVEY_TOKEN_MAX_AGE = 60 * 60 * 24 * 7 # 7 days
def get_survey_unique_primes(*emails):
if len(emails) > len(PRIMES):
raise ValueError("Not enough primes")
return zip(
emails,
random.sample(PRIMES, len(emails)),
)
def generate_survey_token(seed, prime_identifier, is_admin, survey_id):
return signing.dumps({
'seed': seed,
'prime_identifier': prime_identifier,
'is_admin': is_admin,
'survey_id': str(survey_id),
'expires_at': str(timezone.now() + timezone.timedelta(seconds=SURVEY_TOKEN_MAX_AGE)),
}, salt=SURVEY_TOKEN_SALT)
def unsign_survey_token(token):
data = signing.loads(
token, salt=SURVEY_TOKEN_SALT, max_age=SURVEY_TOKEN_MAX_AGE,
)
expires_at_str = data.pop('expires_at')
data['expires_at'] = parser.parse(expires_at_str)
return data
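# A hypothetical round trip (requires a configured Django SECRET_KEY, since
# django.core.signing derives its signatures from it):
#
#   token = generate_survey_token(seed=42, prime_identifier=7,
#                                 is_admin=False, survey_id="abc-123")
#   unsign_survey_token(token)
#   # -> {'seed': 42, 'prime_identifier': 7, 'is_admin': False,
#   #     'survey_id': 'abc-123', 'expires_at': datetime(...)}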
|
93196
|
import pandas as pd
from enum import Enum
class EQUI(Enum):
EQUIVALENT = 1
DIF_CARDINALITY = 2
DIF_SCHEMA = 3
DIF_VALUES = 4
"""
UTILS
"""
def most_likely_key(df):
res = uniqueness(df)
res = sorted(res.items(), key=lambda x: x[1], reverse=True)
return res[0]
def uniqueness(df):
res = dict()
for c in df.columns:
total = len(df[c])
unique = len(df[c].unique())
uniqueness = float(unique)/float(total)
res[c] = uniqueness
return res
def curate_view(df):
df = df.dropna() # drop nan
df = df.drop_duplicates()
# this may tweak indexes, so need to reset that
df = df.reset_index(drop=True)
# make sure it's sorted according to some order
df.sort_index(inplace=True, axis=1)
df.sort_index(inplace=True, axis=0)
return df
"""
VIEW CLASSIFICATION FUNCTIONS
"""
def equivalent(v1, v2):
v1 = curate_view(v1)
v2 = curate_view(v2)
if len(v1) != len(v2):
return False, EQUI.DIF_CARDINALITY
if len(v1.columns) != len(v2.columns):
return False, EQUI.DIF_SCHEMA
if not len(set(v1.columns).intersection(set(v2.columns))) == len(v1.columns):
return False, EQUI.DIF_SCHEMA # dif attributes
for c in v1.columns:
s1 = v1[c].apply(lambda x: str(x).lower()).sort_values().reset_index(drop=True)
s2 = v2[c].apply(lambda x: str(x).lower()).sort_values().reset_index(drop=True)
idx = (s1 == s2)
if not idx.all():
return False, EQUI.DIF_VALUES
return True, EQUI.EQUIVALENT
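# A small illustration with hypothetical frames: two views that differ only in
# row order and letter case are classified as equivalent.
#
#   a = pd.DataFrame({'id': [1, 2], 'name': ['Ann', 'Bob']})
#   b = pd.DataFrame({'id': [2, 1], 'name': ['BOB', 'ANN']})
#   equivalent(a, b)  # -> (True, EQUI.EQUIVALENT)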
def contained(v1, v2):
v1 = curate_view(v1)
v2 = curate_view(v2)
if len(v1) > len(v2):
l = v1
s = v2
elif len(v2) > len(v1):
l = v2
s = v1
elif len(v1) == len(v2):
for c in v1.columns:
tv1 = v1[c].apply(lambda x: str(x).lower())
tv2 = v2[c].apply(lambda x: str(x).lower())
v12 = len(set(tv1) - set(tv2))
v21 = len(set(tv2) - set(tv1))
if v12 > 0:
return False, v12
elif v21 > 0:
return False, v21
return True
for c in l.columns:
print(c)
small_set = s[c].apply(lambda x: str(x).lower())
large_set = l[c].apply(lambda x: str(x).lower())
dif = set(small_set) - set(large_set)
print(str(len(small_set)) + " - " + str(len(large_set)))
if len(dif) > 0:
return False, len(dif)
return True
def complementary(v1, v2):
v1 = curate_view(v1)
v2 = curate_view(v2)
k1 = most_likely_key(v1)[0]
k2 = most_likely_key(v2)[0]
s1 = set(v1[k1])
s2 = set(v2[k2])
s12 = (s1 - s2)
sdiff = set()
if len(s12) > 0:
sdiff.update((s12))
s21 = (s2 - s1)
if len(s21) > 0:
sdiff.update((s21))
if len(sdiff) == 0:
return False
return True, sdiff
def contradictory(v1, v2):
v1 = curate_view(v1)
v2 = curate_view(v2)
k1 = most_likely_key(v1)[0]
k2 = most_likely_key(v2)[0]
vg1 = v1.groupby([k1])
vg2 = v2.groupby([k2])
vref = None
voth = None
if len(vg1.groups) > len(vg2.groups):
vref = vg1
voth = vg2
else:
vref = vg2
voth = vg1
contradictions = []
for gn, gv in vref:
v = voth.get_group(gn)
are_equivalent, equivalency_type = equivalent(gv, v)
if not are_equivalent:
contradictions.append((k1, k2, gn))
# print(contradictions)
# break
if len(contradictions) == 0:
return False
return True, len(contradictions)
def inconsistent_value_on_key(df1, df2, key=None):
missing_keys = []
non_unique_df1 = set()
non_unique_df2 = set()
conflicting_pair = []
cols = df1.columns # should be same in both df1 and df2
for key_value in df1[key]:
row1 = df1[df1[key] == key_value]
row2 = df2[df2[key] == key_value]
if len(row1) == 0 or len(row2) == 0:
missing_keys.append(key_value)
continue
do_continue = False
if len(row1) > 1:
non_unique_df1.add(key_value)
do_continue = True
if len(row2) > 1:
non_unique_df2.add(key_value)
do_continue = True
if do_continue:
continue
for c in cols:
if len(row1[c]) > 0 and len(row2[c]) > 0:
val_1 = row1[c].values
val_2 = row2[c].values
if val_1 != val_2 and not pd.isnull(val_1) and not pd.isnull(val_2):
conflicting_pair.append((row1[c].values, row2[c].values))
return missing_keys, list(non_unique_df1), list(non_unique_df2), conflicting_pair
if __name__ == "__main__":
print("Materialized View Analysis")
raw_views_txt = {
0: '/Users/ra-mit/development/discovery_proto/data/dod/raw_view_0',
1: '/Users/ra-mit/development/discovery_proto/data/dod/raw_view_1',
2: '/Users/ra-mit/development/discovery_proto/data/dod/raw_view_2'
}
views_txt = {
0: '/Users/ra-mit/development/discovery_proto/data/dod/view_0',
1: '/Users/ra-mit/development/discovery_proto/data/dod/view_1',
2: '/Users/ra-mit/development/discovery_proto/data/dod/view_2'
}
raw_views = dict()
for k, v in raw_views_txt.items():
raw_views[k] = pd.read_csv(v, encoding='latin1')
views = dict()
for k, v in views_txt.items():
views[k] = pd.read_csv(v, encoding='latin1')
exact_views = dict()
for k, v in views.items():
print(str(k) + " -> " + str(len(v)))
if len(v.columns) == 2:
exact_views[k] = v
equivalent(views[0], views[2])
|
93217
|
async def say_hello():
print("hey, hello world!")
async def hello_world():
print("Resume coroutine.")
for i in range(3):
await say_hello()
print("Finished coroutine.")
class MyLoop:
def run_until_complete(self, task):
try:
while 1:
task.send(None)
except StopIteration:
pass
my_loop = MyLoop()
task = hello_world()
my_loop.run_until_complete(task)
import asyncio
loop = asyncio.get_event_loop()
task = hello_world()
loop.run_until_complete(task)
|
93232
|
import sys
import numpy as np
import argparse
from mung.data import DataSet, Partition
PART_NAMES = ["train", "dev", "test"]
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', action="store")
parser.add_argument('split_output_file', action="store")
parser.add_argument('train_size', action="store", type=float)
parser.add_argument('dev_size', action="store", type=float)
parser.add_argument('test_size', action="store", type=float)
parser.add_argument('--seed', action='store', dest='seed', type=int, default=1)
parser.add_argument('--maintain_partition_file', action='store', dest='maintain_partition_file', default=None)
args = parser.parse_args()
np.random.seed(args.seed)
data_dir = args.data_dir
split_output_file = args.split_output_file
part_sizes = [args.train_size, args.dev_size, args.test_size]
maintain_part = None
if args.maintain_partition_file is not None:
maintain_part = Partition.load(args.maintain_partition_file)
D_all = DataSet.load(data_dir, id_key="gameid")
partition = Partition.make(D_all, part_sizes, PART_NAMES, lambda d : d.get_id(), maintain_partition=maintain_part)
partition.save(split_output_file)
|
93235
|
import argparse
import numpy as np
import h5py
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
'''
Usage
python plot_trajectory --filename <path to file>
If the data collector is modified, use the extra arguments to override:
pose_name: topic name of the pose in the h5 data
action_name: topic name of the action
label_name: topic name of the label
Due to my configuration, I need to use python2 instead of python
I think life will be easier in a Linux environment
'''
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--filename","-f", type=str)
parser.add_argument("--pose_name",type=str,default = unicode("pose"))
parser.add_argument("--action_name",type=str,default = unicode("labels_to_name"))
parser.add_argument("--label_name",type=str,default = unicode("label"))
return vars(parser.parse_args())
def main(args):
data = h5py.File(args['filename'],'r')
label = np.array(data[args['label_name']])
action_name = np.array(data[args['action_name']])
pose = np.array(data[args['pose_name']])
time_length = len(pose)
action_trajectories = {}
action_idx = -1
pre_label = -1
for i in range(time_length):
if not label[i] == pre_label:
action_idx = action_idx + 1
action = action_name[action_idx]
pre_label = label[i]
if not action in action_trajectories:
action_trajectories[action] = []
action_trajectories[action].append(pose[i][0:3])
fig = plt.figure()
ax = fig.gca(projection = '3d')
for action, trajectory in action_trajectories.items():
trajectory = np.array(trajectory)
ax.plot(trajectory[:,0], trajectory[:,1], trajectory[:,2], label=str(action))
ax.legend()
plt.show()
if __name__ == "__main__":
args = _parse_args()
main(args)
|
93242
|
from abc import ABC, abstractmethod
from copy import copy
from typing import Any, Optional
import numpy as np
from gym.spaces import Space
from gym.utils import seeding
class Operator(ABC):
# Set these in ALL subclasses
suboperators: tuple = tuple()
grid_dependant: Optional[bool] = None
action_dependant: Optional[bool] = None
context_dependant: Optional[bool] = None
deterministic: Optional[bool] = None
@abstractmethod
def __init__(
self,
grid_space: Optional[Space] = None,
action_space: Optional[Space] = None,
context_space: Optional[Space] = None,
) -> None:
# fmt: off
self.grid_space = grid_space
self.action_space = action_space
self.context_space = context_space
# fmt: on
self.seed()
@abstractmethod
def update(
self, grid: np.ndarray, action: Any, context: Any
) -> tuple[np.ndarray, Any]:
"""Update a Cellular Automaton's Lattice (Grid) by using a provided action and context.
Parameters
----------
grid : array-like
Cellular Automaton lattice.
action : object
Action influencing the operator output.
context : object
Extra information.
Returns
-------
new_grid : array-like
Modified grid.
new_context : object
Modified context.
"""
new_grid = copy(grid)
new_context = copy(context)
return new_grid, new_context
def __call__(self, *args, **kwargs):
return self.update(*args, **kwargs)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
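# A minimal sketch of a concrete operator (hypothetical, not part of this
# module): it satisfies the abstract interface and leaves the grid untouched,
# illustrating the (new_grid, new_context) contract of update().
#
#   class IdentityOperator(Operator):
#       deterministic = True
#
#       def __init__(self, grid_space=None, action_space=None, context_space=None):
#           super().__init__(grid_space, action_space, context_space)
#
#       def update(self, grid, action, context):
#           return super().update(grid, action, context)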
|
93246
|
from collections import defaultdict
def load_words(filename = "words.txt"):
with open(filename) as f:
for word in f:
yield word.rstrip() # return a generator
def all_anagram(s):
'''
s: iterable of words
'''
d = defaultdict(list)
for i in s:
signature = ''.join(sorted(i)) # the signature is the word's characters in sorted order
d[signature].append(i)
return d
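# For example (hypothetical input): all_anagram(['listen', 'silent', 'cat'])
# maps the signature 'eilnst' to ['listen', 'silent'] and 'act' to ['cat'].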
def print_anagram_in_order(s):
d = all_anagram(s)
l = []
for signature, words in d.items():
if len(words) > 1:
l.append((len(words),words))
l.sort(reverse=True)
for x in l:
print(x)
def select_bingo(s, n=8):
d = all_anagram(s)
l = {}
for signature, words in d.items():
if len(words) > 1:
if len(signature) == n:
l[signature] = words
res = []
for k, v in l.items():
res.append((len(v),v))
for x in sorted(res, reverse=False):
print(x)
#print_anagram_in_order(load_words())
select_bingo(load_words())
|
93288
|
import datetime
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import Pegasus.db.schema as schema
from Pegasus.db.ensembles import EMError, Triggers, TriggerType
@pytest.fixture(scope="function")
def session():
"""
Create in-memory sqlite database with tables setup and return a db session
object.
"""
engine = create_engine("sqlite://")
# create all tables in the schema
schema.Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
# create an ensemble entry
session.add(
schema.Ensemble(
name="test-ensemble",
created=datetime.datetime.now(),
updated=datetime.datetime.now(),
state="ACTIVE",
max_running=1,
max_planning=1,
username="test-user",
)
)
session.commit()
yield session
# close session, db will be released
session.close()
class TestTriggers:
def test_get_trigger(self, session):
# insert trigger
t = schema.Trigger(
_id=1,
ensemble_id=1,
name="test-trigger",
state="STOPPED",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t)
triggers = Triggers(session)
expected = {
"id": 1,
"ensemble_id": 1,
"name": "test-trigger",
"state": "STOPPED",
"workflow": {"script": "/wf.py", "args": ["arg1"]},
"args": {"timeout": 100, "interval": 20},
"type": "CRON",
}
# get trigger and convert to dict for comparison
result = Triggers.get_object(triggers.get_trigger(1, "test-trigger"))
assert expected == result
def test_get_trigger_not_found(self, session):
with pytest.raises(EMError) as e:
Triggers(session).get_trigger(1, "test-trigger")
assert "No such trigger: test-trigger" in str(e)
assert e.value.status_code == 404
def test_list_triggers(self, session):
t1 = schema.Trigger(
_id=1,
ensemble_id=1,
name="test-trigger1",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t1)
t2 = schema.Trigger(
_id=2,
ensemble_id=1,
name="test-trigger2",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t2)
session.commit()
triggers = Triggers(session)
result = triggers.list_triggers()
assert len(result) == 2
def test_list_triggers_by_ensemble(self, session):
# add another ensemble to the ensemble table
session.add(
schema.Ensemble(
id=2,
name="test-ensemble2",
created=datetime.datetime.now(),
updated=datetime.datetime.now(),
state="ACTIVE",
max_running=1,
max_planning=1,
username="test-user",
)
)
session.commit()
# add a trigger assigned to test-ensemble2
t = schema.Trigger(
_id=1,
ensemble_id=2,
name="test-trigger1",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t)
session.commit()
triggers = Triggers(session)
result = triggers.list_triggers_by_ensemble(
username="test-user", ensemble="test-ensemble2"
)
assert len(result) == 1
assert Triggers.get_object(result[0]) == {
"id": 1,
"ensemble_id": 2,
"name": "test-trigger1",
"state": "READY",
"workflow": {"script": "/wf.py", "args": ["arg1"]},
"args": {"timeout": 100, "interval": 20},
"type": "CRON",
}
result = triggers.list_triggers_by_ensemble(
username="test-user", ensemble="doesntexist"
)
assert len(result) == 0
def test_insert_trigger(self, session):
print(session.query(schema.Ensemble).all())
triggers = Triggers(session)
triggers.insert_trigger(
ensemble_id=1,
trigger="test-trigger",
trigger_type=TriggerType.CRON.value,
workflow_script="/wf.py",
workflow_args=["arg1"],
interval=10,
timeout=20,
)
expected = {
"id": 1,
"ensemble_id": 1,
"name": "test-trigger",
"state": "READY",
"workflow": {"script": "/wf.py", "args": ["arg1"]},
"args": {"timeout": 20, "interval": 10},
"type": "CRON",
}
result = Triggers.get_object(
session.query(schema.Trigger)
.filter_by(ensemble_id=1, name="test-trigger")
.one()
)
assert expected == result
def test_update_state(self, session):
# insert trigger
t = schema.Trigger(
_id=1,
ensemble_id=1,
name="test-trigger",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t)
triggers = Triggers(session)
triggers.update_state(ensemble_id=1, trigger_id=1, new_state="RUNNING")
expected_state = "RUNNING"
result = session.query(schema.Trigger).filter_by(_id=1).one().state
assert expected_state == result
def test_delete_trigger(self, session):
# insert trigger
t = schema.Trigger(
_id=1,
ensemble_id=1,
name="test-trigger",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
session.add(t)
assert len(session.query(schema.Trigger).all()) == 1
triggers = Triggers(session)
# delete trigger
triggers.delete_trigger(ensemble_id=1, trigger="test-trigger")
assert len(session.query(schema.Trigger).all()) == 0
def test_get_object(self, session):
t = schema.Trigger(
_id=1,
ensemble_id=1,
name="test-trigger",
state="READY",
workflow=r'{"script":"/wf.py", "args":["arg1"]}',
args=r'{"timeout":100, "interval":20}',
_type=TriggerType.CRON.value,
)
expected = {
"id": 1,
"ensemble_id": 1,
"name": "test-trigger",
"state": "READY",
"workflow": {"script": "/wf.py", "args": ["arg1"]},
"args": {"timeout": 100, "interval": 20},
"type": "CRON",
}
result = Triggers.get_object(t)
assert expected == result
|
93308
|
import gym
from gym import spaces
import cv2
import pygame
import copy
import numpy as np
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv as OriginalEnv
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld
from overcooked_ai_py.visualization.state_visualizer import StateVisualizer
from overcooked_ai_py.mdp.actions import Action
def _convert_action(joint_action) -> list:
action_set = []
for _action in joint_action:
action_set.append(Action.INDEX_TO_ACTION[int(_action)])
return action_set
class OverCookedEnv():
def __init__(self,
scenario="tutorial_0",
episode_length=200
):
super(OverCookedEnv, self).__init__()
self.scenario = scenario
self.episode_length = episode_length
base_mdp = OvercookedGridworld.from_layout_name(scenario)
self.overcooked = OriginalEnv.from_mdp(base_mdp, horizon=episode_length)
self.visualizer = StateVisualizer()
self._available_actions = Action.ALL_ACTIONS
def reset(self):
self.overcooked.reset()
return self._get_observation()
def render(self, mode='rgb_array'):
image = self.visualizer.render_state(state=self.overcooked.state, grid=self.overcooked.mdp.terrain_mtx,
hud_data=StateVisualizer.default_hud_data(self.overcooked.state))
buffer = pygame.surfarray.array3d(image)
image = copy.deepcopy(buffer)
image = np.flip(np.rot90(image, 3), 1)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
image = cv2.resize(image, (528, 464))
return image
def step(self, action):
action = _convert_action(action)
next_state, reward, done, info = self.overcooked.step(action)
return self._get_observation(), reward, done, info
def _get_observation(self):
return self.get_feature_state().reshape(len(self.agents), -1)
def get_onehot_state(self):
return np.array(self.overcooked.lossless_state_encoding_mdp(self.overcooked.state))
def get_feature_state(self):
return np.array(self.overcooked.featurize_state_mdp(self.overcooked.state))
@property
def agents(self) -> [str]:
num_agents = len(self.overcooked.lossless_state_encoding_mdp(self.overcooked.state))
return ['ally' for _ in range(num_agents)]
@property
def observation_space(self):
state = self.get_feature_state()[0]
state = np.array(state)
# return spaces.Discrete(4056)
return spaces.Discrete(state.shape[0])
@property
def action_space(self):
return spaces.Discrete(Action.NUM_ACTIONS)
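# A minimal usage sketch (an assumption, not part of the original file):
# roll out a few random joint actions in the default tutorial layout.
if __name__ == "__main__":
    env = OverCookedEnv(scenario="tutorial_0", episode_length=10)
    obs = env.reset()
    for _ in range(10):
        # one random action index per agent; step() converts indices to Actions
        joint_action = [env.action_space.sample() for _ in env.agents]
        obs, reward, done, info = env.step(joint_action)
        if done:
            break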
|
93314
|
from fbrp import life_cycle
from fbrp import registrar
import argparse
@registrar.register_command("down")
class down_cmd:
@classmethod
def define_argparse(cls, parser: argparse.ArgumentParser):
parser.add_argument("proc", action="append", nargs="*")
@staticmethod
def exec(args: argparse.Namespace):
procs = life_cycle.system_state().procs.keys()
given_proc_names = args.proc[0]
if given_proc_names:
procs = set(procs) & set(given_proc_names)
for proc_name in procs:
life_cycle.set_ask(proc_name, life_cycle.Ask.DOWN)
|
93338
|
from diesel.web import DieselFlask, request
app = DieselFlask(__name__)
@app.route("/")
def hello():
name = request.args.get('name', 'world')
return "hello, %s!" % name
@app.route("/err")
def err():
a = b
return "never happens.."
if __name__ == '__main__':
import diesel
def t():
while True:
diesel.sleep(1)
print "also looping.."
app.diesel_app.add_loop(diesel.Loop(t))
app.run(debug=True)
|
93340
|
import pytest
from pg13 import pgmock_dbapi2, sqparse2
def test_connection():
with pgmock_dbapi2.connect() as a, a.cursor() as acur:
acur.execute('create table t1 (a int)')
acur.execute('insert into t1 values (1)')
acur.execute('insert into t1 values (3)')
        # test a second connection into the same DB
with pgmock_dbapi2.connect(a.db_id) as b, b.cursor() as bcur:
bcur.execute('select * from t1')
assert bcur.fetchall() == [[1],[3]]
# test that new connection *doesn't* share DB
with pgmock_dbapi2.connect() as c, c.cursor() as ccur:
with pytest.raises(KeyError):
ccur.execute('select * from t1')
def test_auto_rollback():
with pytest.raises(sqparse2.SQLSyntaxError):
with pgmock_dbapi2.connect() as db, db.cursor() as cur:
cur.execute('create table t1 (a int)')
cur.execute('insert into t1 values (1)')
cur.execute("this one won't parse")
assert 't1' not in db.db
def test_fetchone():
with pgmock_dbapi2.connect() as db, db.cursor() as cur:
cur.execute('create table t1 (a int, b int)')
db.db['t1'].rows = [[1,2],[2,3],[3,4]]
cur.execute('select * from t1')
assert cur.fetchone() == [1,2]
assert cur.fetchone() == [2,3]
def test_exmany():
"this is also testing subbed literals, I think"
vals = [[1,2],[3,4],[5,6]]
with pgmock_dbapi2.connect() as db, db.cursor() as cur:
cur.execute('create table t1 (a int, b int)')
cur.executemany('insert into t1 (a, b) values (%s, %s)', list(map(tuple, vals)))
assert db.db['t1'].rows == vals
def test_iter():
with pgmock_dbapi2.connect() as db, db.cursor() as cur:
cur.execute('create table t1 (a int, b int)')
db.db['t1'].rows = [[1,2],[3,4],[5,6],[7,8]]
# first, test whole iteration
cur.execute('select * from t1')
assert list(cur) == db.db['t1'].rows
# now test iteration from middle
cur.execute('select * from t1')
assert cur.fetchone() == [1,2]
assert list(cur) == db.db['t1'].rows[1:]
@pytest.mark.xfail
def test_count_after_fetch():
# todo: look at spec; what's supposed to happen here
raise NotImplementedError
@pytest.mark.xfail
def test_cursor_description_select():
raise NotImplementedError
@pytest.mark.xfail
def test_cursor_description_nonselect():
raise NotImplementedError
|
93365
|
from math import sin, pi
import random
import numpy as np
from scipy.stats import norm
def black_box_projectile(theta, v0=10, g=9.81):
assert theta >= 0
assert theta <= 90
return (v0 ** 2) * sin(2 * pi * theta / 180) / g
def random_shooting(n=1, min_a=0, max_a=90):
assert min_a <= max_a
return [random.uniform(min_a, max_a) for i in range(n)]
def pick_elites(actions, M_elites):
actions = np.array(actions)
assert M_elites <= len(actions)
assert M_elites > 0
results = np.array([black_box_projectile(a)
for a in actions])
sorted_ix = np.argsort(results)[-M_elites:][::-1]
return actions[sorted_ix], results[sorted_ix]
def try_random_shooting(trials=10000, n=20):
print("Trying random shooting.")
best_results = []
best_actions = []
for i in range(trials):
actions_to_try = random_shooting(n)
best_action, best_result = pick_elites(actions_to_try, 1)
best_results.append(best_result[0])
best_actions.append(best_action[0])
print(f"Out of {trials} trials:")
print(f"- Average score is {round(np.mean(best_results), 2)}")
print(f"- Average action is {round(np.mean(best_actions), 2)}")
print(f"- Action SD is {round(np.std(best_actions), 2)}")
def try_cem_normal(trials=10000, N=5, M_elites=2, iterations=3):
print("Trying CEM.")
best_results = []
best_actions = []
for i in range(trials):
print(f"================ trial {i}")
print("--iteration: 1")
actions_to_try = random_shooting(N)
elite_acts, _ = pick_elites(actions_to_try, M_elites)
print(f"actions_to_try: {np.round(actions_to_try, 2)}")
print(f"elites: {np.round(elite_acts, 2)}")
for r in range(iterations - 1):
print(f"--iteration: {r + 2}")
mu, std = norm.fit(elite_acts)
print(f"fitted normal mu: {np.round(mu, 2)}, std: {np.round(std, 2)}")
actions_to_try = np.clip(norm.rvs(mu, std, N), 0, 90)
elite_acts, elite_results = pick_elites(actions_to_try,
M_elites)
print(f"actions_to_try: {np.round(actions_to_try, 2)}")
print(f"elites: {np.round(elite_acts, 2)}")
mu, std = norm.fit(elite_acts)
print(f"final action: {np.round(mu, 2)}")
best_results.append(black_box_projectile(mu))
best_actions.append(np.clip(mu, 0, 90))
print(f"Out of {trials} trials:")
print(f"- Average score is {round(np.mean(best_results), 2)}")
print(f"- Average action is {round(np.mean(best_actions), 2)}")
print(f"- Action SD is {round(np.std(best_actions), 2)}")
|
93395
|
import sys
import re
import os
import fnmatch
import glob
from os import walk as py_walk
def walk(top, callback, args):
for root, dirs, files in py_walk(top):
callback(args, root, files)
def find_data_files(srcdir, destdir, *wildcards, **kw):
"""
get a list of all files under the srcdir matching wildcards,
returned in a format to be used for install_data
"""
def walk_helper(arg, dirname, files):
if '.svn' in dirname:
return
names = []
lst, wildcards, dirnameconverter, destdir = arg
for wc in wildcards:
wc_name = os.path.normpath(os.path.join(dirname, wc))
for f in files:
filename = os.path.normpath(os.path.join(dirname, f))
if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename):
names.append(filename)
if names:
destdirname = dirnameconverter.sub(destdir, dirname)
lst.append((destdirname, names))
file_list = []
recursive = kw.get('recursive', True)
converter = re.compile('^({0})'.format(srcdir))
if recursive:
walk(srcdir, walk_helper, (file_list, wildcards, converter, destdir))
else:
walk_helper((file_list, wildcards, converter, destdir),
srcdir,
[os.path.basename(f) for f in glob.glob(os.path.join(srcdir, '*'))])
return file_list
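# Hypothetical usage sketch (paths and patterns are assumptions, not from the
# original module): gather *.png and *.txt files under "assets/" in the
# (destdir, [files]) format expected by distutils' install_data.
if __name__ == "__main__":
    for destdir, names in find_data_files("assets", "share/myapp", "*.png", "*.txt"):
        print(destdir, names)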
|
93456
|
from os import makedirs
from os.path import exists, join
from fedot.core.composer.gp_composer.gp_composer import GPComposerBuilder, GPComposerRequirements
from fedot.core.data.data import InputData
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.optimisers.gp_comp.gp_optimiser import GPGraphOptimiserParameters
from fedot.core.optimisers.gp_comp.operators.inheritance import GeneticSchemeTypesEnum
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.repository.operation_types_repository import get_operations_for_task
from fedot.core.repository.quality_metrics_repository import ClassificationMetricsEnum, MetricsRepository, \
RegressionMetricsEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
from fedot.core.utils import default_fedot_data_dir, fedot_project_root
from fedot.sensitivity.node_sa_approaches import NodeDeletionAnalyze, NodeReplaceOperationAnalyze
from fedot.sensitivity.nodes_sensitivity import NodesAnalysis
def get_three_depth_manual_class_pipeline():
logit_node_primary = PrimaryNode('logit')
xgb_node_primary = PrimaryNode('xgboost')
xgb_node_primary_second = PrimaryNode('xgboost')
qda_node_third = SecondaryNode('qda', nodes_from=[xgb_node_primary_second])
knn_node_third = SecondaryNode('knn', nodes_from=[logit_node_primary, xgb_node_primary])
knn_root = SecondaryNode('knn', nodes_from=[qda_node_third, knn_node_third])
pipeline = Pipeline(knn_root)
return pipeline
def get_three_depth_manual_regr_pipeline():
xgb_primary = PrimaryNode('xgbreg')
knn_primary = PrimaryNode('knnreg')
dtreg_secondary = SecondaryNode('dtreg', nodes_from=[xgb_primary])
rfr_secondary = SecondaryNode('rfr', nodes_from=[knn_primary])
knnreg_root = SecondaryNode('knnreg', nodes_from=[dtreg_secondary, rfr_secondary])
pipeline = Pipeline(knnreg_root)
return pipeline
def get_composed_pipeline(dataset_to_compose, task, metric_function):
# the search of the models provided by the framework that can be used as nodes in a pipeline for the selected task
available_model_types = get_operations_for_task(task=task, mode='model')
# the choice and initialisation of the GP search
composer_requirements = GPComposerRequirements(
primary=available_model_types,
secondary=available_model_types, max_arity=3,
max_depth=3, pop_size=20, num_of_generations=20,
crossover_prob=0.8, mutation_prob=0.8)
# GP optimiser parameters choice
scheme_type = GeneticSchemeTypesEnum.steady_state
optimiser_parameters = GPGraphOptimiserParameters(genetic_scheme_type=scheme_type)
# Create builder for composer and set composer params
builder = GPComposerBuilder(task=task).with_requirements(composer_requirements).with_metrics(
metric_function).with_optimiser_parameters(optimiser_parameters)
# Create GP-based composer
composer = builder.build()
# the optimal pipeline generation by composition - the most time-consuming task
pipeline_evo_composed = composer.compose_pipeline(data=dataset_to_compose,
is_visualise=True)
return pipeline_evo_composed
def get_scoring_data():
file_path_train = 'cases/data/scoring/scoring_train.csv'
full_path_train = join(str(fedot_project_root()), file_path_train)
# a dataset for a final validation of the composed model
file_path_test = 'cases/data/scoring/scoring_test.csv'
full_path_test = join(str(fedot_project_root()), file_path_test)
task = Task(TaskTypesEnum.classification)
train = InputData.from_csv(full_path_train, task=task)
test = InputData.from_csv(full_path_test, task=task)
return train, test
def get_kc2_data():
file_path = 'cases/data/kc2/kc2.csv'
full_path = join(str(fedot_project_root()), file_path)
task = Task(TaskTypesEnum.classification)
data = InputData.from_csv(full_path, task=task)
train, test = train_test_data_setup(data)
return train, test
def get_cholesterol_data():
file_path = 'cases/data/cholesterol/cholesterol.csv'
full_path = join(str(fedot_project_root()), file_path)
task = Task(TaskTypesEnum.regression)
data = InputData.from_csv(full_path, task=task)
train, test = train_test_data_setup(data)
return train, test
def pipeline_by_task(task, metric, data, is_composed):
if is_composed:
pipeline = get_composed_pipeline(data, task,
metric_function=metric)
else:
if task.task_type.name == 'classification':
pipeline = get_three_depth_manual_class_pipeline()
else:
pipeline = get_three_depth_manual_regr_pipeline()
return pipeline
def run_analysis_case(train_data: InputData, test_data: InputData,
case_name: str, task, metric, is_composed=False, result_path=None):
pipeline = pipeline_by_task(task=task, metric=metric,
data=train_data, is_composed=is_composed)
pipeline.fit(train_data)
if not result_path:
result_path = join(default_fedot_data_dir(), 'sensitivity', f'{case_name}')
if not exists(result_path):
makedirs(result_path)
pipeline.show(path=result_path)
pipeline_analysis_result = NodesAnalysis(pipeline=pipeline, train_data=train_data,
test_data=test_data, path_to_save=result_path,
approaches=[NodeDeletionAnalyze,
NodeReplaceOperationAnalyze]).analyze()
print(f'pipeline analysis result {pipeline_analysis_result}')
def run_class_scoring_case(is_composed: bool, path_to_save=None):
train_data, test_data = get_scoring_data()
task = Task(TaskTypesEnum.classification)
# the choice of the metric for the pipeline quality assessment during composition
metric_function = MetricsRepository().metric_by_id(ClassificationMetricsEnum.ROCAUC_penalty)
if is_composed:
case = 'scoring_composed'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=True, result_path=path_to_save)
else:
case = 'scoring'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=False, result_path=path_to_save)
def run_class_kc2_case(is_composed: bool = False, path_to_save=None):
train_data, test_data = get_kc2_data()
task = Task(TaskTypesEnum.classification)
# the choice of the metric for the pipeline quality assessment during composition
metric_function = MetricsRepository().metric_by_id(ClassificationMetricsEnum.ROCAUC_penalty)
if is_composed:
case = 'kc2_composed'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=True, result_path=path_to_save)
else:
case = 'kc2'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=False, result_path=path_to_save)
def run_regr_case(is_composed: bool = False, path_to_save=None):
train_data, test_data = get_cholesterol_data()
task = Task(TaskTypesEnum.regression)
# the choice of the metric for the pipeline quality assessment during composition
metric_function = MetricsRepository().metric_by_id(RegressionMetricsEnum.RMSE)
if is_composed:
case = 'cholesterol_composed'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=True, result_path=path_to_save)
else:
case = 'cholesterol'
run_analysis_case(train_data, test_data, case, task,
metric=metric_function,
is_composed=False, result_path=path_to_save)
if __name__ == '__main__':
# scoring case manual
run_class_scoring_case(is_composed=False)
# kc2 case manual
run_class_kc2_case(is_composed=False)
# cholesterol regr case
run_regr_case(is_composed=False)
|
93481
|
from django.apps import AppConfig
class PIDConfig(AppConfig):
name = 'waldur_pid'
verbose_name = 'PID'
service_name = 'PID'
def ready(self):
pass
|
93490
|
from starkware.cairo.lang.compiler.ast.cairo_types import (
CairoType,
TypePointer,
TypeFelt,
)
from starkware.cairo.lang.compiler.identifier_definition import StructDefinition
from starkware.crypto.signature.signature import FIELD_PRIME
def is_felt_pointer(cairo_type: CairoType) -> bool:
return isinstance(cairo_type, TypePointer) and isinstance(
cairo_type.pointee, TypeFelt
)
def is_uint256(definition: StructDefinition) -> bool:
(struct_name, *_) = definition.full_name.path
return (
struct_name == "Uint256"
and len(definition.members.items()) == 2
and definition.members.get("low")
and definition.members.get("high")
and isinstance(definition.members["low"].cairo_type, TypeFelt)
and isinstance(definition.members["high"].cairo_type, TypeFelt)
)
MAX_UINT256 = (1 << 256) - 1
MIN_UINT256 = 0
def uint256_range_check(value: int):
if not MIN_UINT256 <= value <= MAX_UINT256:
raise ValueError(f"UInt256 is expected to be in range [0;2^256), got {value}")
MIN_FELT = -FIELD_PRIME / 2
MAX_FELT = FIELD_PRIME / 2
def cairo_vm_range_check(value: int):
if not 0 <= value < FIELD_PRIME:
raise ValueError(
f"Felt is expected to be in range [0; {FIELD_PRIME}), got {value}"
)
def encode_shortstring(text: str) -> int:
"""
    A function which encodes a short string value (at most 31 characters) into a cairo felt (MSB as the first character)
:param text: A short string value in python
:return: Short string value encoded into felt
"""
if len(text) > 31:
raise ValueError(
f"Shortstring cannot be longer than 31 characters, got: {len(text)}."
)
try:
text_bytes = text.encode("ascii")
except UnicodeEncodeError as u_err:
raise ValueError(f"Expected an ascii string. Found: {repr(text)}.") from u_err
value = int.from_bytes(text_bytes, "big")
cairo_vm_range_check(value)
return value
def decode_shortstring(value: int) -> str:
"""
    A function which decodes a felt value to a short string (31 characters)
    :param value: A felt value
    :return: Decoded string which corresponds to that felt
"""
cairo_vm_range_check(value)
return "".join([chr(i) for i in value.to_bytes(31, byteorder="big")])
|
93502
|
from radixlib.actions import TransferTokens
from typing import Dict, Any
import unittest
class TestTransferTokensAction(unittest.TestCase):
""" Unit tests for the TransferTokens action of mutable tokens """
ActionDict: Dict[str, Any] = {
"from_account": {
"address": "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
},
"to_account": {
"address": "tdx1qspsl85c9cpgm8t906zewv66quyg6d4gdlru2q9ujgk0u66c8kw2t6caan5qa"
},
"amount": {
"value": "100000000000000000000",
"token_identifier": {
"rri": "xrd_tr1qyf0x76s"
}
},
"type": "TransferTokens"
}
def test_from_dict(self):
""" Tests the derivation of the mainnet wallet addresses from the public key """
# The action loaded from the dictionary
mint: TransferTokens = TransferTokens.from_dict(self.ActionDict)
# Asserting that the TransferTokens object understood the content of the dictionary
self.assertEqual(mint.to_account.address, self.ActionDict['to_account']['address'])
self.assertEqual(mint.from_account.address, self.ActionDict['from_account']['address'])
self.assertEqual(mint.amount, int(self.ActionDict['amount']['value']))
self.assertEqual(mint.token_rri, self.ActionDict['amount']['token_identifier']['rri'])
def test_to_dict(self):
""" Tests the conversion of the token account to a dictionary """
# The account loaded from the dictionary
account: TransferTokens = TransferTokens.from_dict(self.ActionDict)
self.assertEqual(account.to_dict(), self.ActionDict)
|
93530
|
from typing import TypeVar
class Config():
SimplexMax: int = 8
Epsilon: float = 1e-5
Max: float = 1e37
PositiveMin: float = 1e-37
NegativeMin: float = -Max
Pi: float = 3.14159265
HalfPi: float = Pi / 2.0
DoublePi: float = Pi * 2.0
ReciprocalOfPi: float = 0.3183098861
GeometryEpsilon: float = 0.00001
MaxVelocity: float = 1000.0
MaxAngularVelocity: float = 1000.0
# render
BackgroundColor: int = 0x323232
OuterLineColor: int = 0x00FF00
FillColor: int = 0x008000
AxisPointColor: int = 0x00FF00
AxisLineColor: int = 0x008000
AABBLineColor: int = 0xFFFF33
BodyCenterColor: int = 0x660066
AngleLineXColor: int = 0x0000FF
AngleLineYColor: int = 0xFF0000
QueryRectLineColor: int = 0xFF0000
QueryRaycasFillColor: int = 0x00CCCC
QueryRaycasOutLineColor: int = 0x33FFFF
JointPointColor: int = 0xFF0000
JointLineColor: int = 0x0000FF
T = TypeVar('T', float, int)
@staticmethod
def clamp(num: T, low: T, high: T) -> T:
assert low <= high
if num < low:
return low
elif num > high:
return high
else:
return num
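# Tiny usage sketch (assumed, not part of the original file): clamp keeps a
# value inside the closed interval [low, high].
if __name__ == "__main__":
    assert Config.clamp(5.0, 0.0, 1.0) == 1.0
    assert Config.clamp(-3, 0, 10) == 0
    assert Config.clamp(0.5, 0.0, 1.0) == 0.5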
|
93533
|
import unittest
import datetime as dt
from AShareData.config import get_db_interface, set_global_config
from AShareData.date_utils import date_type2datetime
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
set_global_config('config.json')
self.db_interface = get_db_interface()
def test_read_data(self):
table_name = '合并资产负债表'
factor_name = '期末总股本'
start_date = date_type2datetime('20190101')
end_date = date_type2datetime('20190101')
report_period = date_type2datetime('20181231')
print(self.db_interface.read_table(table_name, factor_name).head())
print(self.db_interface.read_table(table_name, factor_name, start_date=start_date, end_date=end_date).head())
print(self.db_interface.read_table(table_name, factor_name, start_date=start_date).head())
print(self.db_interface.read_table(table_name, factor_name, report_period=report_period).head())
def test_calendar(self):
self.db_interface.read_table('交易日历')
def test_db_timestamp(self):
table_name = '合并资产负债表'
print(self.db_interface.get_latest_timestamp(table_name))
table_name = '模型因子日收益率'
print(self.db_interface.get_latest_timestamp(table_name))
print(self.db_interface.get_latest_timestamp(table_name, default_ts=dt.datetime(2021, 3, 4)))
if __name__ == '__main__':
unittest.main()
|
93535
|
import os
from os.path import join
from collections import OrderedDict
templateFile = open('../MaterialDesignIcons/template.xml', 'w')
file1 = open('part1.txt', 'r')
for line in file1:
templateFile.write(line),
file1.close()
icons_path = "../MaterialDesignIcons/root/material-design-icons"
walk = os.listdir(icons_path)
onlydirs = [f for f in walk if os.path.isdir(join(icons_path, f))]
# This line may be optional - if get error "ValueError: list.remove(x): x not in list" then comment it out
# onlydirs.remove(".git")
onlydirs.remove("sprites")
for category in onlydirs:
print category
walkdir = os.listdir(icons_path + '/' + category + "/svg/design")
walkdirremovedsvgbit = []
for dir2 in walkdir:
walkdirremovedsvgbit.append(dir2[:-9])
walkdirset = list(OrderedDict.fromkeys(walkdirremovedsvgbit))
for image_name in walkdirset:
templateFile.write(
' <option id="' + category + "/" + image_name + '">' + category + "/" + image_name + '</option>\n')
file2 = open('part2.txt', 'r')
for line in file2:
templateFile.write(line),
file2.close()
for color in ["black", "grey600", "white"]:
for category in onlydirs:
print category
walkdir = os.listdir(icons_path + '/' + category + "/svg/design")
walkdirremovedsvgbit = []
for dir2 in walkdir:
walkdirremovedsvgbit.append(dir2[:-9])
walkdirset = list(OrderedDict.fromkeys(walkdirremovedsvgbit))
for image_name in walkdirset:
templateFile.write(
' <thumb color="' + color + '" asset="' + category + "/" + image_name + '">root/material-design-icons/' + category + '/drawable-xxxhdpi/' + image_name + '_' + color + '_48dp.png</thumb>\n')
file3 = open('part3.txt', 'r')
for line in file3:
templateFile.write(line),
file3.close()
templateFile.close()
|
93541
|
from datetime import timedelta
from jcasts.episodes.emails import send_new_episodes_email
from jcasts.episodes.factories import EpisodeFactory
from jcasts.podcasts.factories import SubscriptionFactory
class TestSendNewEpisodesEmail:
def test_send_if_no_episodes(self, user, mailoutbox):
"""If no recommendations, don't send."""
send_new_episodes_email(user, timedelta(days=7))
assert len(mailoutbox) == 0
def test_send_if_insufficient_episodes(self, user, mailoutbox):
podcast = SubscriptionFactory(user=user).podcast
EpisodeFactory(podcast=podcast)
send_new_episodes_email(user, timedelta(days=7))
assert len(mailoutbox) == 0
def test_send_if_sufficient_episodes(self, user, mailoutbox):
for _ in range(3):
podcast = SubscriptionFactory(user=user).podcast
EpisodeFactory(podcast=podcast)
send_new_episodes_email(user, timedelta(days=7))
assert len(mailoutbox) == 1
assert mailoutbox[0].to == [user.email]
|
93548
|
import django_filters
from django.contrib.auth.models import User, Group
from rest_framework import viewsets, mixins
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from api.pagination import LargeResultsSetPagination
from api.permissions import IsAdminUserOrReadOnly
from api.serializers import QuizSerializer
from api.models import Quiz
class QuizFilter(django_filters.FilterSet):
class Meta:
model = Quiz
fields = ['id', 'created', 'course', 'title', 'description',]
class QuizViewSet(viewsets.ModelViewSet):
queryset = Quiz.objects.all()
serializer_class = QuizSerializer
pagination_class = LargeResultsSetPagination
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAdminUserOrReadOnly,)
filter_class = QuizFilter
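# Hypothetical wiring sketch (router module and URL prefix are assumptions,
# not part of the original module): expose the viewset through a DRF router.
# from rest_framework import routers
# router = routers.DefaultRouter()
# router.register(r'quizzes', QuizViewSet)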
|
93634
|
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import TTLocalizer
from toontown.battle import DistributedBattleBldg
class DistributedCogdoBattleBldg(DistributedBattleBldg.DistributedBattleBldg):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCogdoBattleBldg')
def __init__(self, cr):
DistributedBattleBldg.DistributedBattleBldg.__init__(self, cr)
def getBossBattleTaunt(self):
return TTLocalizer.CogdoBattleBldgBossTaunt
|
93640
|
from datetime import date, timedelta, datetime, time
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db import models
from django.db import connections
from django.utils import timezone
from .models import Period, StatisticByDate, StatisticByDateAndObject
class ObjectsByDateTracker(object):
date_field = 'date'
aggr_op = None
metric = None
period = None
statistic_model = StatisticByDate
def __init__(self, **kwargs):
for prop, val in kwargs.items():
setattr(self, prop, val)
def get_most_recent_kwargs(self):
most_recent_kwargs = {
'metric': self.metric,
'period': self.period}
return most_recent_kwargs
def get_start_date(self, qs):
most_recent_kwargs = self.get_most_recent_kwargs()
last_stat = self.statistic_model.objects.most_recent(
**most_recent_kwargs)
if last_stat:
start_date = last_stat.date
else:
first_instance = qs.order_by(self.date_field).first()
if first_instance is None:
# No data
return
start_date = getattr(first_instance, self.date_field)
if start_date and isinstance(start_date, datetime):
if timezone.is_aware(start_date):
start_date = timezone.make_naive(start_date).date()
else:
start_date = start_date.date()
return start_date
def track_lifetime_upto(self, qs, upto_date):
filter_kwargs = {
self.date_field + '__date__lte': upto_date
}
n = qs.filter(**filter_kwargs).count()
self.statistic_model.objects.record(
metric=self.metric,
value=n,
period=self.period,
date=upto_date)
def get_track_values(self):
return []
def get_record_kwargs(self, val):
return {}
def track(self, qs):
to_date = date.today()
start_date = self.get_start_date(qs)
if not start_date:
return
if self.period == Period.LIFETIME:
            # Intentionally recompute the last stat, since it may have been
            # computed before that day was over.
upto_date = start_date
while upto_date <= to_date:
self.track_lifetime_upto(qs, upto_date)
upto_date += timedelta(days=1)
elif self.period == Period.DAY:
values_fields = ['ts_date'] + self.get_track_values()
connection = connections[qs.db]
tzname = (
timezone.get_current_timezone_name()
if settings.USE_TZ else None)
is_datetime = isinstance(qs.model._meta.get_field(
self.date_field), models.DateTimeField)
if is_datetime:
date_sql = connection.ops.datetime_cast_date_sql(
self.date_field,
tzname)
# before django 2.0 it returns a tuple
if isinstance(date_sql, tuple):
vals = qs.extra(
select={"ts_date": date_sql[0]},
select_params=date_sql[1])
else:
vals = qs.extra(select={"ts_date": date_sql})
start_dt = datetime.combine(
start_date, time()) - timedelta(days=1)
if tzname:
start_dt = timezone.make_aware(
start_dt,
timezone.get_current_timezone())
else:
vals = qs.extra(select={"ts_date": self.date_field})
start_dt = start_date
vals = vals.filter(
**{self.date_field + '__gte': start_dt}).values(
*values_fields).order_by().annotate(ts_n=self.aggr_op)
# TODO: Bulk create
for val in vals:
self.statistic_model.objects.record(
metric=self.metric,
value=val['ts_n'],
date=val['ts_date'],
period=self.period,
**self.get_record_kwargs(val))
else:
raise NotImplementedError
class ObjectsByDateAndObjectTracker(ObjectsByDateTracker):
object = None
object_model = None
object_field = None
statistic_model = StatisticByDateAndObject
def __init__(self, **kwargs):
super(ObjectsByDateAndObjectTracker, self).__init__(**kwargs)
assert self.object is None or self.object_field is None
assert self.object or self.object_field
def get_most_recent_kwargs(self):
kwargs = super(
ObjectsByDateAndObjectTracker, self).get_most_recent_kwargs()
if self.object_model:
kwargs['object_type'] = ContentType.objects.get_for_model(
self.object_model)
else:
kwargs['object'] = self.object
return kwargs
def track_lifetime_upto(self, qs, upto_date):
filter_kwargs = {
self.date_field + '__date__lte': upto_date
}
if self.object_model:
vals = qs.filter(**filter_kwargs).values(
self.object_field).annotate(ts_n=self.aggr_op)
for val in vals:
object = self.object_model(
pk=val[self.object_field])
# TODO: Bulk create
StatisticByDateAndObject.objects.record(
metric=self.metric,
value=val['ts_n'],
date=upto_date,
object=object,
period=self.period)
else:
n = qs.filter(**filter_kwargs).count()
StatisticByDateAndObject.objects.record(
metric=self.metric,
value=n,
object=self.object,
period=self.period,
date=upto_date)
def get_track_values(self):
ret = super(ObjectsByDateAndObjectTracker, self).get_track_values()
if self.object_model:
ret.append(self.object_field)
return ret
def get_record_kwargs(self, val):
if self.object_model:
object = self.object_model(pk=val[self.object_field])
else:
object = self.object
return {'object': object}
class CountObjectsByDateTracker(ObjectsByDateTracker):
aggr_op = models.Count('pk', distinct=True)
class CountObjectsByDateAndObjectTracker(ObjectsByDateAndObjectTracker):
aggr_op = models.Count('pk', distinct=True)
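# Hypothetical usage sketch (the User model, metric object and field name are
# assumptions, not part of this module): count sign-ups per day.
# from django.contrib.auth.models import User
# tracker = CountObjectsByDateTracker(
#     metric=signup_metric, period=Period.DAY, date_field="date_joined")
# tracker.track(User.objects.all())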
|
93643
|
import discord
from discord.ext import commands,tasks
import asyncio
import json
with open('./setting.json', mode='r',encoding='utf8') as jfile:
jdata = json.load(jfile)
intents = discord.Intents.all()
bot=commands.Bot(command_prefix=".",intents=intents)
@bot.event
async def on_ready():
print('We have logged in as {0.user}'.format(bot))
game = discord.Game("counting")
await bot.change_presence(status=discord.Status.idle, activity=game)
#####################################################################################
@bot.command()
async def countdown(ctx, hour, minute):
hour = int(hour)
minute = int(minute)
minute = minute + (hour*60)
embed=discord.Embed(color=0x009dff,title=f"倒數計時開始 剩下 {minute} 分鐘")
msg = await ctx.send(embed=embed)
    # time.sleep() would block discord.py's event loop; use asyncio.sleep() instead
    await asyncio.sleep(60)
    while True:
        minute = minute - 1
        if minute == 0:
            embed=discord.Embed(color=0x009dff,title="時間到!")  # "Time's up!"
            await msg.edit(embed=embed)
            break
        embed=discord.Embed(color=0x009dff,title=f"剩下 {minute} 分鐘")  # "{minute} minutes left"
        await msg.edit(embed=embed)
        await asyncio.sleep(60)
#####################################################################################
bot.run(jdata['HackathonTOKEN'])
#hackathon
|
93649
|
import os
import pytest
from ruamel.yaml import YAML
_TEST_DIR = os.path.dirname(__file__)
# Fixtures in pytest work with reused outer names, so shut up pylint here.
# pylint:disable=redefined-outer-name
@pytest.fixture
def test_file_path():
def loader(*path):
return os.path.join(_TEST_DIR, "data", *path)
return loader
@pytest.fixture
def open_test_file(test_file_path):
def loader(*path):
return open(test_file_path(*path), "r", encoding="utf-8")
return loader
@pytest.fixture
def load_yaml(open_test_file):
def loader(filename):
with open_test_file(os.path.join("yaml", filename)) as yaml_file:
return YAML().load(yaml_file)
return loader
@pytest.fixture
def read_test_file(open_test_file):
def reader(*path):
with open_test_file(*path) as test_file:
return test_file.read()
return reader
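# Hypothetical example test (the YAML file name is an assumption, not part of
# this conftest): fixtures compose, so a test can request load_yaml directly.
# def test_example_config(load_yaml):
#     config = load_yaml("example.yml")
#     assert config is not None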
|
93655
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ai_pics', '0002_aiattachment_file_no'),
]
operations = [
migrations.AddField(
model_name='aipics',
name='is_valid',
field=models.NullBooleanField(),
),
]
|
93701
|
import os
serenityff_C6 = os.path.dirname(__file__) + "/C6/"
serenityff_C12 = os.path.dirname(__file__) + "/C12/"
|
93706
|
import uuid
from django.core.exceptions import ValidationError
from django.test import TestCase
from app.models import Uuid
class UuidTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.uuid_value = uuid.uuid4()
Uuid.objects.create(uuid=cls.uuid_value)
def test_uuid(self):
uuid_item = Uuid.objects.get(uuid=self.uuid_value)
self.assertEqual(uuid_item.uuid, self.uuid_value)
def test_uuid_exact(self):
uuid_item = Uuid.objects.get(uuid__exact=self.uuid_value)
self.assertEqual(uuid_item.uuid, self.uuid_value)
def test_uuid_in(self):
uuid_item = Uuid.objects.get(uuid__in=[self.uuid_value])
self.assertEqual(uuid_item.uuid, self.uuid_value)
def test_invalid_uuid(self):
with self.assertRaises(ValidationError):
Uuid.objects.create(uuid='non-uuid text value')
|
93726
|
import sys
import logging
import argparse
import json
from blocksec2go import CardError
def main(argv=None):
    if argv is None:
argv = sys.argv
prog = sys.argv[0]
args = sys.argv[1:]
parser = argparse.ArgumentParser(
prog=prog,
description='Command line interface for Infineon\'s Blockchain Security 2Go starter kit'
)
subparsers = parser.add_subparsers(help='subcommands')
parser.add_argument('--reader', help='name of the reader to use')
parser.add_argument('--machine-readable', help='json output', action='store_true')
parser.add_argument(
'--loglevel',
help='log level',
default='info',
choices=['debug', 'info', 'warning', 'error', 'critical', 'nolog'],
)
from blocksec2go.cli import (generate_signature, generate_keypair, get_key_info,
list_readers, get_card_info, encrypted_keyimport, set_pin, change_pin, unlock_pin,
disable_pin)
generate_signature.add_subcommand(subparsers)
generate_keypair.add_subcommand(subparsers)
get_key_info.add_subcommand(subparsers)
list_readers.add_subcommand(subparsers)
get_card_info.add_subcommand(subparsers)
encrypted_keyimport.add_subcommand(subparsers)
set_pin.add_subcommand(subparsers)
change_pin.add_subcommand(subparsers)
unlock_pin.add_subcommand(subparsers)
disable_pin.add_subcommand(subparsers)
args = parser.parse_args(args)
if hasattr(args, 'func'):
if args.loglevel != 'nolog':
logging.basicConfig(level=args.loglevel.upper())
try:
args.func(args)
return 0
except CardError as e:
if args.machine_readable:
json.dump({'status': 'CardError', 'error': e.response.sw}, fp=sys.stdout)
else:
print(str(e))
return -1
except Exception as e:
if args.machine_readable:
json.dump({'status': 'error', 'error': str(e)}, fp=sys.stdout)
return -1
else:
raise e
else:
parser.print_help()
return 0
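# Assumed entry-point guard (not shown in the snippet above): run main() when
# the module is executed directly.
if __name__ == "__main__":
    sys.exit(main())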
|
93738
|
from video import *
import sys
header_color='#ffaa00'
slide("title.png",48,[(50,"A Novel Mass Spring",header_color),
(50,"Model for Simulating",header_color),
(50,"Full Hair Geometry",header_color),
" ",
(36,"paperid 0384"),
(36,"SIGGRAPH 2008")])
slide("tet_springs.png",36,
[(54,"Altitude Springs",header_color),
" ",
"Our novel altitude",
"springs allow a volumetric",
"torus to recover from",
"full collapse"])
slide("tet_springs_shell.png",36,
[(54,"Altitude Springs",header_color),
" ",
"Our novel altitude",
"springs also allow a",
"thin shell to recover from",
"full collapse"])
slide("model.png",36,
[(54,"Hair Model",header_color),
" ",
"Our new hair model",
"reproduces real hair",
"phenomena"])
slide("curly.png",36,
[(54,"Hair Model",header_color),
" ",
"Curliness can be varied by",
"by adjusting spring stiffness"])
slide("cloth_frame_rate.png",36,
[(54,"Time Integration",header_color),
" ",
"Our linear implicit springs",
"allow simulating cloth using",
"only one time step per frame"])
slide("cloth_cfl_10.png",36,
[(54,"Time Integration",header_color),
" ",
"...of course smaller",
"time steps yield",
"more accuracy"])
slide("levelset_interpolation.png",36,
[(54,"Time Integration",header_color),
" ",
"Our new level set",
"interpolation scheme",
"allows more accurate",
"collision bodies"])
slide("tuft_old.png",36,
[(54,"Hair Tuft",header_color),
" ",
"Basic mass-spring model",
"with dynamic head motion",
" ",
"(too much stretching)"])
slide("tuft_new.png",36,
[(54,"Hair Tuft",header_color),
" ",
"Our mass-spring model",
"better handles the",
"complex motion",
" ",
"(better length preservation)"])
slide("tuft_new_interact.png",36,
[(54,"Hair Tuft",header_color),
" ",
"Our model with stiction",
"and self-collisions"])
slide("shaking5k.png",36,
[(54,"Full head",header_color),
" ",
"5,000 long straight hairs",
" ",
"(coverage too thin)"])
slide("shaking.png",36,
[(54,"Full head",header_color),
" ",
"10,000 long straight hairs",
" ",
"(better coverage)"])
slide("spinning.png",36,
[(54,"Full head",header_color),
" ",
"5,000 long curly hairs"])
slide("wind.png",36,
[(54,"Full head",header_color),
" ",
"10,000 medium length",
"straight hairs",
"undergoing wind forces"])
slide("shrek.png",36,
[(54,"Full head",header_color),
" ",
"10,000 medium length",
"straight hairs"])
slide("half.png",36,
["half speed"])
slide("end.png",54,["The End"])
final_render_base="/solver/vol3/hair7/final_render_new"
#final_render_base_20080121="/solver/vol3/hair7/render/Projects/renderman/output_final_20080121/generic_hair"
final_render_base_20080122="/solver/vol3/hair7/render/Projects/renderman/output_final_20080122/generic_hair"
video_dir="hair_video_tmp"
if not os.path.exists(video_dir):
os.mkdir(video_dir)
#if os.path.isdir(video_dir):
# print "%s already exists... delete? (y/n) "%video_dir,
# c=sys.stdin.readline()
# if c=="y\n": shutil.rmtree(video_dir)
# else: sys.exit(1)
shutil.rmtree(video_dir)
video=VIDEO(video_dir)
using_lengths=True
testing_titles=False
skip_interval=1
def caption_length(time=3.5):
if not using_lengths and testing_titles:
return 1
else:
return int(video.fps*time)
video.add_frame("title.png",caption_length(4./skip_interval))
# tet springs demos
video.add_frame("tet_springs.png",caption_length(9./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol3/hair7/final_render/torus_volume",0,170,step=skip_interval)
video.add_frame("tet_springs_shell.png",caption_length(9./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol3/hair7/final_render/torus_shell",0,240,step=skip_interval)
video.add_frame("model.png",caption_length(7./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol3/hair7/final_render/strand/merge",step=skip_interval)
video.add_frame("curly.png",caption_length(7./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol3/hair7/final_render/strand/curly_drop_composite",step=skip_interval)
video.add_frame("half.png",caption_length(2./skip_interval))
video.add_directory("/solver/vol3/hair7/final_render/strand/curly_drop_composite",step=skip_interval,duplicate=2)
video.add_frame("cloth_frame_rate.png",caption_length(11./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol3/hair7/final_render/cloth_cfl10000",0,158,step=skip_interval)
# implicit spring demos
video.add_frame("cloth_cfl_10.png",caption_length(6./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol3/hair7/final_render/cloth_cfl10",0,158,step=skip_interval)
video.add_frame("levelset_interpolation.png",caption_length(9./skip_interval))
if not testing_titles:
video.add_directory("/solver/vol3/hair7/final_render/interpolation/merged",1,60,step=skip_interval)
video.add_directory("/solver/vol3/hair7/final_render/interpolation/merged",1,60,step=skip_interval)
# tuft examples
video.add_frame("tuft_old.png",caption_length(8./skip_interval))
if not testing_titles:
video.add_directory(os.path.join(final_render_base_20080122,"shaking_tuft_back.old-blur_render"),0,188,step=1*skip_interval)
video.add_frame("half.png",caption_length(2./skip_interval))
video.add_directory(os.path.join(final_render_base_20080122,"shaking_tuft_back.old_render"),0,379,step=1*skip_interval)
video.add_frame("tuft_new.png",caption_length(8./skip_interval))
if not testing_titles:
video.add_directory(os.path.join(final_render_base_20080122,"shaking_tuft_back.nocoladh-blur_render"),0,188,step=1*skip_interval)
video.add_frame("half.png",caption_length(2./skip_interval))
video.add_directory(os.path.join(final_render_base_20080122,"shaking_tuft_back.nocoladh_render"),0,379,step=1*skip_interval)
video.add_frame("tuft_new_interact.png",caption_length(6./skip_interval))
if not testing_titles:
video.add_directory(os.path.join(final_render_base_20080122,"shaking_tuft_back.blur_render"),0,188,step=1*skip_interval)
video.add_frame("half.png",caption_length(2./skip_interval))
video.add_directory(os.path.join(final_render_base_20080122,"shaking_tuft_back_render"),0,379,step=1*skip_interval)
# full head examples
video.add_frame("shaking5k.png",caption_length(8./skip_interval))
if not testing_titles:
video.add_directory(os.path.join(final_render_base_20080122,"shaking_head_front.5k-blur_render"),0,109,step=1*skip_interval) # TODO: make this whatever we get to on long (119 natural) (99 old)
video.add_frame("half.png",caption_length(2./skip_interval))
video.add_directory(os.path.join(final_render_base_20080122,"shaking_head_front.5k_render"),0,220,step=1*skip_interval) # TODO: make this whatever we get to on long (239 natural) (199 old)
video.add_frame("shaking.png",caption_length(5.5/skip_interval))
if not testing_titles:
video.add_directory(os.path.join(final_render_base_20080122,"shaking_head_front.blur_render"),0,109,step=1*skip_interval) # TODO: make this whatever we get to on long
video.add_frame("half.png",caption_length(2.5/skip_interval))
video.add_directory(os.path.join(final_render_base_20080122,"shaking_head_front_render"),0,220,step=1*skip_interval) # TODO: make this whatever we get to on long
video.add_frame("spinning.png",caption_length(5./skip_interval))
if not testing_titles:
video.add_directory(os.path.join(final_render_base_20080122,"spinning_head_camera_lower.new-blur_render"),0,43,step=1*skip_interval)
video.add_frame("half.png",caption_length(2./skip_interval))
video.add_directory(os.path.join(final_render_base_20080122,"spinning_head_camera_lower.new_render"),0,86,step=1*skip_interval)
video.add_frame("wind.png",caption_length(7./skip_interval))
if not testing_titles:
video.add_directory(os.path.join(final_render_base_20080122,"shrek_head_back.blur_render"),0,144,step=1*skip_interval)
video.add_frame("half.png",caption_length(2./skip_interval))
video.add_directory(os.path.join(final_render_base_20080122,"shrek_head_back_render"),0,289,step=1*skip_interval)
video.add_frame("shrek.png",caption_length(5./skip_interval))
if not testing_titles:
video.add_directory(os.path.join(final_render_base_20080122,"shrek_head.blur_render"),0,102,step=1*skip_interval)
video.add_frame("half.png",caption_length(2./skip_interval))
video.add_directory(os.path.join(final_render_base_20080122,"shrek_head_render"),0,207,step=1*skip_interval)
video.add_frame("end.png",caption_length(2./skip_interval))
video.make_movie('hair')
|
93748
|
from common import *
def test_virtual_columns_spherical():
df = vaex.from_scalars(alpha=0, delta=0, distance=1)
df.add_virtual_columns_spherical_to_cartesian("alpha", "delta", "distance", "x", "y", "z", radians=False)
x, y, z = df['x'].values[0], df['y'].values[0], df['z'].values[0]
np.testing.assert_array_almost_equal(x, 1)
np.testing.assert_array_almost_equal(y, 0)
np.testing.assert_array_almost_equal(z, 0)
for radians in [True, False]:
def dfs(alpha, delta, distance, radians=radians):
ds_1 = vaex.from_scalars(alpha=alpha, delta=delta, distance=distance, alpha_e=0.1, delta_e=0.2, distance_e=0.3)
ds_1.add_virtual_columns_spherical_to_cartesian("alpha", "delta", "distance", propagate_uncertainties=True, radians=radians)
N = 1000000
# distance
alpha = np.random.normal(0, 0.1, N) + alpha
delta = np.random.normal(0, 0.2, N) + delta
distance = np.random.normal(0, 0.3, N) + distance
ds_many = vaex.from_arrays(alpha=alpha, delta=delta, distance=distance)
ds_many.add_virtual_columns_spherical_to_cartesian("alpha", "delta", "distance", radians=radians)
return ds_1, ds_many
ds_1, ds_many = dfs(0, 0, 1.)
x_e = ds_1.evaluate("x_uncertainty")[0]
y_e = ds_1.evaluate("y_uncertainty")[0]
z_e = ds_1.evaluate("z_uncertainty")[0]
np.testing.assert_array_almost_equal(x_e, ds_many.std("x").item(), decimal=2)
np.testing.assert_array_almost_equal(y_e, ds_many.std("y").item(), decimal=2)
np.testing.assert_array_almost_equal(z_e, ds_many.std("z").item(), decimal=2)
np.testing.assert_array_almost_equal(x_e, 0.3)
# TODO: from cartesian tot spherical errors
df.add_virtual_columns_cartesian_to_spherical("x", "y", "z", "theta", "phi", "r", radians=False)
theta, phi, r = df("theta", "phi", "r").row(0)
np.testing.assert_array_almost_equal(theta, 0)
np.testing.assert_array_almost_equal(phi, 0)
np.testing.assert_array_almost_equal(r, 1)
df.add_virtual_columns_celestial("alpha", "delta", "l", "b", _matrix='eq2gal')
# TODO: properly test, with and without radians
df.evaluate("l")
df.evaluate("b")
ds = vaex.from_scalars(x=1, y=0, z=0)
ds.add_virtual_columns_cartesian_to_spherical()
assert ds.evaluate('b')[0] == 0
def test_inside_polygon_single(df_factory):
df = df_factory(x=[1, 2, 3], y=[2, 3, 4])
px = np.array([1.5, 2.5, 2.5, 1.5])
py = np.array([2.5, 2.5, 3.5, 3.5])
df['inside'] = df.geo.inside_polygon(df.x, df.y, px, py)
assert df.inside.values.tolist() == [False, True, False]
def test_inside_polygons(df_factory):
df = df_factory(x=[1, 2, 3], y=[2, 3, 4])
px = np.array([1.5, 2.5, 2.5, 1.5])
py = np.array([2.5, 2.5, 3.5, 3.5])
df['inside'] = df.geo.inside_polygons(df.x, df.y, [px, px + 1], [py, py + 1], any=True)
assert df.inside.values.tolist() == [False, True, True]
def test_which_polygon_single(df_factory):
df = df_factory(x=[1, 2, 3], y=[2, 3, 4])
px = np.array([1.5, 2.5, 2.5, 1.5])
py = np.array([2.5, 2.5, 3.5, 3.5])
df['polygon_index'] = df.geo.inside_which_polygon(df.x, df.y, [px, px + 1], [py, py + 1])
assert df.polygon_index.values.tolist() == [None, 0, 1]
def test_which_polygons(df_factory):
df = df_factory(x=[1, 2, 3], y=[2, 3, 4])
# polygon1a = np.array( [(1.5, 2.5, 2.5, 1.5), (2.5, 2.5, 3.5, 3.5)] )
# polygon1b = (polygon1a.T + [1, 1]).T
px = np.array([1.5, 2.5, 2.5, 1.5])
py = np.array([2.5, 2.5, 3.5, 3.5])
polygon1a = [px, py] # matches #1
polygon1b = [px + 1, py + 1] # matches #2
polygon_nothing = [px + 10, py + 10] # matches nothing
pxw = np.array([1.5, 3.5, 3.5, 1.5])
pyw = np.array([2.5, 2.5, 4.5, 4.5])
polygon1c = [pxw, pyw] # matches #1 and 2
pxs = [[polygon1a, polygon1b], [polygon1b, polygon1c], [polygon1c]]
df['polygon_index'] = df.geo.inside_which_polygons(df.x, df.y, pxs, any=True)
assert df.polygon_index.values.tolist() == [None, 0, 0]
df['polygon_index'] = df.geo.inside_which_polygons(df.x, df.y, pxs, any=False)
assert df.polygon_index.values.tolist() == [None, 2, 1]
pxs = [[polygon_nothing, polygon1a, polygon_nothing]]
df['polygon_index'] = df.geo.inside_which_polygons(df.x, df.y, pxs, any=True)
assert df.polygon_index.values.tolist() == [None, 0, None]
pxs = [[polygon1a, polygon_nothing, polygon1a]]
df['polygon_index'] = df.geo.inside_which_polygons(df.x, df.y, pxs, any=False)
assert df.polygon_index.values.tolist() == [None, None, None]
|
93785
|
r"""
Check for SageMath Python modules
"""
from . import PythonModule
from .join_feature import JoinFeature
class sage__combinat(JoinFeature):
def __init__(self):
# sage.combinat will be a namespace package.
# Testing whether sage.combinat itself can be imported is meaningless.
# Hence, we test a Python module within the package.
JoinFeature.__init__(self, 'sage.combinat',
[PythonModule('sage.combinat.combinations')])
class sage__graphs(JoinFeature):
def __init__(self):
JoinFeature.__init__(self, 'sage.graphs',
[PythonModule('sage.graphs.graph')])
class sage__plot(JoinFeature):
def __init__(self):
JoinFeature.__init__(self, 'sage.plot',
[PythonModule('sage.plot.plot')])
class sage__rings__number_field(JoinFeature):
def __init__(self):
JoinFeature.__init__(self, 'sage.rings.number_field',
[PythonModule('sage.rings.number_field.number_field_element')])
class sage__rings__real_double(PythonModule):
def __init__(self):
PythonModule.__init__(self, 'sage.rings.real_double')
class sage__symbolic(JoinFeature):
def __init__(self):
JoinFeature.__init__(self, 'sage.symbolic',
[PythonModule('sage.symbolic.expression')],
spkg="sagemath_symbolics")
def sage_features():
"""
Return features corresponding to parts of the Sage library.
These tags are named after Python packages/modules (e.g., :mod:`~sage.symbolic`),
not distribution packages (``sagemath-symbolics``).
This design is motivated by a separation of concerns: The author of a module that depends
on some functionality provided by a Python module usually already knows the
name of the Python module, so we do not want to force the author to also
know about the distribution package that provides the Python module.
Instead, we associate distribution packages to Python modules in
:mod:`sage.features.sagemath` via the ``spkg`` parameter of :class:`Feature`.
EXAMPLES::
sage: from sage.features.sagemath import sage_features
sage: list(sage_features()) # random
[Feature('sage.graphs'),
Feature('sage.plot'),
Feature('sage.rings.number_field'),
Feature('sage.rings.real_double')]
"""
for feature in [sage__combinat(),
sage__graphs(),
sage__plot(),
sage__rings__number_field(),
sage__rings__real_double(),
sage__symbolic()]:
if feature.is_present():
yield feature
|
93798
|
from django.http import JsonResponse, Http404
from django.shortcuts import redirect
def parse_ip_address(request):
ipaddress = request.META.get("HTTP_X_REAL_IP") \
or request.META.get("HTTP_X_FORWARDED_FOR") \
or request.environ.get("REMOTE_ADDR") or ""
if "," in ipaddress: # multiple ips in the header
ipaddress = ipaddress.split(",", 1)[0]
return ipaddress
def parse_useragent(request):
return (request.META.get("HTTP_USER_AGENT") or "")[:512]
def is_ajax(request):
return bool(request.GET.get("is_ajax"))
def ajax_request(view):
def wrapper(request, *args, **kwargs):
status_code = 200
try:
results = view(request, *args, **kwargs)
except Http404:
status_code = 404
results = {"error": "Not Found"}
if is_ajax(request):
return JsonResponse(data=results, status=status_code)
else:
return redirect(request.META.get("HTTP_REFERER") or "/")
return wrapper
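# Hypothetical usage sketch (the view below is an assumption, not part of the
# original module): a wrapped view returns a plain dict; AJAX callers
# (?is_ajax=1) get it as JSON, everyone else is redirected back.
@ajax_request
def whoami_view(request):
    return {"ip": parse_ip_address(request), "user_agent": parse_useragent(request)}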
|
93804
|
import csv
import shutil
from mock import patch
from django.core.management import call_command
from django.test import TestCase
from dmd.models import AMP, AMPP, VMP, VMPP
from dmd.management.commands.import_dmd import get_common_name
from frontend.models import ImportLog, Presentation, NCSOConcession
from openprescribing.utils import mkdir_p
class TestImportDmd2(TestCase):
@classmethod
def setUpTestData(cls):
for bnf_code, name in [
("0203020C0AAAAAA", "Adenosine_I/V Inf 3mg/ml 2ml Vl"),
("1003020U0AAAIAI", "Diclofenac Sod_Gel 2.32%"),
("1003020U0BBADAI", "Voltarol 12 Hour Emulgel P_Gel 2.32%"),
("1305020C0AAFVFV", "Coal Tar 10%/Salic Acid 5%/Aq_Crm"),
("1106000X0AAAIAI", "Piloc HCl_Eye Dps 6%"),
("090402000BBHCA0", "Nutrison Pack_Stnd"),
]:
Presentation.objects.create(bnf_code=bnf_code, name=name)
shutil.copytree(
"dmd/tests/data/dmd/1",
"pipeline/test-data/data/dmd/2019_07_01/nhsbsa_dmd_7.4.0_20190701000001",
)
mkdir_p("pipeline/test-data/data/bnf_snomed_mapping/2019_07_01")
shutil.copyfile(
"dmd/tests/data/bnf_snomed_mapping/mapping.xlsx",
"pipeline/test-data/data/bnf_snomed_mapping/2019_07_01/mapping.xlsx",
)
# Import the data. See dmd/tests/data/README.txt for details of what
# objects will be created.
with patch("dmd.management.commands.import_dmd.Command.upload_to_bq"):
call_command("import_dmd")
# Copy another, later, dataset into the data directory, for tests that
# call the command again.
shutil.copytree(
"dmd/tests/data/dmd/2",
"pipeline/test-data/data/dmd/2019_07_08/nhsbsa_dmd_7.4.0_20190708000001",
)
@classmethod
def tearDownClass(cls):
shutil.rmtree("pipeline/test-data/data/dmd")
shutil.rmtree("pipeline/test-data/data/bnf_snomed_mapping")
super(TestImportDmd2, cls).tearDownClass()
def test_objects_created(self):
# Check that correct number of objects have been created.
self.assertEqual(VMP.objects.count(), 7)
self.assertEqual(VMPP.objects.count(), 14)
self.assertEqual(AMP.objects.count(), 15)
self.assertEqual(AMPP.objects.count(), 26)
# Check that a selection of fields have been set correctly.
vmp = VMP.objects.get(id=22480211000001104)
self.assertEqual(vmp.nm, "Diclofenac 2.32% gel")
self.assertEqual(vmp.pres_stat.descr, "Valid as a prescribable product")
self.assertEqual(vmp.vmpp_set.count(), 3)
self.assertEqual(vmp.amp_set.count(), 3)
self.assertEqual(vmp.bnf_code, "1003020U0AAAIAI")
vmpp = VMPP.objects.get(id=22479511000001101)
self.assertEqual(vmpp.nm, "Diclofenac 2.32% gel 30 gram")
self.assertEqual(vmpp.vmp, vmp)
self.assertEqual(vmpp.qty_uom.descr, "gram")
self.assertEqual(vmpp.ampp_set.count(), 3)
self.assertEqual(vmpp.bnf_code, "1003020U0AAAIAI")
amp = AMP.objects.get(id=29915211000001103)
self.assertEqual(amp.nm, "Diclofenac 2.32% gel")
self.assertEqual(
amp.descr, "Diclofenac 2.32% gel (Colorama Pharmaceuticals Ltd)"
)
self.assertEqual(amp.vmp, vmp)
self.assertEqual(amp.supp.descr, "Colorama Pharmaceuticals Ltd")
self.assertEqual(amp.ampp_set.count(), 2)
self.assertIsNone(amp.bnf_code)
ampp = AMPP.objects.get(id=29915311000001106)
self.assertEqual(
ampp.nm, "Diclofenac 2.32% gel (Colorama Pharmaceuticals Ltd) 30 gram"
)
self.assertEqual(ampp.vmpp, vmpp)
self.assertEqual(ampp.amp, amp)
self.assertEqual(ampp.legal_cat.descr, "P")
self.assertIsNone(amp.bnf_code)
# The following AMP and AMPP do have BNF codes.
self.assertEqual(
AMP.objects.get(id=22479611000001102).bnf_code, "1003020U0BBADAI"
)
self.assertEqual(
AMPP.objects.get(id=22479911000001108).bnf_code, "1003020U0BBADAI"
)
def test_vmp_bnf_codes_set(self):
# This VMP does not have a BNF code in the mapping, but all its VMPPs
# have the same BNF code, so we assign this to the VMP.
self.assertEqual(
VMP.objects.get(id=35894711000001106).bnf_code, "0203020C0AAAAAA"
)
def test_dmd_names(self):
def _assert_dmd_name(bnf_code, exp_dmd_name):
self.assertEqual(
Presentation.objects.get(bnf_code=bnf_code).dmd_name, exp_dmd_name
)
# This BNF code corresponds to a single VMP.
_assert_dmd_name("1003020U0AAAIAI", "Diclofenac 2.32% gel")
# This BNF code corresponds to a single AMP.
_assert_dmd_name("1003020U0BBADAI", "Voltarol 12 Hour Emulgel P 2.32% gel")
# This BNF code corresponds to multiple VMPs and a common name can be
# inferred.
_assert_dmd_name("1106000X0AAAIAI", "Pilocarpine hydrochloride 6% eye drops")
# This BNF code corresponds to multiple VMPs and a common name cannot
# be inferred.
_assert_dmd_name("1305020C0AAFVFV", None)
# This BNF code corresponds to multiple AMPPs and a common name can be
# inferred.
_assert_dmd_name("090402000BBHCA0", "Nutrison liquid (Nutricia Ltd)")
def test_logs(self):
path = "pipeline/test-data/data/dmd/logs/7.4.0_20190701000001/summary.csv"
with open(path) as f:
summary = list(csv.reader(f))
exp_summary = [
["VMP", "7"],
["AMP", "15"],
["VMPP", "14"],
["AMPP", "26"],
["dmd-objs-present-in-mapping-only", "0"],
["vmps-with-inferred-bnf-code", "0"],
["vmps-with-no-bnf-code", "1"],
["bnf-codes-with-multiple-dmd-objs", "2"],
["bnf-codes-with-multiple-dmd-objs-and-no-inferred-name", "1"],
["vmpps-with-different-bnf-code-to-vmp", "0"],
["ampps-with-different-bnf-code-to-amp", "3"],
]
self.assertEqual(summary, exp_summary)
def test_another_import(self):
# Import updated data. This data is identical to that in dmd/1, except
# that the VMP with VPID 22480211000001104 has been updated with a new
# VPID (12345). This VMP now has a VPIDPREV field, and all references
# to the old VPID have been updated to the new VPID.
#
# Additionally, there is now an NCSOConcession with a FK reference to
# an existing VMPP.
vmpp = VMPP.objects.get(id=22479511000001101)
concession = NCSOConcession.objects.create(
date="2019-06-01",
vmpp=vmpp,
drug=vmpp.nm,
pack_size=vmpp.qtyval,
price_pence=123,
)
vmpp.delete()
with patch("dmd.management.commands.import_dmd.Command.upload_to_bq"):
call_command("import_dmd")
# Check that no VMP present with old VPID, that a new VMP has been
# created, and that references to VMP have been updated.
self.assertIsNone(VMP.objects.filter(id=22480211000001104).first())
vmp = VMP.objects.get(id=12345)
self.assertEqual(vmp.vpidprev, 22480211000001104)
vmpp = VMPP.objects.get(id=22479511000001101)
self.assertEqual(vmpp.vmp, vmp)
amp = AMP.objects.get(id=29915211000001103)
self.assertEqual(amp.vmp, vmp)
concession.refresh_from_db()
self.assertEqual(concession.vmpp, vmpp)
def test_notify_slack(self):
with patch("dmd.management.commands.import_dmd.Command.upload_to_bq"):
with patch("dmd.management.commands.import_dmd.notify_slack") as ns:
call_command("import_dmd")
ns.assert_called()
def test_already_imported(self):
ImportLog.objects.create(
category="dmd", filename="7.4.0_20190708000001", current_at="2019-07-08"
)
with patch("dmd.management.commands.import_dmd.Command.upload_to_bq"):
with patch("dmd.management.commands.import_dmd.notify_slack") as ns:
call_command("import_dmd")
ns.assert_not_called()
class TestGetCommonName(TestCase):
def test_common_name(self):
self._test_get_common_name(
[
"Zoledronic acid 4mg/100ml infusion bags",
"Zoledronic acid 4mg/100ml infusion bottles",
],
"Zoledronic acid 4mg/100ml infusion",
)
def test_no_common_name(self):
self._test_get_common_name(
["Lassar's paste", "Zinc and Salicylic acid paste"], None
)
def test_common_name_too_short(self):
self._test_get_common_name(
[
"Coal tar 10% / Salicylic acid 5% in Aqueous cream",
"Coal tar solution 10% / Salicylic acid 5% in Aqueous cream",
],
None,
)
def test_trailing_with_removed(self):
self._test_get_common_name(
[
"Polyfield Soft Vinyl Patient Pack with small gloves",
"Polyfield Soft Vinyl Patient Pack with medium gloves",
"Polyfield Soft Vinyl Patient Pack with large gloves",
],
"Polyfield Soft Vinyl Patient Pack",
)
def test_trailing_oral_removed(self):
self._test_get_common_name(
[
"Acetazolamide 350mg/5ml oral solution",
"Acetazolamide 350mg/5ml oral suspension",
],
"Acetazolamide 350mg/5ml",
)
def _test_get_common_name(self, names, exp_common_name):
self.assertEqual(get_common_name(names), exp_common_name)
|
93813
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from baselines.chac.utils import Base, hidden_init
class Critic(Base):
def __init__(self, env, level, n_levels, time_scale, device,
lr=0.001, gamma=0.98, hidden_size=64):
super(Critic, self).__init__()
self.gamma = gamma
self.q_limit = -time_scale
# Dimensions of goal placeholder will differ depending on layer level
goal_dim = env.end_goal_dim if level == n_levels - 1 else env.subgoal_dim
# Dimensions of action placeholder will differ depending on layer level
action_dim = env.action_dim if level == 0 else env.subgoal_dim
# Set parameters to give critic optimistic initialization near q_init
self.q_init = -0.067
self.q_offset = -torch.tensor([self.q_limit / self.q_init - 1]).log().to(device)
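        # With q_limit = -time_scale and q_init = -0.067, this offset satisfies
        # sigmoid(q_offset) * q_limit == q_init, so a zero pre-activation in
        # `forward` yields Q == q_init and every output lies in (q_limit, 0).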
# Network layers
self.fc1 = nn.Linear(env.state_dim + action_dim + goal_dim, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, hidden_size)
self.fc4 = nn.Linear(hidden_size, 1)
self.critic_optimizer = optim.Adam(self.parameters(), lr)
self.mse_loss = nn.MSELoss()
self.reset_parameters()
def forward(self, state, goal, action):
x = torch.cat([state, action, goal], dim=1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
return torch.sigmoid(self.fc4(x) + self.q_offset) * self.q_limit
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
self.fc4.weight.data.uniform_(-3e-3, 3e-3)
self.fc1.bias.data.uniform_(*hidden_init(self.fc1))
self.fc2.bias.data.uniform_(*hidden_init(self.fc2))
self.fc3.bias.data.uniform_(*hidden_init(self.fc3))
self.fc4.bias.data.uniform_(-3e-3, 3e-3)
def update(self, states, actions, rewards, new_states, goals, new_actions, done):
next_q = self(new_states, goals, new_actions)
target_q = rewards + (self.gamma * next_q * (1. - done)).detach()
current_q = self(states, goals, actions)
critic_loss = self.mse_loss(current_q, target_q)
self.critic_optimizer.zero_grad()
critic_loss.backward()
flat_grads = torch.cat([param.flatten() for _, param in self.named_parameters()])
self.critic_optimizer.step()
return {
'q_loss': critic_loss.item(),
'target_q': target_q.mean().item(),
'next_q': next_q.mean().item(),
'current_q': current_q.mean().item(),
'q_grads': flat_grads.mean().item(),
'q_grads_std': flat_grads.std().item(),
}
|
93853
|
from tornado.httputil import HTTPServerRequest
def query_argument_contains_string(request: HTTPServerRequest, query_argument_name: str, containing_value: str) -> bool:
return containing_value in str(request.query_arguments[query_argument_name][0])
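# Illustrative (hypothetical) example: for a request to "/search?q=foo", the first
# value of request.query_arguments["q"] is b"foo", so
# query_argument_contains_string(request, "q", "foo") returns True.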
|
93860
|
import base64
import json
import logging
import os
import pathlib
import re
import subprocess
import tempfile
import warnings
from collections import defaultdict
from typing import Dict, List, Optional, Set, Tuple
import backoff
import google.auth
import google.auth.exceptions
import google.auth.transport.requests
import requests
import yaml
from square.dtypes import (
Filepath, K8sClientCert, K8sConfig, K8sResource, MetaManifest,
)
FNAME_TOKEN = Filepath("/var/run/secrets/kubernetes.io/serviceaccount/token")
FNAME_CERT = Filepath("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt")
# Convenience: global logger instance to avoid repetitive code.
logit = logging.getLogger("square")
def load_kubeconfig(fname: Filepath,
context: Optional[str]) -> Tuple[str, dict, dict, bool]:
"""Return user name and user- and cluster information.
Return None on error.
Inputs:
fname: str
Path to kubeconfig file, eg "~/.kube/config.yaml"
context: str
Kubeconf context. Use `None` to use default context.
Returns:
name, user info, cluster info
"""
# Load `kubeconfig`.
try:
kubeconf = yaml.safe_load(open(fname))
except (IOError, PermissionError) as err:
logit.error(f"{err}")
return ("", {}, {}, True)
# Find the user and cluster information based on the specified `context`.
try:
# Use default context unless specified.
ctx = context if context else kubeconf["current-context"]
del context
try:
# Find the correct context.
ctx = [_ for _ in kubeconf["contexts"] if _["name"] == ctx]
assert len(ctx) == 1
ctx = ctx[0]["context"]
# Unpack the cluster- and user name from the current context.
clustername, username = ctx["cluster"], ctx["user"]
# Find the information for the current cluster and user.
user_info = [_ for _ in kubeconf["users"] if _["name"] == username]
cluster_info = [_ for _ in kubeconf["clusters"] if _["name"] == clustername]
assert len(user_info) == len(cluster_info) == 1
except AssertionError:
logit.error(f"Could not find information for context <{ctx}>")
return ("", {}, {}, True)
# Unpack the cluster- and user information.
cluster_name = cluster_info[0]["name"]
cluster_info_out = cluster_info[0]["cluster"]
cluster_info_out["name"] = cluster_name
user_info = user_info[0]["user"]
del cluster_info
except (KeyError, TypeError):
logit.error(f"Kubeconfig YAML file <{fname}> is invalid")
return ("", {}, {}, True)
# Success. The explicit `dicts()` are to satisfy MyPy.
logit.info(f"Loaded {ctx} from Kubeconfig file <{fname}>")
return (username, dict(user_info), dict(cluster_info_out), False)
def load_incluster_config(
fname_token: Filepath = FNAME_TOKEN,
fname_cert: Filepath = FNAME_CERT) -> Tuple[K8sConfig, bool]:
"""Return K8s access config from Pod service account.
Returns None if we are not running in a Pod.
Inputs:
      fname_token: Filepath
        Path to the service account token file.
      fname_cert: Filepath
        Path to the service account CA certificate file.
Returns:
Config
"""
# Every K8s pod has this.
server_ip = os.getenv('KUBERNETES_PORT_443_TCP_ADDR', None)
fname_cert = pathlib.Path(fname_cert)
fname_token = pathlib.Path(fname_token)
# Sanity checks: URL and service account files either exist, or we are not
# actually inside a Pod.
try:
assert server_ip is not None
assert fname_cert.exists()
assert fname_token.exists()
except AssertionError:
logit.debug("Could not find incluster (service account) credentials.")
return K8sConfig(), True
# Return the compiled K8s access configuration.
logit.info("Use incluster (service account) credentials.")
return K8sConfig(
url=f'https://{server_ip}',
token=fname_token.read_text(),
ca_cert=fname_cert,
client_cert=None,
version="",
name="",
), False
def load_gke_config(
fname: Filepath,
context: Optional[str],
disable_warnings: bool = False) -> Tuple[K8sConfig, bool]:
"""Return K8s access config for GKE cluster described in `kubeconfig`.
Returns None if `kubeconfig` does not exist or could not be parsed.
Inputs:
      fname: Filepath
        Path to kubeconfig file.
context: str
Kubeconf context. Use `None` to use default context.
disable_warnings: bool
        Whether or not to disable GCloud warnings.
Returns:
Config
"""
# Parse the kubeconfig file.
name, user, cluster, err = load_kubeconfig(fname, context)
if err:
return (K8sConfig(), True)
# Unpack the self signed certificate (Google does not register the K8s API
# server certificate with a public CA).
try:
ssl_ca_cert_data = base64.b64decode(cluster["certificate-authority-data"])
except KeyError:
logit.debug(f"Context {context} in <{fname}> is not a GKE config")
return (K8sConfig(), True)
# Save the certificate to a temporary file. This is only necessary because
# the requests library will need a path to the CA file - unfortunately, we
# cannot just pass it the content.
_, tmp = tempfile.mkstemp(text=False)
ssl_ca_cert = Filepath(tmp)
ssl_ca_cert.write_bytes(ssl_ca_cert_data)
with warnings.catch_warnings(record=disable_warnings):
try:
cred, project_id = google.auth.default(
scopes=['https://www.googleapis.com/auth/cloud-platform']
)
cred.refresh(google.auth.transport.requests.Request())
token = cred.token
except google.auth.exceptions.DefaultCredentialsError as e:
logit.error(str(e))
return (K8sConfig(), True)
# Return the config data.
logit.info("Assuming GKE cluster.")
return K8sConfig(
url=cluster["server"],
        token=token,
ca_cert=ssl_ca_cert,
client_cert=None,
version="",
name=cluster["name"],
), False
def load_eks_config(
fname: Filepath,
context: Optional[str],
disable_warnings: bool = False) -> Tuple[K8sConfig, bool]:
"""Return K8s access config for EKS cluster described in `kubeconfig`.
Returns None if `kubeconfig` does not exist or could not be parsed.
Inputs:
fname: Filepath
Kubeconfig file.
context: str
Kubeconf context. Use `None` to use default context.
disable_warnings: bool
        Whether or not to disable GCloud warnings.
Returns:
Config
"""
# Parse the kubeconfig file.
name, user, cluster, err = load_kubeconfig(fname, context)
if err:
return (K8sConfig(), True)
# Get a copy of all env vars. We will pass that one along to the
# sub-process, plus the env vars specified in the kubeconfig file.
env = os.environ.copy()
# Unpack the self signed certificate (AWS does not register the K8s API
# server certificate with a public CA).
try:
ssl_ca_cert_data = base64.b64decode(cluster["certificate-authority-data"])
cmd = user["exec"]["command"]
args = user["exec"]["args"]
env_kubeconf = user["exec"].get("env", [])
except KeyError:
logit.debug(f"Context {context} in <{fname}> is not an EKS config")
return (K8sConfig(), True)
# Convert a None value (valid value in YAML) to an empty list of env vars.
env_kubeconf = env_kubeconf if env_kubeconf else []
# Save the certificate to a temporary file. This is only necessary because
# the Requests library will need a path to the CA file - unfortunately, we
# cannot just pass it the content.
_, tmp = tempfile.mkstemp(text=False)
ssl_ca_cert = Filepath(tmp)
ssl_ca_cert.write_bytes(ssl_ca_cert_data)
# Compile the name, arguments and env vars for the command specified in kubeconf.
cmd_args = [cmd] + args
env_kubeconf = {_["name"]: _["value"] for _ in env_kubeconf}
env.update(env_kubeconf)
logit.debug(f"Requesting EKS certificate: {cmd_args} with envs: {env_kubeconf}")
# Pre-format the command for the log message.
log_cmd = (
f"kubeconf={fname} kubectx={context} "
f"cmd={cmd_args} env={env_kubeconf}"
)
# Run the specified command to produce the access token. That program must
# produce a YAML document on stdout that specifies the bearer token.
try:
out = subprocess.run(cmd_args, stdout=subprocess.PIPE, env=env)
token = yaml.safe_load(out.stdout.decode("utf8"))["status"]["token"]
except FileNotFoundError:
logit.error(f"Could not find {cmd} application to get token ({log_cmd})")
return (K8sConfig(), True)
except (KeyError, yaml.YAMLError):
logit.error(f"Token manifest produce by {cmd_args} is corrupt ({log_cmd})")
return (K8sConfig(), True)
except TypeError:
logit.error(f"The YAML token produced by {cmd_args} is corrupt ({log_cmd})")
return (K8sConfig(), True)
# Return the config data.
logit.info("Assuming EKS cluster.")
return K8sConfig(
url=cluster["server"],
token=token,
ca_cert=ssl_ca_cert,
client_cert=None,
version="",
name=cluster["name"],
), False
def load_minikube_config(fname: Filepath,
context: Optional[str]) -> Tuple[K8sConfig, bool]:
"""Load minikube configuration from `fname`.
Return None on error.
Inputs:
      fname: Filepath
Path to kubeconfig file, eg "~/.kube/config.yaml"
context: str
Kubeconf context. Use `None` to use default context.
Returns:
Config
"""
# Parse the kubeconfig file.
name, user, cluster, err = load_kubeconfig(fname, context)
if err:
return (K8sConfig(), True)
# Minikube uses client certificates to authenticate. We need to pass those
# to the HTTP client of our choice when we create the session.
try:
client_cert = K8sClientCert(
crt=user["client-certificate"],
key=user["client-key"],
)
# Return the config data.
logit.info("Assuming Minikube cluster.")
return K8sConfig(
url=cluster["server"],
token="",
ca_cert=cluster["certificate-authority"],
client_cert=client_cert,
version="",
name=cluster["name"],
), False
except KeyError:
logit.debug(f"Context {context} in <{fname}> is not a Minikube config")
return (K8sConfig(), True)
def load_kind_config(fname: Filepath, context: Optional[str]) -> Tuple[K8sConfig, bool]:
"""Load Kind configuration from `fname`.
https://github.com/bsycorp/kind
Kind is just another Minikube cluster. The only notable difference
is that it does not store its credentials as files but directly in
the Kubeconfig file. This function will copy those files into /tmp.
Return None on error.
Inputs:
      fname: Filepath
Path to kubeconfig for Kind cluster.
context: str
Kubeconf context. Use `None` to use default context.
Returns:
Config
"""
# Parse the kubeconfig file.
name, user, cluster, err = load_kubeconfig(fname, context)
if err:
return (K8sConfig(), True)
# Kind and Minikube use client certificates to authenticate. We need to
# pass those to the HTTP client of our choice when we create the session.
try:
client_crt = base64.b64decode(user["client-certificate-data"]).decode()
client_key = base64.b64decode(user["client-key-data"]).decode()
client_ca = base64.b64decode(cluster["certificate-authority-data"]).decode()
path = Filepath(tempfile.mkdtemp())
p_client_crt = path / "kind-client.crt"
p_client_key = path / "kind-client.key"
p_ca = path / "kind.ca"
p_client_crt.write_text(client_crt)
p_client_key.write_text(client_key)
p_ca.write_text(client_ca)
client_cert = K8sClientCert(crt=p_client_crt, key=p_client_key)
# Return the config data.
logit.debug("Assuming Minikube/Kind cluster.")
return K8sConfig(
url=cluster["server"],
token="",
ca_cert=p_ca,
client_cert=client_cert,
version="",
name=cluster["name"],
), False
except KeyError:
logit.debug(f"Context {context} in <{fname}> is not a Minikube config")
return (K8sConfig(), True)
def load_auto_config(
fname: Filepath,
context: Optional[str],
disable_warnings: bool = False) -> Tuple[K8sConfig, bool]:
"""Automagically find and load the correct K8s configuration.
    This function will load several possible configuration options and return
    the first one that matches. The order is as follows:
    1) `load_incluster_config`
    2) `load_minikube_config`
    3) `load_kind_config`
    4) `load_eks_config`
    5) `load_gke_config`
Inputs:
fname: str
Path to kubeconfig file, eg "~/.kube/config.yaml"
Use `None` to find out automatically or for incluster credentials.
context: str
Kubeconf context. Use `None` to use default context.
Returns:
Config
"""
conf, err = load_incluster_config()
if not err:
return conf, False
logit.debug("Incluster config failed")
conf, err = load_minikube_config(fname, context)
if not err:
return conf, False
logit.debug("Minikube config failed")
conf, err = load_kind_config(fname, context)
if not err:
return conf, False
logit.debug("KIND config failed")
conf, err = load_eks_config(fname, context, disable_warnings)
if not err:
return conf, False
logit.debug("EKS config failed")
conf, err = load_gke_config(fname, context, disable_warnings)
if not err:
return conf, False
logit.debug("GKE config failed")
logit.error(f"Could not find a valid configuration in <{fname}>")
return (K8sConfig(), True)
def session(k8sconfig: K8sConfig):
"""Return configured `requests` session."""
# Plain session.
sess = requests.Session()
# Load the CA file (necessary for self signed certs to avoid https warning).
sess.verify = str(k8sconfig.ca_cert)
# Add the client certificate, if the cluster uses those to authenticate users.
if k8sconfig.client_cert is not None:
sess.cert = (str(k8sconfig.client_cert.crt), str(k8sconfig.client_cert.key))
# Add the bearer token if this cluster uses them to authenticate users.
if k8sconfig.token is not None:
sess.headers.update({'authorization': f'Bearer {k8sconfig.token}'})
# Return the configured session object.
return sess
def resource(k8sconfig: K8sConfig, meta: MetaManifest) -> Tuple[K8sResource, bool]:
"""Return `K8sResource` object.
That object will contain the full path to a resource, eg.
https://1.2.3.4/api/v1/namespace/foo/services.
Inputs:
k8sconfig: K8sConfig
meta: MetaManifest
Returns:
K8sResource
"""
err_resp = (K8sResource("", "", "", False, ""), True)
# Compile the lookup key for the resource, eg `("Service", "v1")`.
if not meta.apiVersion:
# Use the most recent version of the API if None was specified.
candidates = [(kind, ver) for kind, ver in k8sconfig.apis if kind == meta.kind]
if len(candidates) == 0:
logit.warning(f"Cannot determine API version for <{meta.kind}>")
return err_resp
candidates.sort()
key = candidates.pop(0)
else:
key = (meta.kind, meta.apiVersion)
# Retrieve the resource.
try:
resource = k8sconfig.apis[key]
except KeyError:
logit.error(f"Unsupported resource <{meta.kind}> {key}.")
return err_resp
# Void the "namespace" key for non-namespaced resources.
if not resource.namespaced:
meta = meta._replace(namespace=None)
# Namespaces are special because they lack the `namespaces/` path prefix.
if meta.kind == "Namespace":
# Return the correct URL, depending on whether we want all namespaces
# or a particular one.
url = f"{resource.url}/namespaces"
if meta.name:
url += f"/{meta.name}"
return resource._replace(url=url), False
    # Determine the prefix for namespaced resources.
if meta.namespace is None:
namespace = ""
else:
# Namespace name must conform to K8s standards.
match = re.match(r"[a-z0-9]([-a-z0-9]*[a-z0-9])?", meta.namespace)
if match is None or match.group() != meta.namespace:
logit.error(f"Invalid namespace name <{meta.namespace}>.")
return err_resp
namespace = f"namespaces/{meta.namespace}"
# Sanity check: we cannot search for a namespaced resource by name in all
# namespaces. Example: we cannot search for a Service `foo` in all
# namespaces. We could only search for Service `foo` in namespace `bar`, or
# all services in all namespaces.
if resource.namespaced and meta.name and not meta.namespace:
logit.error(f"Cannot search for {meta.kind} {meta.name} in {meta.namespace}")
return err_resp
# Create the full path to the resource depending on whether we have a
# namespace and resource name. Here are all three possibilities:
# - /api/v1/namespaces/services
# - /api/v1/namespaces/my-namespace/services
# - /api/v1/namespaces/my-namespace/services/my-service
path = f"{namespace}/{resource.name}" if namespace else resource.name
path = f"{path}/{meta.name}" if meta.name else path
# The concatenation above may have introduced `//`. Here we remove them.
path = path.replace("//", "/")
# Return the K8sResource with the correct URL.
resource = resource._replace(url=f"{resource.url}/{path}")
return resource, False
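# Illustrative example (hypothetical cluster and manifest): for a MetaManifest with
# kind="Service", apiVersion="v1", namespace="squid" and name="demo", this returns
# the ("Service", "v1") resource with its URL expanded to
# "<k8sconfig.url>/api/v1/namespaces/squid/services/demo".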
def request(
client,
method: str,
url: str,
payload: Optional[dict],
headers: Optional[dict]) -> Tuple[dict, bool]:
"""Return response of web request made with `client`.
Inputs:
client: `requests` session with correct K8s certificates.
url: str
Eg `https://1.2.3.4/api/v1/namespaces`)
payload: dict
Anything that can be JSON encoded, usually a K8s manifest.
headers: dict
Request headers. These will *not* replace the existing request
headers dictionary (eg the access tokens), but augment them.
Returns:
(dict, int): the JSON response and the HTTP status code.
"""
# Define the maximum number of tries and exceptions we want to retry on.
max_tries = 21
web_exceptions = (requests.exceptions.RequestException, )
def on_backoff(details):
"""Log a warning whenever we retry."""
tries = details["tries"]
logit.warning(f"Backing off on {url} (Attempt {tries}/{max_tries-1})")
"""
Use linear backoff. The backoff is not exponential because the most
prevalent use case for this backoff we have seen so far is to wait for
new resource endpoints to become available. These may take a few seconds,
or tens of seconds to do so. If we used an exponential strategy we may
end up waiting for a very long time for no good reason. The time between
backoffs is fairly large to avoid hammering the API. Jitter is disabled
because the irregular intervals are irritating in an interactive tool.
"""
@backoff.on_exception(backoff.constant, web_exceptions,
max_tries=max_tries,
interval=3,
max_time=20,
on_backoff=on_backoff,
logger=None,
jitter=None,
)
def _call(*args, **kwargs):
return client.request(method, url, json=payload, headers=headers, timeout=30)
# Make the web request via our backoff/retry handler.
try:
ret = _call()
except web_exceptions as err:
logit.error(f"{err} ({method} {url})")
return ({}, True)
try:
response = ret.json()
except json.decoder.JSONDecodeError as err:
msg = (
f"JSON error: {err.msg} in line {err.lineno} column {err.colno}",
"-" * 80 + "\n" + err.doc + "\n" + "-" * 80,
)
logit.error(str.join("\n", msg))
return ({}, True)
# Log the entire request in debug mode.
logit.debug(
f"{method} {ret.status_code} {ret.url}\n"
f"Headers: {headers}\n"
f"Payload: {payload}\n"
f"Response: {response}\n"
)
return (response, ret.status_code)
def delete(client, url: str, payload: dict) -> Tuple[dict, bool]:
"""Make DELETE requests to K8s (see `k8s_request`)."""
resp, code = request(client, 'DELETE', url, payload, headers=None)
err = (code not in (200, 202))
if err:
logit.error(f"{code} - DELETE - {url} - {resp}")
return (resp, err)
def get(client, url: str) -> Tuple[dict, bool]:
"""Make GET requests to K8s (see `request`)."""
resp, code = request(client, 'GET', url, payload=None, headers=None)
err = (code != 200)
if err:
logit.error(f"{code} - GET - {url} - {resp}")
return (resp, err)
def patch(client, url: str, payload: dict) -> Tuple[dict, bool]:
"""Make PATCH requests to K8s (see `request`)."""
headers = {'Content-Type': 'application/json-patch+json'}
resp, code = request(client, 'PATCH', url, payload, headers)
err = (code != 200)
if err:
logit.error(f"{code} - PATCH - {url} - {resp}")
return (resp, err)
def post(client, url: str, payload: dict) -> Tuple[dict, bool]:
"""Make POST requests to K8s (see `request`)."""
resp, code = request(client, 'POST', url, payload, headers=None)
err = (code != 201)
if err:
logit.error(f"{code} - POST - {url} - {resp}")
return (resp, err)
def version(k8sconfig: K8sConfig) -> Tuple[K8sConfig, bool]:
"""Return new `k8sconfig` with version number of K8s API.
Contact the K8s API, query its version via `client` and return `k8sconfig`
    with an updated `version` field. All other fields in `k8sconfig` will remain
intact.
Inputs:
k8sconfig: K8sConfig
Returns:
K8sConfig
"""
# Ask the K8s API for its version and check for errors.
url = f"{k8sconfig.url}/version"
resp, err = get(k8sconfig.client, url)
if err or resp is None:
logit.error(f"Could not interrogate {k8sconfig.name} ({url})")
return (K8sConfig(), True)
# Construct the version number of the K8s API.
major, minor = resp['major'], resp['minor']
version = f"{major}.{minor}"
# If we are talking to GKE, the version string may now be "1.10+". It
# simply indicates that GKE is running version 1.10.x. We need to remove
# the "+" because the version string is important in `square`, for instance
    # to determine which URLs to contact and which fields are valid.
version = version.replace("+", "")
# Return an updated `K8sconfig` tuple.
k8sconfig = k8sconfig._replace(version=version)
return (k8sconfig, False)
def cluster_config(
kubeconfig: Filepath,
context: Optional[str]) -> Tuple[K8sConfig, bool]:
"""Return web session to K8s API.
This will read the Kubernetes credentials, contact Kubernetes to
interrogate its version and then return the configuration and web-session.
Inputs:
kubeconfig: str
Path to kubeconfig file.
context: str
Kubernetes context to use (can be `None` to use default).
Returns:
K8sConfig
"""
# Read Kubeconfig file and use it to create a `requests` client session.
# That session will have the proper security certificates and headers so
# that subsequent calls to K8s need not deal with it anymore.
kubeconfig = kubeconfig.expanduser()
try:
# Parse Kubeconfig file.
k8sconfig, err = load_auto_config(kubeconfig, context, disable_warnings=True)
assert not err
# Configure web session.
k8sconfig = k8sconfig._replace(client=session(k8sconfig))
assert k8sconfig.client
# Contact the K8s API to update version field in `k8sconfig`.
k8sconfig, err = version(k8sconfig)
assert not err and k8sconfig
# Populate the `k8sconfig.apis` field.
err = compile_api_endpoints(k8sconfig)
assert not err
except AssertionError:
return (K8sConfig(), True)
# Log the K8s API address and version.
logit.info(f"Kubernetes server at {k8sconfig.url}")
logit.info(f"Kubernetes version is {k8sconfig.version}")
return (k8sconfig, False)
def parse_api_group(api_version, url, resp) -> Tuple[List[K8sResource], Dict[str, str]]:
"""Compile the K8s API `resp` into a `K8sResource` tuples.
The `resp` is the verbatim response from the K8s API group regarding the
resources it provides. Here we compile those into `K8sResource` tuples iff
they meet the criteria to be manageable by Square. These criteria are, most
notably, the ability to create, get, patch and delete the resource.
Also return a LUT to convert short names like "svc" into the proper resource
kind "Service".
"""
resources = resp["resources"]
def valid(_res):
"""Return `True` if `res` describes a Square compatible resource."""
# Convenience.
name = _res["name"]
verbs = list(sorted(_res["verbs"]))
# Ignore resource like "services/status". We only care for "services".
if "/" in name:
logit.debug(f"Ignore resource <{name}>: has a slash ('/') in its name")
return False
        # Square can only manage the resource if it can be read, modified and
# deleted. Here we check if `res` has the respective verbs.
minimal_verbs = {"create", "delete", "get", "list", "patch", "update"}
if not minimal_verbs.issubset(set(verbs)):
logit.debug(f"Ignore resource <{name}>: insufficient verbs: {verbs}")
return False
return True
# Compile the K8s resource definition into a `K8sResource` structure if it
# is compatible with Square (see `valid` helper above).
group_urls: List[K8sResource]
group_urls = [
K8sResource(api_version, _["kind"], _["name"], _["namespaced"], url)
for _ in resources if valid(_)
]
# Compile LUT to translate short names into their proper resource
# kind: Example short2kind = {"service":, "Service", "svc": "Service"}
short2kind: Dict[str, str] = {}
for res in resources:
kind = res["kind"]
short2kind[kind.lower()] = kind
short2kind[res["name"]] = kind
for short_name in res.get("shortNames", []):
short2kind[short_name] = kind
return (group_urls, short2kind)
def compile_api_endpoints(k8sconfig: K8sConfig) -> bool:
"""Populate `k8sconfig.apis` with all the K8s endpoints`.
NOTE: This will purge the existing content in `k8sconfig.apis`.
Returns a dictionary like the following:
{
        ('ConfigMap', 'v1'): K8sResource(
            apiVersion='v1', kind='ConfigMap', name='configmaps', namespaced=True,
            url='https://localhost:8443/api/v1/configmaps'),
        ('CronJob', 'batch/v1beta1'): K8sResource(
            apiVersion='batch/v1beta1', kind='CronJob', name='cronjobs', namespaced=True,
            url='https://localhost:8443/apis/batch/v1beta1/cronjobs'),
        ('DaemonSet', 'apps/v1'): K8sResource(
            apiVersion='apps/v1', kind='DaemonSet', name='daemonsets', namespaced=True,
            url='https://localhost:8443/apis/apps/v1/daemonsets'),
        ('DaemonSet', 'apps/v1beta1'): K8sResource(
            apiVersion='apps/v1beta1', kind='DaemonSet', name='daemonsets', namespaced=True,
            url='https://localhost:8443/apis/extensions/v1beta1/daemonsets'),
}
Inputs:
k8sconfig: K8sConfig
"""
# Compile the list of all K8s API groups that this K8s instance knows about.
resp, err = get(k8sconfig.client, f"{k8sconfig.url}/apis")
if err:
logit.error(f"Could not interrogate {k8sconfig.name} ({k8sconfig.url}/apis)")
return True
# Compile the list of all API groups and their endpoints. Example
# apigroups = {
# 'extensions': {('extensions/v1beta1', 'apis/extensions/v1beta1')},
# 'apps': {('apps/v1', 'apis/apps/v1'),
# ('apps/v1beta1', 'apis/apps/v1beta1'),
# ('apps/v1beta2', 'apis/apps/v1beta2')},
# 'batch': {('batch/v1', 'apis/batch/v1'),
# ('batch/v1beta1', 'apis/batch/v1beta1')},
# ...
# }
apigroups: Dict[str, Set[Tuple[str, str]]] = {}
preferred_group: Dict[str, str] = {}
for group in resp["groups"]:
name = group["name"]
# Store the preferred version, eg ("", "apis/v1").
apigroups[name] = set()
# Compile all alternative versions into the same set.
for version in group["versions"]:
ver = version["groupVersion"]
apigroups[name].add((ver, f"apis/{ver}"))
preferred_group[ver] = group["preferredVersion"]["groupVersion"]
del group
# The "v1" group comprises the traditional core components like Service and
# Pod. This group is a special case and exposed under "api/v1" instead
# of the usual `apis/...` path.
apigroups["v1"] = {("v1", "api/v1")}
preferred_group["v1"] = "v1"
# Contact K8s to find out which resources each API group offers.
# This will produce the following group_urls below (K = `K8sResource`): {
# ('apps', 'apps/v1', 'apis/apps/v1'): [
# K(*, kind='DaemonSet', name='daemonsets', namespaced=True, url='apis/apps/v1'),
# K(*, kind='Deployment', name='deployments', namespaced=True, url='apis/apps/v1'),
# K(*, kind='ReplicaSet', name='replicasets', namespaced=True, url='apis/apps/v1'),
# K(*, kind='StatefulSet', name='statefulsets', namespaced=True, url='apis/apps/v1')
# ],
    # ('apps', 'apps/v1beta1', 'apis/apps/v1beta1'): [
# K(..., kind='Deployment', name='deployments', namespaced=True, url=...),
# K(..., kind='StatefulSet', name='statefulsets', namespaced=True, url=...)
# ],
# }
group_urls: Dict[Tuple[str, str, str], List[K8sResource]] = {}
for group_name, ver_url in apigroups.items():
for api_version, url in ver_url:
resp, err = get(k8sconfig.client, f"{k8sconfig.url}/{url}")
if err:
msg = f"Could not interrogate {k8sconfig.name} ({k8sconfig.url}/{url})"
logit.error(msg)
return True
data, short2kind = parse_api_group(api_version, url, resp)
group_urls[(group_name, api_version, url)] = data
k8sconfig.short2kind.update(short2kind)
# Produce the entries for `K8sConfig.apis` as described in the doc string.
k8sconfig.apis.clear()
default: Dict[str, Set[K8sResource]] = defaultdict(set)
for (group_name, api_version, url), resources in group_urls.items():
for res in resources:
key = (res.kind, res.apiVersion) # fixme: define namedtuple
k8sconfig.apis[key] = res._replace(url=f"{k8sconfig.url}/{res.url}")
if preferred_group[api_version] == api_version:
default[res.kind].add(k8sconfig.apis[key])
k8sconfig.apis[(res.kind, "")] = k8sconfig.apis[key]
# Determine the default API endpoint Square should query for each resource.
for kind, resources in default.items():
# Happy case: the resource is only available from a single API group.
if len(resources) == 1:
k8sconfig.apis[(kind, "")] = resources.pop()
continue
# If we get here then it means a resource is available from different
# API groups. Here we use heuristics to pick one. The heuristic is
# simply to look for one that is neither alpha nor beta. In Kubernetes
# v1.15 this resolves almost all disputes.
all_apis = list(sorted([_.apiVersion for _ in resources]))
# Remove all alpha/beta resources.
prod_apis = [_ for _ in all_apis if not ("alpha" in _ or "beta" in _)]
# Re-add the alpha/beta resources to the candidate set if we have no
# production ones to choose from.
apis = prod_apis if len(prod_apis) > 0 else list(all_apis)
# Pick the one with probably the highest version number.
apis.sort()
version = apis.pop()
k8sconfig.apis[(kind, "")] = [_ for _ in resources if _.apiVersion == version][0]
# Log the available options. Mark the one Square chose with a "*".
tmp = [_ if _ != version else f"*{_}*" for _ in all_apis]
logit.info(f"Ambiguous {kind.upper()} endpoints: {tmp}")
# Compile the set of all resource kinds that this Kubernetes cluster supports.
for kind, _ in k8sconfig.apis:
k8sconfig.kinds.add(kind)
return False
|
93862
|
from __future__ import print_function
def josephus(list_of_players, step):
#skipdeadguy
step -= 1
index = step
while len(list_of_players) > 1:
print("Player Died : " , list_of_players.pop(index))
index = (index + step) % len(list_of_players)
print('Player Survived : ', list_of_players[0])
def main():
print("[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15], 5")
josephus([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15], 5)
if __name__ == "__main__":
main()
|
93888
|
from __future__ import absolute_import, division, print_function
from mmtbx.geometry import topology
import unittest
class TestAtom(unittest.TestCase):
def test_1(self):
foo = object()
bar = object()
a = topology.Atom( foo = foo, bar = bar )
self.assertEqual( a.foo, foo )
self.assertEqual( a.bar, bar )
class TestMolecule(unittest.TestCase):
def test_0(self):
m = topology.Molecule()
self.assertEqual( m.size(), 0 )
self.assertEqual( m.atoms, [] )
self.assertEqual( m.atom_for, {} )
self.assertEqual( m.descriptor_for, {} )
self.assertEqual( list( m.graph.vertices() ), [] )
self.assertEqual( list( m.graph.edges() ), [] )
def test_1(self):
m = topology.Molecule()
a = topology.Atom()
m.add( atom = a, xyz = ( 0, 0, 0 ) )
self.assertEqual( m.size(), 1 )
self.assertEqual( m.atoms, [ a ] )
self.assertEqual( len( m.atom_for ), 1 )
self.assertTrue( a in m.atom_for.values() )
self.assertEqual( len( m.descriptor_for ), 1 )
self.assertTrue( a in m.descriptor_for )
self.assertEqual( len( list( m.graph.vertices() ) ), 1 )
self.assertEqual( list( m.graph.edges() ), [] )
def test_2(self):
m = topology.Molecule()
a1 = topology.Atom()
a2 = topology.Atom()
m.add( atom = a1, xyz = ( 0, 0, 0 ) )
m.add( atom = a2, xyz = ( 1, 1, 1 ) )
self.assertEqual( m.size(), 2 )
self.assertEqual( set( m.atoms ), set( [ a1, a2 ] ) )
self.assertEqual( len( m.atom_for ), 2 )
self.assertTrue( a1 in m.atom_for.values() )
self.assertTrue( a2 in m.atom_for.values() )
self.assertEqual( len( m.descriptor_for ), 2 )
self.assertTrue( a1 in m.descriptor_for )
self.assertTrue( a2 in m.descriptor_for )
self.assertEqual( len( list( m.graph.vertices() ) ), 2 )
self.assertEqual( len( list( m.graph.edges() ) ), 1 )
edge = next(m.graph.edges())
self.assertAlmostEqual( m.graph.edge_weight( edge = edge ), 1.73205, 5 )
class TestCompound(unittest.TestCase):
def test_0(self):
m = topology.Compound.create()
self.assertEqual( m.atoms, [] )
self.assertEqual( m.atom_for, {} )
self.assertEqual( m.descriptor_for, {} )
self.assertEqual( list( m.graph.vertices() ), [] )
self.assertEqual( list( m.graph.edges() ), [] )
def test_1(self):
m = topology.Compound.create()
a = topology.Atom()
m.add_atom( atom = a )
self.assertEqual( m.atoms, [ a ] )
self.assertEqual( len( m.atom_for ), 1 )
self.assertTrue( a in m.atom_for.values() )
self.assertEqual( len( m.descriptor_for ), 1 )
self.assertTrue( a in m.descriptor_for )
self.assertEqual( len( list( m.graph.vertices() ) ), 1 )
self.assertEqual( list( m.graph.edges() ), [] )
self.assertEqual( m.distances_from( atom = a ), { a: 0 } )
self.assertEqual( m.connected_segments(), [ [ a ] ] )
def test_2(self):
m = topology.Compound.create()
a1 = topology.Atom()
a2 = topology.Atom()
m.add_atom( atom = a1 )
m.add_atom( atom = a2 )
self.assertEqual( set( m.atoms ), set( [ a1, a2 ] ) )
self.assertEqual( len( m.atom_for ), 2 )
self.assertTrue( a1 in m.atom_for.values() )
self.assertTrue( a2 in m.atom_for.values() )
self.assertEqual( len( m.descriptor_for ), 2 )
self.assertTrue( a1 in m.descriptor_for )
self.assertTrue( a2 in m.descriptor_for )
self.assertEqual( len( list( m.graph.vertices() ) ), 2 )
self.assertEqual( len( list( m.graph.edges() ) ), 0 )
self.assertEqual( m.distances_from( atom = a1 ), { a1: 0, a2: None } )
self.assertEqual( m.distances_from( atom = a2 ), { a2: 0, a1: None } )
self.assertEqual(
set( frozenset( s ) for s in m.connected_segments() ),
set( [ frozenset( [ a1 ] ), frozenset( [ a2 ] ) ] ),
)
m.add_bond( left = a1, right = a2 )
self.assertEqual( len( list( m.graph.vertices() ) ), 2 )
self.assertEqual( len( list( m.graph.edges() ) ), 1 )
self.assertEqual( m.distances_from( atom = a1 ), { a1: 0, a2: 1 } )
self.assertEqual( m.distances_from( atom = a2 ), { a2: 0, a1: 1 } )
self.assertEqual(
set( frozenset( s ) for s in m.connected_segments() ),
set( [ frozenset( [ a1, a2 ] ) ] ),
)
ss1 = m.subset( atoms = [ a1 ] )
self.assertEqual( len( ss1.atom_for ), 1 )
self.assertTrue( a1 in ss1.atom_for.values() )
self.assertEqual( len( ss1.descriptor_for ), 1 )
self.assertTrue( a1 in ss1.descriptor_for )
self.assertEqual( len( list( ss1.graph.vertices() ) ), 1 )
self.assertEqual( len( list( ss1.graph.edges() ) ), 0 )
ss2 = m.subset( atoms = [ a2 ] )
self.assertEqual( len( ss2.atom_for ), 1 )
self.assertTrue( a2 in ss2.atom_for.values() )
self.assertEqual( len( ss2.descriptor_for ), 1 )
self.assertTrue( a2 in ss2.descriptor_for )
self.assertEqual( len( list( ss2.graph.vertices() ) ), 1 )
self.assertEqual( len( list( ss2.graph.edges() ) ), 0 )
def test_3(self):
atoms = [
topology.Atom( name = "N", element = "N", xyz = ( 11.498, 10.510, 10.231 ) ),
topology.Atom( name = "CA", element = "C", xyz = ( 12.730, 11.073, 10.769 ) ),
topology.Atom( name = "C", element = "C", xyz = ( 13.674, 9.966, 11.221 ) ),
topology.Atom( name = "O", element = "O", xyz = ( 13.739, 8.902, 10.605 ) ),
topology.Atom( name = "CB", element = "C", xyz = ( 12.421, 12.004, 11.944 ) ),
topology.Atom( name = "CG", element = "C", xyz = ( 11.478, 13.179, 11.661 ) ),
topology.Atom( name = "CD1", element = "C", xyz = ( 11.043, 13.834, 12.963 ) ),
topology.Atom( name = "CD2", element = "C", xyz = ( 12.126, 14.201, 10.736 ) ),
]
compound = topology.Compound.from_structure( atoms = atoms, tolerance = 0.1 )
self.assertEqual(
set( frozenset( [ l.name, r.name ] ) for ( l, r ) in compound.bonds ),
set(
[ frozenset( [ "N", "CA" ] ), frozenset( [ "CA", "C" ] ),
frozenset( [ "C", "O" ] ), frozenset( [ "CA", "CB" ] ),
frozenset( [ "CB", "CG" ] ), frozenset( [ "CG", "CD1" ] ),
frozenset( [ "CG", "CD2" ] ),
]
)
)
class TestMcGregorMatch(unittest.TestCase):
def test_asn_leu(self):
l_ca = topology.Atom( label = "CA" )
l_cb = topology.Atom( label = "C" )
l_cg = topology.Atom( label = "C" )
l_cd1 = topology.Atom( label = "C" )
l_cd2 = topology.Atom( label = "C" )
leu = topology.Molecule()
leu.add( atom = l_ca, xyz = ( -1.0085, -0.590773, 0.814318 ) )
leu.add( atom = l_cb, xyz = ( 0.0275, -0.557773, -0.314682 ) )
leu.add( atom = l_cg, xyz = ( 1.2335, 0.374227, -0.138682 ) )
leu.add( atom = l_cd1, xyz = ( 2.3065, 0.046227, -1.16768 ) )
leu.add( atom = l_cd2, xyz = ( 0.8395, 1.84323, -0.230682 ) )
a_ca = topology.Atom( label = "CA" )
a_cb = topology.Atom( label = "C" )
a_cg = topology.Atom( label = "C" )
a_od1 = topology.Atom( label = "C" )
a_nd2 = topology.Atom( label = "C" )
asn = topology.Molecule()
asn.add( atom = a_ca, xyz = ( -1.03327, -0.544348, 0.860946 ) )
asn.add( atom = a_cb, xyz = ( 0.10486, -0.548357, -0.164901 ) )
asn.add( atom = a_cg, xyz = ( 0.990984, 0.682823, -0.070521 ) )
asn.add( atom = a_od1, xyz = ( 1.39496, 1.24684, -1.08724 ) )
asn.add( atom = a_nd2, xyz = ( 1.29745, 1.10599, 1.15228 ) )
res = topology.McGregorMatch(
molecule1 = leu,
molecule2 = asn,
is_valid = lambda match: any( m.label == "CA" for m in match ),
vertex_equality = lambda l, r: l.label == r.label,
edge_equality = lambda l, r: abs( l - r ) < 0.1
)
self.assertEqual( res.length(), 3 )
mapping = res.remapped()
self.assertTrue( ( l_ca, a_ca ) in mapping )
self.assertTrue( ( l_cb, a_cb ) in mapping )
self.assertTrue( ( l_cg, a_cg ) in mapping )
self.assertTrue( ( l_cd1, a_od1 ) not in mapping )
class TestRascalMatch(unittest.TestCase):
def test_asn_leu(self):
l_ca = topology.Atom( label = "CA" )
l_cb = topology.Atom( label = "C" )
l_cg = topology.Atom( label = "C" )
l_cd1 = topology.Atom( label = "C" )
l_cd2 = topology.Atom( label = "C" )
leu = topology.Molecule()
leu.add( atom = l_ca, xyz = ( -1.0085, -0.590773, 0.814318 ) )
leu.add( atom = l_cb, xyz = ( 0.0275, -0.557773, -0.314682 ) )
leu.add( atom = l_cg, xyz = ( 1.2335, 0.374227, -0.138682 ) )
leu.add( atom = l_cd1, xyz = ( 2.3065, 0.046227, -1.16768 ) )
leu.add( atom = l_cd2, xyz = ( 0.8395, 1.84323, -0.230682 ) )
a_ca = topology.Atom( label = "CA" )
a_cb = topology.Atom( label = "C" )
a_cg = topology.Atom( label = "C" )
a_od1 = topology.Atom( label = "C" )
a_nd2 = topology.Atom( label = "C" )
asn = topology.Molecule()
asn.add( atom = a_ca, xyz = ( -1.03327, -0.544348, 0.860946 ) )
asn.add( atom = a_cb, xyz = ( 0.10486, -0.548357, -0.164901 ) )
asn.add( atom = a_cg, xyz = ( 0.990984, 0.682823, -0.070521 ) )
asn.add( atom = a_od1, xyz = ( 1.39496, 1.24684, -1.08724 ) )
asn.add( atom = a_nd2, xyz = ( 1.29745, 1.10599, 1.15228 ) )
m = topology.RascalMatch(
molecule1 = leu,
molecule2 = asn,
vertex_equality = lambda l, r: l.label == r.label,
edge_equality = lambda l, r: abs( l - r ) <= 0.1,
)
self.assertEqual( m.count(), 1 )
self.assertEqual( m.length(), 3 )
mapping = m.remapped()[0]
self.assertEqual( len( mapping ), 3 )
self.assertTrue( ( l_ca, a_ca ) in mapping )
self.assertTrue( ( l_cb, a_cb ) in mapping )
self.assertTrue( ( l_cg, a_cg ) in mapping )
self.assertTrue( ( l_cd1, a_od1 ) not in mapping )
class TestGreedyMatch(unittest.TestCase):
def test_asn_leu(self):
l_ca = topology.Atom( label = "CA" )
l_cb = topology.Atom( label = "C" )
l_cg = topology.Atom( label = "C" )
l_cd1 = topology.Atom( label = "C" )
l_cd2 = topology.Atom( label = "C" )
leu = topology.Molecule()
leu.add( atom = l_ca, xyz = ( -1.0085, -0.590773, 0.814318 ) )
leu.add( atom = l_cb, xyz = ( 0.0275, -0.557773, -0.314682 ) )
leu.add( atom = l_cg, xyz = ( 1.2335, 0.374227, -0.138682 ) )
leu.add( atom = l_cd1, xyz = ( 2.3065, 0.046227, -1.16768 ) )
leu.add( atom = l_cd2, xyz = ( 0.8395, 1.84323, -0.230682 ) )
a_ca = topology.Atom( label = "CA" )
a_cb = topology.Atom( label = "C" )
a_cg = topology.Atom( label = "C" )
a_od1 = topology.Atom( label = "C" )
a_nd2 = topology.Atom( label = "C" )
asn = topology.Molecule()
asn.add( atom = a_ca, xyz = ( -1.03327, -0.544348, 0.860946 ) )
asn.add( atom = a_cb, xyz = ( 0.10486, -0.548357, -0.164901 ) )
asn.add( atom = a_cg, xyz = ( 0.990984, 0.682823, -0.070521 ) )
asn.add( atom = a_od1, xyz = ( 1.39496, 1.24684, -1.08724 ) )
asn.add( atom = a_nd2, xyz = ( 1.29745, 1.10599, 1.15228 ) )
m = topology.GreedyMatch(
molecule1 = leu,
molecule2 = asn,
vertex_equality = lambda l, r: l.label == r.label,
edge_equality = lambda l, r: abs( l - r ) <= 0.1,
)
self.assertEqual( m.count(), 1 )
self.assertEqual( m.length(), 3 )
mapping = m.remapped()[0]
self.assertEqual( len( mapping ), 3 )
self.assertTrue( ( l_ca, a_ca ) in mapping )
self.assertTrue( ( l_cb, a_cb ) in mapping )
self.assertTrue( ( l_cg, a_cg ) in mapping )
self.assertTrue( ( l_cd1, a_od1 ) not in mapping )
suite_atom = unittest.TestLoader().loadTestsFromTestCase(
TestAtom
)
suite_molecule = unittest.TestLoader().loadTestsFromTestCase(
TestMolecule
)
suite_compound = unittest.TestLoader().loadTestsFromTestCase(
TestCompound
)
suite_mcgregor_match = unittest.TestLoader().loadTestsFromTestCase(
TestMcGregorMatch
)
suite_rascal_match= unittest.TestLoader().loadTestsFromTestCase(
TestRascalMatch
)
suite_greedy_match= unittest.TestLoader().loadTestsFromTestCase(
TestGreedyMatch
)
alltests = unittest.TestSuite(
[
suite_atom,
suite_molecule,
suite_compound,
suite_mcgregor_match,
suite_rascal_match,
suite_greedy_match,
]
)
def load_tests(loader, tests, pattern):
return alltests
if __name__ == "__main__":
unittest.TextTestRunner( verbosity = 2 ).run( alltests )
|
93913
|
import sys
import os
import subprocess
import json
from utils import get_training_world, unarchive_data
from datetime import datetime
if __name__ == "__main__":
start_time = datetime.now()
print('Starting training...')
# print("Training started at {}".format(start_time))
world = get_training_world()
sm_args = json.loads(os.environ["SM_HPS"])
# if "unarchive" in sm_args:
# data_dir = sm_args.pop("unarchive")
# unarchive_data(data_dir)
# print("Unarchive completed in {}".format(datetime.now() - start_time))
args = [f"--{key} {value}" for key, value in sm_args.items()]
launch_config = ["python -m torch.distributed.launch",
"--nnodes", str(world['number_of_machines']),
"--node_rank", str(world['machine_rank']),
"--nproc_per_node", str(world['number_of_processes']),
"--master_addr", world['master_addr'],
"--master_port", world['master_port'],
"/opt/ml/code/train.py"]
launch_config.extend(args)
joint_cmd = " ".join(str(x) for x in launch_config)
print("Following command will be executed: \n", joint_cmd)
process = subprocess.Popen(joint_cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
while True:
output = process.stdout.readline()
if process.poll() is not None:
break
if output:
print(output.decode("utf-8").strip())
rc = process.poll()
if process.returncode != 0:
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=joint_cmd)
sys.exit(process.returncode)
|
93915
|
from .cifar import CIFAR10Instance, PseudoCIFAR10
from .folder import ImageFolderInstance, PseudoDatasetFolder
__all__ = ('CIFAR10Instance', 'PseudoDatasetFolder')
|
93950
|
import time, random, requests
from concurrent.futures import ThreadPoolExecutor
class Downloader(object):
def __init__(
self,
workers = 8,
tries = 3,
sleep = 2,
multiplier = 1.5,
):
"""
Launch a thread pool of workers to download a list of URLs
asynchronously, with built-in adjustable rate limiting.
Arguments:
- workers: the number of threads to launch (default = 8)
- tries: download attempts to make after a failure (default = 3)
- sleep: approx thread wait time between downloads (default = 2)
* (approximate rate is workers/sleep URLs per second)
- multiplier: increase sleep time on each try (default = 1.5)
Example:
>>> ts = Downloader(workers = 12)
>>> results = ts.download(["youtube.com"])
"""
self.workers = workers
self.tries = tries
self.sleep = sleep
self.multiplier = multiplier
def download(self, urls):
"""
        Yield a result dict (or None on failure) for each URL in `urls`, in
        input order.
"""
urls = list(urls)
with ThreadPoolExecutor(self.workers) as executor:
yield from executor.map(self._thread, urls)
def _thread(self, url):
sleep = self.sleep
for _ in range(self.tries):
time.sleep(random.random() * sleep * 2)
try:
req = requests.get(url)
if req.status_code == 200:
return {
"url": url,
"html": req.text
}
else:
raise Exception()
except Exception:
sleep *= self.multiplier
return None
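# Minimal usage sketch (assumes outbound network access; URLs are hypothetical).
# Note that `download` is a generator, so iterating over it drives the downloads:
#
#   dl = Downloader(workers=4, tries=2)
#   for result in dl.download(["https://example.com", "https://example.org"]):
#       if result is not None:
#           print(result["url"], len(result["html"]))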
|
93954
|
import logging
import sys
from deeplens.struct import VideoStream
from deeplens.dataflow.map import Sample
from deeplens.ocr.pytesseract import PyTesseractOCR
from timeit import default_timer as timer
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
start = timer()
if len(sys.argv) < 2:
print("Enter filename as argv[1]")
exit(1)
filename = sys.argv[1]
v = VideoStream(filename, limit=500)
pipeline = v[Sample(1/50)][PyTesseractOCR()]
labels = [text for text in pipeline]
print(labels)
end = timer()
print(end - start)
|
93956
|
import asyncio
import math
from concurrent.futures.process import ProcessPoolExecutor
from datetime import date
from datetime import datetime
import pytest
from yui.apps.compute.calc import BadSyntax
from yui.apps.compute.calc import Decimal as D
from yui.apps.compute.calc import Evaluator
from yui.apps.compute.calc import calculate
from ...util import FakeBot
class GetItemSpy:
def __init__(self):
self.queue = []
def __getitem__(self, item):
self.queue.append(item)
def test_decimal():
assert -D('1') == D('-1')
assert +D('1') == D('1')
assert abs(D('-1')) == D('1')
assert D('1') + 1 == D('2')
assert 1 + D('1') == D('2')
assert D('1') - 1 == D('0')
assert 1 - D('1') == D('0')
assert D('2') * 3 == D('6')
assert 2 * D('3') == D('6')
assert D('10') // 2 == D('5')
assert 10 // D('2') == D('5')
assert D('10') / 2.5 == D('4')
assert 10 / D('2.5') == D('4')
assert D('5') % 2 == D('1')
assert 5 % D('2') == D('1')
assert divmod(D('5'), 2) == (D('2'), D('1'))
assert divmod(5, D('2')) == (D('2'), D('1'))
assert D('3') ** 2 == D('9')
assert 3 ** D('2') == D('9')
def test_annassign():
e = Evaluator()
err = 'You can not use annotation syntax'
with pytest.raises(BadSyntax, match=err):
e.run('a: int = 10')
assert 'a' not in e.symbol_table
def test_assert():
e = Evaluator()
err = 'You can not use assertion syntax'
with pytest.raises(BadSyntax, match=err):
e.run('assert True')
with pytest.raises(BadSyntax, match=err):
e.run('assert False')
def test_assign():
e = Evaluator()
e.run('a = 1 + 2')
assert e.symbol_table['a'] == 3
e.run('x, y = 10, 20')
assert e.symbol_table['x'] == 10
assert e.symbol_table['y'] == 20
e.symbol_table['dt'] = datetime.now()
err = 'This assign method is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('dt.year = 2000')
err = 'too many values to unpack'
with pytest.raises(ValueError, match=err):
e.run('year, month, day = 1,')
err = 'not enough values to unpack'
with pytest.raises(ValueError, match=err):
e.run('id, name = 1, "kirito", "black"')
err = 'cannot unpack non-iterable int object'
with pytest.raises(TypeError, match=err):
e.run('year, month, day = 1')
e.run('arr = [1, 2, 3]')
assert e.symbol_table['arr'] == [1, 2, 3]
e.run('arr[1] = 5')
assert e.symbol_table['arr'] == [1, 5, 3]
e.run('arr[:] = [10, 20, 30]')
assert e.symbol_table['arr'] == [10, 20, 30]
def test_asyncfor():
e = Evaluator()
e.symbol_table['r'] = 0
err = 'You can not use `async for` loop syntax'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
async for x in [1, 2, 3, 4]:
r += x
'''
)
assert e.symbol_table['r'] == 0
def test_asyncfunctiondef():
e = Evaluator()
err = 'Defining new coroutine via def syntax is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
async def abc():
pass
'''
)
assert 'abc' not in e.symbol_table
def test_asyncwith():
e = Evaluator()
e.symbol_table['r'] = 0
err = 'You can not use `async with` syntax'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
async with x():
r += 100
'''
)
assert e.symbol_table['r'] == 0
def test_attribute():
e = Evaluator()
e.symbol_table['dt'] = datetime.now()
e.run('x = dt.year')
assert e.symbol_table['x'] == e.symbol_table['dt'].year
err = 'You can not access `test_test_test` attribute'
with pytest.raises(BadSyntax, match=err):
e.run('y = dt.test_test_test')
assert 'y' not in e.symbol_table
err = 'You can not access `asdf` attribute'
with pytest.raises(BadSyntax, match=err):
e.run('z = x.asdf')
e.symbol_table['math'] = math
err = 'You can not access `__module__` attribute'
with pytest.raises(BadSyntax, match=err):
e.run('math.__module__')
e.symbol_table['datetime'] = datetime
err = 'You can not access `test_test` attribute'
with pytest.raises(BadSyntax, match=err):
e.run('datetime.test_test')
def test_augassign():
e = Evaluator()
e.symbol_table['a'] = 0
e.run('a += 1')
assert e.symbol_table['a'] == 1
e.symbol_table['l'] = [1, 2, 3, 4]
e.run('l[0] -= 1')
assert e.symbol_table['l'] == [0, 2, 3, 4]
err = 'This assign method is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('l[2:3] += 20')
e.symbol_table['dt'] = datetime.now()
err = 'This assign method is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('dt.year += 2000')
def test_await():
e = Evaluator()
err = 'You can not await anything'
with pytest.raises(BadSyntax, match=err):
e.run('r = await x()')
assert 'r' not in e.symbol_table
def test_binop():
e = Evaluator()
assert e.run('1 + 2') == 1 + 2
assert e.run('3 & 2') == 3 & 2
assert e.run('1 | 2') == 1 | 2
assert e.run('3 ^ 2') == 3 ^ 2
assert e.run('3 / 2') == 3 / 2
assert e.run('3 // 2') == 3 // 2
assert e.run('3 << 2') == 3 << 2
with pytest.raises(TypeError):
e.run('2 @ 3')
assert e.run('3 * 2') == 3 * 2
assert e.run('33 % 4') == 33 % 4
assert e.run('3 ** 2') == 3 ** 2
assert e.run('100 >> 2') == 100 >> 2
assert e.run('3 - 1') == 3 - 1
def test_boolop():
e = Evaluator()
assert e.run('True and False') == (True and False)
assert e.run('True or False') == (True or False)
def test_break():
e = Evaluator()
e.run('break')
assert e.current_interrupt.__class__.__name__ == 'Break'
def test_bytes():
e = Evaluator()
assert e.run('b"asdf"') == b'asdf'
e.run('a = b"asdf"')
assert e.symbol_table['a'] == b'asdf'
def test_call():
e = Evaluator()
e.symbol_table['date'] = date
e.run('x = date(2019, 10, day=7)')
assert e.symbol_table['x'] == date(2019, 10, day=7)
e.symbol_table['math'] = math
e.run('y = math.sqrt(121)')
assert e.symbol_table['y'] == math.sqrt(121)
e.symbol_table['datetime'] = datetime
e.run('z = datetime.now().date()')
assert e.symbol_table['z'] == datetime.now().date()
def test_classdef():
e = Evaluator()
err = 'Defining new class via def syntax is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
class ABCD:
pass
'''
)
assert 'ABCD' not in e.symbol_table
def test_compare():
e = Evaluator()
assert e.run('1 == 2') == (1 == 2)
assert e.run('3 > 2') == (3 > 2)
assert e.run('3 >= 2') == (3 >= 2)
assert e.run('"A" in "America"') == ('A' in 'America')
assert e.run('"E" not in "America"') == ('E' not in 'America')
assert e.run('1 is 2') == (1 is 2) # noqa
assert e.run('1 is not 2') == (1 is not 2) # noqa
assert e.run('3 < 2') == (3 < 2)
assert e.run('3 <= 2') == (3 <= 2)
def test_continue():
e = Evaluator()
e.run('continue')
assert e.current_interrupt.__class__.__name__ == 'Continue'
def test_delete():
e = Evaluator()
e.symbol_table['a'] = 0
e.symbol_table['b'] = 0
e.symbol_table['c'] = 0
e.run('del a, b, c')
assert 'a' not in e.symbol_table
assert 'b' not in e.symbol_table
assert 'c' not in e.symbol_table
e.symbol_table['l'] = [1, 2, 3, 4]
e.run('del l[0]')
assert e.symbol_table['l'] == [2, 3, 4]
err = 'This delete method is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('del l[2:3]')
e.symbol_table['dt'] = datetime.now()
err = 'This delete method is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('del dt.year')
def test_dict():
e = Evaluator()
assert e.run('{1: 111, 2: 222}') == {1: 111, 2: 222}
e.run('a = {1: 111, 2: 222}')
assert e.symbol_table['a'] == {1: 111, 2: 222}
def test_dictcomp():
e = Evaluator()
assert e.run('{k+1: v**2 for k, v in {1: 1, 2: 11, 3: 111}.items()}') == {
2: 1,
3: 121,
4: 12321,
}
assert 'k' not in e.symbol_table
assert 'v' not in e.symbol_table
e.run('a = {k+1: v**2 for k, v in {1: 1, 2: 11, 3: 111}.items()}')
assert e.symbol_table['a'] == {
2: 1,
3: 121,
4: 12321,
}
assert 'k' not in e.symbol_table
assert 'v' not in e.symbol_table
def test_ellipsis():
e = Evaluator()
assert e.run('...') == Ellipsis
def test_expr():
e = Evaluator()
assert e.run('True') is True
assert e.run('False') is False
assert e.run('None') is None
assert e.run('123') == 123
assert e.run('"abc"') == 'abc'
assert e.run('[1, 2, 3]') == [1, 2, 3]
assert e.run('(1, 2, 3, 3)') == (1, 2, 3, 3)
assert e.run('{1, 2, 3, 3}') == {1, 2, 3}
assert e.run('{1: 111, 2: 222}') == {1: 111, 2: 222}
def test_functiondef():
e = Evaluator()
err = 'Defining new function via def syntax is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
def abc():
pass
'''
)
assert 'abc' not in e.symbol_table
def test_for():
total = 0
for x in [1, 2, 3, 4, 5, 6]:
total = total + x
if total > 10:
continue
total = total * 2
else:
total = total + 10000
e = Evaluator()
e.run(
'''
total = 0
for x in [1, 2, 3, 4, 5, 6]:
total = total + x
if total > 10:
continue
total = total * 2
else:
total = total + 10000
'''
)
assert e.symbol_table['total'] == total
total2 = 0
for x in [1, 2, 3, 4, 5, 6]:
total2 = total2 + x
if total2 > 10:
break
total2 = total2 * 2
else:
total2 = total2 + 10000
e.run(
'''
total2 = 0
for x in [1, 2, 3, 4, 5, 6]:
total2 = total2 + x
if total2 > 10:
break
total2 = total2 * 2
else:
total2 = total2 + 10000
'''
)
assert e.symbol_table['total2'] == total2
def test_formattedvalue():
e = Evaluator()
e.symbol_table['before'] = 123456
e.run('after = f"change {before} to {before:,}!"')
assert e.symbol_table['after'] == 'change 123456 to 123,456!'
def test_generator_exp():
e = Evaluator()
e.symbol_table['r'] = [1, 2, 3]
err = 'Defining new generator expression is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('x = (i ** 2 for i in r)')
assert 'x' not in e.symbol_table
def test_global():
e = Evaluator()
err = 'You can not use `global` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('global x')
def test_if():
e = Evaluator()
e.symbol_table['a'] = 1
e.run(
'''
if a == 1:
a = 2
b = 3
'''
)
assert e.symbol_table['a'] == 2
assert e.symbol_table['b'] == 3
e.run(
'''
if a == 1:
a = 2
b = 3
z = 1
else:
a = 3
b = 4
c = 5
'''
)
assert e.symbol_table['a'] == 3
assert e.symbol_table['b'] == 4
assert e.symbol_table['c'] == 5
assert 'z' not in e.symbol_table
e.run(
'''
if a == 1:
a = 2
b = 3
z = 1
elif a == 3:
d = 4
e = 5
f = 6
else:
a = 3
b = 4
c = 5
y = 7
'''
)
assert e.symbol_table['a'] == 3
assert e.symbol_table['b'] == 4
assert e.symbol_table['c'] == 5
assert e.symbol_table['d'] == 4
assert e.symbol_table['e'] == 5
assert e.symbol_table['f'] == 6
assert 'y' not in e.symbol_table
assert 'z' not in e.symbol_table
def test_ifexp():
e = Evaluator()
assert e.run('100 if 1 == 1 else 200') == 100
assert e.run('100 if 1 == 2 else 200') == 200
def test_import():
e = Evaluator()
err = 'You can not import anything'
with pytest.raises(BadSyntax, match=err):
e.run('import sys')
assert 'sys' not in e.symbol_table
def test_importfrom():
e = Evaluator()
err = 'You can not import anything'
with pytest.raises(BadSyntax, match=err):
e.run('from os import path')
assert 'path' not in e.symbol_table
def test_lambda():
e = Evaluator()
err = 'Defining new function via lambda syntax is not allowed'
with pytest.raises(BadSyntax, match=err):
e.run('lambda x: x*2')
def test_list():
e = Evaluator()
assert e.run('[1, 2, 3]') == [1, 2, 3]
e.run('a = [1, 2, 3]')
assert e.symbol_table['a'] == [1, 2, 3]
def test_listcomp():
e = Evaluator()
assert e.run('[x ** 2 for x in [1, 2, 3]]') == [1, 4, 9]
assert 'x' not in e.symbol_table
assert e.run('[x ** 2 + y for x in [1, 2, 3] for y in [10, 20, 30]]') == (
[x ** 2 + y for x in [1, 2, 3] for y in [10, 20, 30]]
)
assert 'x' not in e.symbol_table
assert 'y' not in e.symbol_table
assert e.run('[y ** 2 for x in [1, 2, 3] for y in [x+1, x+3, x+5]]') == (
[y ** 2 for x in [1, 2, 3] for y in [x + 1, x + 3, x + 5]]
)
assert 'x' not in e.symbol_table
assert 'y' not in e.symbol_table
def test_nameconstant():
e = Evaluator()
assert e.run('True') is True
assert e.run('False') is False
assert e.run('None') is None
e.run('x = True')
e.run('y = False')
e.run('z = None')
assert e.symbol_table['x'] is True
assert e.symbol_table['y'] is False
assert e.symbol_table['z'] is None
def test_nonlocal():
e = Evaluator()
err = 'You can not use `nonlocal` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('nonlocal x')
def test_num():
e = Evaluator()
assert e.run('123') == 123
e.run('a = 123')
assert e.symbol_table['a'] == 123
def test_pass():
e = Evaluator()
e.run('pass')
def test_raise():
e = Evaluator()
err = 'You can not use `raise` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('raise NameError')
def test_return():
e = Evaluator()
err = 'You can not use `return` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('return True')
def test_set():
e = Evaluator()
assert e.run('{1, 1, 2, 3, 3}') == {1, 2, 3}
e.run('a = {1, 1, 2, 3, 3}')
assert e.symbol_table['a'] == {1, 2, 3}
def test_setcomp():
e = Evaluator()
assert e.run('{x ** 2 for x in [1, 2, 3, 3]}') == {1, 4, 9}
assert 'x' not in e.symbol_table
assert e.run('{x ** 2 + y for x in [1, 2, 3] for y in [10, 20, 30]}') == (
{x ** 2 + y for x in [1, 2, 3] for y in [10, 20, 30]}
)
assert 'x' not in e.symbol_table
assert 'y' not in e.symbol_table
assert e.run('{y ** 2 for x in [1, 2, 3] for y in [x+1, x+3, x+5]}') == (
{y ** 2 for x in [1, 2, 3] for y in [x + 1, x + 3, x + 5]}
)
assert 'x' not in e.symbol_table
assert 'y' not in e.symbol_table
def test_slice():
e = Evaluator()
e.symbol_table['obj'] = GetItemSpy()
e.run('obj[10:20:3]')
s = e.symbol_table['obj'].queue.pop()
assert isinstance(s, slice)
assert s.start == 10
assert s.stop == 20
assert s.step == 3
def test_str():
e = Evaluator()
assert e.run('"asdf"') == 'asdf'
e.run('a = "asdf"')
assert e.symbol_table['a'] == 'asdf'
def test_subscript():
e = Evaluator()
assert e.run('[10, 20, 30][0]') == 10
assert e.run('(100, 200, 300)[0]') == 100
assert e.run('{"a": 1000, "b": 2000, "c": 3000}["a"]') == 1000
e.run('a = [10, 20, 30][0]')
e.run('b = (100, 200, 300)[0]')
e.run('c = {"a": 1000, "b": 2000, "c": 3000}["a"]')
assert e.symbol_table['a'] == 10
assert e.symbol_table['b'] == 100
assert e.symbol_table['c'] == 1000
e.symbol_table['l'] = [11, 22, 33]
assert e.run('l[2]') == 33
e.run('l[2] = 44')
assert e.symbol_table['l'] == [11, 22, 44]
def test_try():
e = Evaluator()
err = 'You can not use `try` syntax'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
try:
x = 1
except:
pass
'''
)
assert 'x' not in e.symbol_table
def test_tuple():
e = Evaluator()
assert e.run('(1, 1, 2, 3, 3)') == (1, 1, 2, 3, 3)
e.run('a = (1, 1, 2, 3, 3)')
assert e.symbol_table['a'] == (1, 1, 2, 3, 3)
def test_unaryop():
e = Evaluator()
assert e.run('~100') == ~100
assert e.run('not 100') == (not 100)
assert e.run('+100') == +100
assert e.run('-100') == -100
def test_while():
total = 0
i = 1
while total > 100:
total += i
i += i
if i % 10 == 0:
i += 1
else:
total = total + 10000
e = Evaluator()
e.run(
'''
total = 0
i = 1
while total > 100:
total += i
i += i
if i % 10 == 0:
i += 1
else:
total = total + 10000
'''
)
assert e.symbol_table['total'] == total
r = 0
while True:
break
else:
r += 10
e.run(
'''
r = 0
while True:
break
else:
r += 10
'''
)
assert e.symbol_table['r'] == 0
def test_with():
e = Evaluator()
err = 'You can not use `with` syntax'
with pytest.raises(BadSyntax, match=err):
e.run(
'''
with some:
x = 1
'''
)
assert 'x' not in e.symbol_table
def test_yield():
e = Evaluator()
err = 'You can not use `yield` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('x = yield f()')
assert 'x' not in e.symbol_table
def test_yield_from():
e = Evaluator()
err = 'You can not use `yield from` syntax'
with pytest.raises(BadSyntax, match=err):
e.run('x = yield from f()')
assert 'x' not in e.symbol_table
@pytest.fixture(scope='module')
def event_loop():
loop = asyncio.new_event_loop()
yield loop
loop.close()
@pytest.fixture(scope='module')
async def bot(event_loop):
return FakeBot(
loop=event_loop,
process_pool_executor=ProcessPoolExecutor(),
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
(
'expr, expected_decimal_result, expected_num_result,'
'expected_decimal_local, expected_num_local'
),
[
('1', D('1'), 1, {}, {}),
('1+2', D('3'), 3, {}, {}),
(
'0.1+0.1+0.1+0.1+0.1+0.1+0.1+0.1+0.1+0.1',
D('1'),
0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1 + 0.1,
{},
{},
),
('1-2', D('-1'), -1, {}, {}),
('4*5', D('20'), 20, {}, {}),
('1/2', D('0.5'), 0.5, {}, {}),
('10%3', D('1'), 1, {}, {}),
('2**3', D('8'), 8, {}, {}),
('(1+2)**3', D('27'), 27, {}, {}),
('max(1,2,3,4,5)', D('5'), 5, {}, {}),
('math.floor(3.2)', D('3'), 3, {}, {}),
('1+math.e', D(math.e) + D('1'), math.e + 1, {}, {}),
('[1,2,3]', [D('1'), D('2'), D('3')], [1, 2, 3], {}, {}),
(
'[x*10 for x in [0,1,2]]',
[D('0'), D('10'), D('20')],
[0, 10, 20],
{},
{},
),
('(1,2,3)', (D('1'), D('2'), D('3')), (1, 2, 3), {}, {}),
('{3,2,10}', {D('2'), D('3'), D('10')}, {2, 3, 10}, {}, {}),
('{x%2 for x in [1,2,3,4]}', {D('0'), D('1')}, {0, 1}, {}, {}),
('{"ab": 123}', {'ab': D('123')}, {'ab': 123}, {}, {}),
(
'{"k"+str(x): x-1 for x in [1,2,3]}',
{'k1': D('0'), 'k2': D('1'), 'k3': D('2')},
{'k1': 0, 'k2': 1, 'k3': 2},
{},
{},
),
('3 in [1,2,3]', True, True, {}, {}),
('[1,2,3,12,3].count(3)', 2, 2, {}, {}),
('{1,2} & {2,3}', {D('2')}, {2}, {}, {}),
('"item4"', 'item4', 'item4', {}, {}),
('"{}4".format("item")', 'item4', 'item4', {}, {}),
('money = 1000', None, None, {'money': D('1000')}, {'money': 1000}),
(
'money = 1000; money * 2',
D('2000'),
2000,
{'money': D('1000')},
{'money': 1000},
),
(
'money = 1000; f"{money}원"',
'1000원',
'1000원',
{'money': D('1000')},
{'money': 1000},
),
(
'a = 11;\nif a > 10:\n a += 100\na',
D('111'),
111,
{'a': D(111)},
{'a': 111},
),
],
)
async def test_calculate_fine(
bot,
expr: str,
expected_decimal_result,
expected_num_result,
expected_decimal_local: dict,
expected_num_local: dict,
):
decimal_result, decimal_local = await bot.run_in_other_process(
calculate,
expr,
decimal_mode=True,
)
num_result, num_local = await bot.run_in_other_process(
calculate,
expr,
decimal_mode=False,
)
assert expected_decimal_result == decimal_result
assert expected_decimal_local.keys() == decimal_local.keys()
for key in decimal_local.keys():
expected = expected_decimal_local[key]
local = decimal_local[key]
assert type(expected) == type(local)
if callable(expected):
assert expected(1) == local(1)
else:
assert expected == local
assert expected_num_result == num_result
assert expected_num_local.keys() == num_local.keys()
for key in num_local.keys():
expected = expected_num_local[key]
local = num_local[key]
assert type(expected) == type(local)
assert expected == local
|
93958
|
from whos_there import __version__
def test_version():
# real version initialized in package from poetry and resolved from resources
assert __version__ == "0.0.0"
|
93978
|
try:
# Python 3
from http.client import HTTPResponse, IncompleteRead
except (ImportError):
# Python 2
from httplib import HTTPResponse, IncompleteRead
from ..console_write import console_write
class DebuggableHTTPResponse(HTTPResponse):
"""
A custom HTTPResponse that formats debugging info for Sublime Text
"""
_debug_protocol = 'HTTP'
def __init__(self, sock, debuglevel=0, method=None, **kwargs):
# We have to use a positive debuglevel to get it passed to here,
# however we don't want to use it because by default debugging prints
# to the stdout and we can't capture it, so we use a special -1 value
if debuglevel == 5:
debuglevel = -1
HTTPResponse.__init__(self, sock, debuglevel=debuglevel, method=method)
def begin(self):
return_value = HTTPResponse.begin(self)
if self.debuglevel == -1:
console_write(u'Urllib %s Debug Read' % self._debug_protocol, True)
# Python 2
if hasattr(self.msg, 'headers'):
headers = self.msg.headers
# Python 3
else:
headers = []
for header in self.msg:
headers.append("%s: %s" % (header, self.msg[header]))
versions = {
9: 'HTTP/0.9',
10: 'HTTP/1.0',
11: 'HTTP/1.1'
}
status_line = versions[self.version] + ' ' + str(self.status) + ' ' + self.reason
headers.insert(0, status_line)
for line in headers:
console_write(u" %s" % line.rstrip())
return return_value
def is_keep_alive(self):
# Python 2
if hasattr(self.msg, 'headers'):
connection = self.msg.getheader('connection')
# Python 3
else:
connection = self.msg['connection']
if connection and connection.lower() == 'keep-alive':
return True
return False
def read(self, *args):
try:
return HTTPResponse.read(self, *args)
except (IncompleteRead) as e:
return e.partial
|
93980
|
import info
class subinfo(info.infoclass):
def setTargets( self ):
self.description = 'Real-time Noise Suppression Plugin'
#self.svnTargets["master"] = "https://github.com/werman/noise-suppression-for-voice"
ver = "0.91"
self.targets[ver] = f"https://github.com/werman/noise-suppression-for-voice/archive/refs/tags/v{ver}.tar.gz"
self.targetInstSrc[ver] = f"noise-suppression-for-voice-{ver}"
self.targetDigests[ver] = (['4f3a112534d4abb5ee2b6c328cde89193dbdb2146cffc98505972c3b5397a35e'], CraftHash.HashAlgorithm.SHA256)
self.patchToApply[ver] = [("0001-install-ladspa-in-lib.patch", 1)]
self.defaultTarget = "0.91"
def setDependencies( self ):
self.runtimeDependencies["virtual/base"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__( self, **args ):
CMakePackageBase.__init__( self )
self.subinfo.options.configure.args += " -DBUILD_VST_PLUGIN=OFF -DBUILD_LV2_PLUGIN=OFF "
def install(self):
if not super().install():
return False
if CraftCore.compiler.isMacOS:
return utils.mergeTree(self.installDir()/"lib", self.installDir()/"plugins")
return True
|
93985
|
import os
import re
import json
import time
import numpy as np
import pandas as pd
from plotnine import *
# Config
PATH = os.getcwd()
path_n = re.split(pattern=r"/|\\", string=PATH)[1:]
if os.name == "posix":
path_n = "/" + os.path.join(*path_n)
else:
drive = PATH[0:3]
path_n = drive + os.path.join(*path_n)
RUNS = 100
def infer_column_cats(dir: "Path to working directory.") -> tuple:
"""Helper function to identify dataset sizes based on file names."""
files = os.listdir(os.path.join(dir, "data"))
cats = set([re.match(pattern=".*_(.*).csv$", string=file).group(1) for file in files])
cols = set([re.match(pattern=".*_(.*)_.*.csv$", string=file).group(1) for file in files])
return cats, cols
def time_function(func: "Function call to be evaluated as str.") -> float:
"""Helper function to time data access."""
start = time.time()
exec(func)
return time.time() - start
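# Illustrative call (the column name is hypothetical, not from this script):
# time_function("temp['result'] = temp['INT_1'] + 1") returns the wall-clock seconds
# spent executing the statement via exec(), assuming a DataFrame named `temp` is in scope.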
def create_stats(measures: "List of function timings.",
col: "Current Column.", row: "Current Row",
scenario: "Current Scenario.") -> dict:
"""Helper function to create result dataset."""
return {"scenario": scenario,
"no_column": col,
"data_length": row,
"min": np.min(measures),
"max": np.max(measures),
"avg": np.mean(measures),
"q50": np.median(measures)}
scenarios = json.load(open(os.path.join(path_n, "output", "mutate.JSON")))
nrows, ncols = infer_column_cats(path_n)
timings, results = [], []
for col in ncols:
print(f"-Column: {col}--")
for row in nrows:
print(f"--Row: {row}")
data = pd.read_csv(os.path.join(path_n, "data", f"sim_data_{col}_{row}.csv"))
for i, scenario in enumerate(scenarios[col]["mutate"]):
print(f"---Scenario {i+1}: {scenario}---")
sel = re.search(pattern=r'([A-Z]{3})', string=scenario).group(1)
print(sel)
if sel == "INT":
func = f"temp['result'] = temp['{scenario}'] + 1"
elif sel == "DBL":
func = f"temp['result'] = temp['{scenario}'] * 2"
elif sel == "STR":
func = f"temp['result'] = temp['{scenario}'] + 'a'"
elif sel == "LGL":
func = f"temp['result'] = ~temp['{scenario}']"
for j in range(RUNS):
temp = data
timings.append(time_function(func=func))
temp = None
results.append(create_stats(measures=timings, col=col, row=row, scenario=sel))
print(results[-1])
timings = []
results_df = pd.DataFrame(results)
results_df[["data_length", "no_column"]] = results_df[["data_length", "no_column"]].apply(pd.to_numeric,
axis=1,
downcast="integer")
results_df.sort_values(["data_length", "no_column"])
results_df[["min", "max", "q50", "avg"]] = round(results_df[["min", "max", "q50", "avg"]] * 1000, 2)
# results_df["sel_col"] = results_df["scenario"].apply(lambda x: re.search(pattern="([13])", string=x).group(1))
# results_df["pos_col"] = results_df["scenario"].apply(lambda x: re.search(pattern="[13](.*)$", string=x).group(1))
results_df.to_csv(os.path.join(path_n, "output", "mutate_results_pandas.csv"), index=False)
|
93987
|
import pytest
from numerous.engine.model import Model
from numerous.engine.simulation import Simulation
from numerous.utils.logger_levels import LoggerLevel
from numerous.multiphysics.equation_base import EquationBase
from numerous.multiphysics.equation_decorators import Equation
from numerous.engine.system.item import Item
from numerous.engine.system.subsystem import Subsystem
from numerous.engine.simulation.solvers.base_solver import solver_types
import numpy as np
INFO = LoggerLevel.INFO
DEBUG = LoggerLevel.DEBUG
ALL = LoggerLevel.ALL
@pytest.fixture(autouse=True)
def run_before_and_after_tests():
import shutil
shutil.rmtree('../tmp', ignore_errors=True)
yield
class TestLogItem1(Item, EquationBase):
def __init__(self, tag='testlogitem1'):
super(TestLogItem1, self).__init__(tag)
self.t1 = self.create_namespace('t1')
self.add_state('v', 0, logger_level=INFO)
self.add_state('s', 0.5, logger_level=DEBUG)
self.add_parameter('p', 1, logger_level=ALL)
self.t1.add_equations([self])
return
@Equation()
def eval(self, scope):
scope.v_dot = 1
scope.s_dot = -2 / ((np.exp(scope.v) + np.exp(-scope.v)) ** 2)
class TestLogSubsystem1(Subsystem):
def __init__(self, tag='testlogsubsystem1'):
super().__init__(tag)
item = TestLogItem1()
self.register_items([item])
def sigmoidlike(t):
return 1 / (1 + np.exp(2 * t))
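# Analytic reference for the assertions below: with v(0) = 0 and v_dot = 1 we have v(t) = t,
# and s(t) = 1 / (1 + exp(2t)) satisfies s(0) = 0.5 and s_dot = -2 / (exp(v) + exp(-v))**2,
# matching the equations defined in TestLogItem1.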
@pytest.mark.parametrize("solver", solver_types)
@pytest.mark.parametrize("use_llvm", [True, False])
def test_logger_levels(solver, use_llvm):
num = 100
t_stop = 100
t_start = 0
sys = TestLogSubsystem1()
model = Model(sys, logger_level=ALL, use_llvm=use_llvm)
tvec = np.linspace(t_start, t_stop, num + 1, dtype=np.float64)
sim = Simulation(model, t_start=t_start, t_stop=t_stop, num=num, num_inner=1, solver_type=solver,
rtol=1e-8, atol=1e-8)
sim.solve()
df = sim.model.historian_df
s_analytic = sigmoidlike(tvec)
prefix = 'testlogsubsystem1.testlogitem1.t1'
p = f"{prefix}.p"
v = f"{prefix}.v"
s = f"{prefix}.s"
expected_results = {v: tvec, p: np.ones(num + 1), s: s_analytic}
    for key, expected in expected_results.items():
        assert pytest.approx(expected, abs=1e-5) == df.get(key), "expected results do not match actual results"
|
94035
|
from typing import List, Optional
import pickle
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.pipeline import Pipeline
def fit(
X: pd.DataFrame,
y: pd.Series,
output_dir: str,
class_order: Optional[List[str]] = None,
row_weights: Optional[np.ndarray] = None,
**kwargs,
) -> None:
estimator = pipeline(X)
estimator.fit(X, y)
output_dir_path = Path(output_dir)
if output_dir_path.exists() and output_dir_path.is_dir():
with open("{}/artifact.pkl".format(output_dir), "wb") as fp:
pickle.dump(estimator, fp)
class RoundInput():
"""
    Rounds the output of a prior model; those unrounded predictions are used
    as the inputs here.
"""
def __init__(self, X):
self.X = X
def fit(self, X, y=None, **kwargs):
self.X = round(X)
return self
def transform(self, X):
return np.array(round(X[X.columns[0]])).reshape(-1, 1)
class EmptyEstimator():
"""
This is empty because the rounding is done in the above step of the pipeline.
Still need this for the pipeline to run though.
"""
def fit(self, X, y):
return self
def predict(self, data: pd.DataFrame):
return data[:,0]
def pipeline(X):
return Pipeline(steps=[("preprocessing", RoundInput(X)), ("model", EmptyEstimator())])
def score(data: pd.DataFrame, model, **kwargs) -> pd.DataFrame:
return pd.DataFrame(data=model.predict(data), columns = ['Predictions'])
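# Note: fit() and score() appear to follow the custom-task hook convention assumed by the
# surrounding project: fit() pickles the fitted pipeline to output_dir as artifact.pkl, and
# score() returns a single 'Predictions' column built from the rounded first input column.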
|
94061
|
from torch import Tensor
from torch_geometric.data import Data
import torch
import torch_geometric.utils as tu
from torch_scatter import scatter_add
def degree(edge_index: Tensor, direction='out', num_nodes=None, edge_weight=None):
"""calulcates the degree of each node in the graph
Args:
edge_index (Tensor): tensor edge_index encoding the graph structure
direction (str, optional): either calculate 'in'-degree or 'out'-degree. Defaults to 'out'.
num_nodes (int, optional): number of nodes. Defaults to None.
edge_weight (Tensor, optional): weight of edges. Defaults to None.
Raises:
AssertionError: raised if unsupported direction is passed
Returns:
Tensor: node degree
"""
row, col = edge_index[0], edge_index[1]
if num_nodes is None:
num_nodes = edge_index.max() + 1
if edge_weight is None:
edge_weight = torch.ones(
(edge_index.size(1), ),
device=edge_index.device)
if direction == 'out':
return scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
elif direction == 'in':
return scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)
else:
raise AssertionError
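# Illustrative usage on an assumed toy graph (not part of the original module):
# edge_index = torch.tensor([[0, 0, 1], [1, 2, 2]]) encodes edges 0->1, 0->2, 1->2, so
# degree(edge_index, 'out') returns [2., 1., 0.] and degree(edge_index, 'in') returns [0., 1., 2.].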
def get_k_hop_diversity(data: Data, k=1, kind='diversity'):
"""returns k-hop-diversity of each node in the grap
Args:
data (Data): pytorch-geometric data object representing graph
k (int, optional): k specifying k-hop neighborhood. Defaults to 1.
kind (str, optional): either return 'purity' or 'diversity'. Defaults to 'diversity'.
Raises:
        AssertionError: raised if an unsupported kind is passed
Returns:
        Tensor: diversity or purity
"""
n_nodes = data.y.size(0)
diversity = torch.zeros_like(data.y)
if kind == 'purity':
diversity = diversity.float()
for n in range(n_nodes):
k_hop_nodes, _, _, _ = tu.k_hop_subgraph(n, k, data.edge_index)
if kind == 'diversity':
div = len(data.y[k_hop_nodes].unique())
elif kind == 'purity':
y_center = data.y[n]
y_hop = data.y[k_hop_nodes]
div = (y_hop == y_center.item()).float().mean()
else:
raise AssertionError
diversity[n] = div
return diversity
|
94064
|
import re
from wagtail import __version__ as WAGTAIL_VERSION
def is_wagtail_version_more_than_equal_to_2_5():
    expression = r'^((2\.([5-9]{1,}|([1-9]{1,}[0-9]{1,}))(\.\d+)*)|(([3-9]{1,})(\.\d+)*))$'
return re.search(expression, WAGTAIL_VERSION)
def is_wagtail_version_more_than_equal_to_2_0():
    expression = r'^((2\.([0-9]{1,}|([1-9]{1,}[0-9]{1,}))(\.\d+)*)|(([3-9]{1,})(\.\d+)*))$'
return re.search(expression, WAGTAIL_VERSION)
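# For reference, version strings such as "2.5", "2.11.1" and "3.0" match the 2.5+ pattern,
# while "2.4.2" does not; the 2.0 pattern additionally accepts "2.0" through "2.4".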
|
94065
|
import re
import string
from collections import Counter
from pathlib import Path
from gutenberg_cleaner import simple_cleaner
from more_itertools import windowed
import nltk
from gender_analysis.text import common
class Document:
"""
The Document class loads and holds the full text and
metadata (author, title, publication date, etc.) of a document
:param metadata_dict: Dictionary with metadata fields as keys and data as values
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Persuasion', 'date': '1818',
... 'filename': 'austen_persuasion.txt',
... 'filepath': Path(TEST_DATA_DIR,
... 'sample_novels', 'texts', 'austen_persuasion.txt')}
>>> austen = Document(document_metadata)
>>> type(austen.text)
<class 'str'>
>>> len(austen.text)
466887
"""
def __init__(self, metadata_dict):
if not isinstance(metadata_dict, dict):
raise TypeError(
'metadata must be passed in as a dictionary value'
)
# Check that the essential attributes for the document exists.
if 'filename' not in metadata_dict:
raise ValueError(str(metadata_dict) + 'metadata_dict must have an entry for filename')
self.members = list(metadata_dict.keys())
for key in metadata_dict:
if hasattr(self, str(key)):
raise KeyError(
'Key name ',
str(key),
' is reserved in the Document class. Please use another name.'
)
setattr(self, str(key), metadata_dict[key])
# optional attributes
# Check that the date is a year (where negative represent the BCE year)
if 'date' in metadata_dict:
if not re.match(r'^[-]?\d+$', metadata_dict['date']):
raise ValueError('The document date should be a year, not',
f'{metadata_dict["date"]}. Full metadata: {metadata_dict}')
try:
self.date = int(metadata_dict['date'])
except KeyError:
self.date = None
self._word_counts_counter = None
self._word_count = None
self._tokenized_text = None
if not metadata_dict['filename'].endswith('.txt'):
raise ValueError(
f"The document filename {metadata_dict['filename']}"
+ f"does not end in .txt . Full metadata: '{metadata_dict}.'"
)
if 'label' not in metadata_dict:
self.label = self.filename[0:len(self.filename) - 4]
# memoization
self._part_of_speech_tags = None
self.text = self._load_document_text()
@property
def word_count(self):
"""
Lazy-loading for **Document.word_count** attribute.
Returns the number of words in the document.
        The word_count attribute is useful for the get_word_freq function.
        However, it is expensive to compute, so it is only loaded when it is actually required.
:return: Number of words in the document's text as an int
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Persuasion', 'date': '1818',
... 'filename': 'austen_persuasion.txt',
... 'filepath': Path(TEST_DATA_DIR, 'sample_novels',
... 'texts', 'austen_persuasion.txt')}
>>> austen = Document(document_metadata)
>>> austen.word_count
83285
"""
if self._word_count is None:
self._word_count = len(self.get_tokenized_text())
return self._word_count
def __str__(self):
"""
Overrides python print method for user-defined objects for Document class
        Returns the label: the filename without the extension (typically author_title).
:return: str
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Persuasion', 'date': '1818',
... 'filename': 'austen_persuasion.txt',
... 'filepath': Path(TEST_DATA_DIR, 'sample_novels',
... 'texts', 'austen_persuasion.txt')}
>>> austen = Document(document_metadata)
>>> document_string = str(austen)
>>> document_string
'austen_persuasion'
"""
return self.label
def __repr__(self):
'''
Overrides the built-in __repr__ method
Returns the object type (Document) and then the filename without the extension
in <>.
:return: string
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Persuasion', 'date': '1818',
... 'filename': 'austen_persuasion.txt',
... 'filepath': Path(TEST_DATA_DIR, 'sample_novels',
... 'texts', 'austen_persuasion.txt')}
>>> austen = Document(document_metadata)
>>> repr(austen)
'<Document (austen_persuasion)>'
'''
return f'<Document ({self.label})>'
def __eq__(self, other):
"""
Overload the equality operator to enable comparing and sorting documents.
Returns True if the document filenames and text are the same.
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> austen_metadata = {'author': '<NAME>', 'title': 'Persuasion', 'date': '1818',
... 'filename': 'austen_persuasion.txt',
... 'filepath': Path(TEST_DATA_DIR, 'sample_novels',
... 'texts', 'austen_persuasion.txt')}
>>> austen = Document(austen_metadata)
>>> austen2 = Document(austen_metadata)
>>> austen == austen2
True
>>> austen.text += 'no longer equal'
>>> austen == austen2
False
:return: bool
"""
if not isinstance(other, Document):
raise NotImplementedError("Only a Document can be compared to another Document.")
attributes_required_to_be_equal = ['filename']
for attribute in attributes_required_to_be_equal:
if not hasattr(other, attribute):
raise common.MissingMetadataError(
[attribute], f'{str(other)} lacks attribute {attribute}.'
)
if getattr(self, attribute) != getattr(other, attribute):
return False
if self.text != other.text:
return False
return True
def __lt__(self, other):
"""
Overload less than operator to enable comparing and sorting documents.
Sorts first by author, title, and then date.
If these are not available, it sorts by filenames.
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> austen_metadata = {'author': '<NAME>', 'title': 'Persuasion', 'date': '1818',
... 'filename': 'austen_persuasion.txt',
... 'filepath': Path(TEST_DATA_DIR, 'sample_novels',
... 'texts', 'austen_persuasion.txt')}
>>> austen = Document(austen_metadata)
>>> hawthorne_metadata = {'author': '<NAME>', 'title': 'Scarlet Letter',
... 'date': '1850', 'filename': 'hawthorne_scarlet.txt',
... 'filepath': Path(TEST_DATA_DIR, 'sample_novels',
... 'texts', 'hawthorne_scarlet.txt')}
>>> hawthorne = Document(hawthorne_metadata)
>>> hawthorne < austen
False
>>> austen < hawthorne
True
:return: bool
"""
if not isinstance(other, Document):
raise NotImplementedError("Only a Document can be compared to another Document.")
try:
return (self.author, self.title, self.date) < (other.author, other.title, other.date)
except AttributeError:
return self.filename < other.filename
def __hash__(self):
"""
Makes the Document object hashable
:return:
"""
return hash(repr(self))
@staticmethod
def _clean_quotes(text):
"""
Scans through the text and replaces all of the smart quotes and apostrophes with their
"normal" ASCII variants
>>> from gender_analysis import Document
>>> smart_text = 'This is a “smart” phrase'
>>> Document._clean_quotes(smart_text)
'This is a "smart" phrase'
:param text: The string to reformat
        :return: A string that is identical to `text`, except with its smart quotes exchanged
"""
# Define the quotes that will be swapped out
smart_quotes = {
'“': '"',
'”': '"',
"‘": "'",
"’": "'",
}
# Replace all entries one by one
output_text = text
for quote in smart_quotes:
output_text = output_text.replace(quote, smart_quotes[quote])
return output_text
@staticmethod
def _gutenberg_cleaner(text):
"""
Checks to see if a given text is from Project Gutenberg.
If it is, removes the header + footer.
:param text: The string to reformat
        :return: A string that is identical to 'text' unless 'text' is from Gutenberg, in which case
the Gutenberg header and footer is removed
"""
output_text = text
beginning = text[0:100]
if "project gutenberg" in beginning.lower():
output_text = simple_cleaner(output_text)
return output_text
def _load_document_text(self):
"""
Loads the text of the document at the filepath specified in initialization.
:return: str
"""
file_path = Path(self.filepath)
try:
text = common.load_txt_to_string(file_path)
except FileNotFoundError as original_err:
err = (
f'The filename {self.filename} present in your metadata csv does not exist in your '
+ 'files directory.\nPlease check that your metadata matches your dataset.'
)
raise FileNotFoundError(err) from original_err
# Replace smart quotes with regular quotes to standardize input
text = self._clean_quotes(text)
return text
def get_tokenized_text(self):
"""
Tokenizes the text and returns it as a list of tokens, while removing all punctuation.
Note: This does not currently properly handle dashes or contractions.
:return: List of each word in the Document
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Persuasion', 'date': '1818',
... 'filename': 'test_text_1.txt',
... 'filepath': Path(TEST_DATA_DIR,
... 'document_test_files', 'test_text_1.txt')}
>>> austin = Document(document_metadata)
>>> tokenized_text = austin.get_tokenized_text()
>>> tokenized_text
['allkinds', 'of', 'punctuation', 'and', 'special', 'chars']
"""
# Excluded characters: !"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~
if self._tokenized_text is None:
excluded_characters = set(string.punctuation)
cleaned_text = ''
for character in self.text:
if character not in excluded_characters:
cleaned_text += character
tokenized_text = cleaned_text.lower().split()
self._tokenized_text = tokenized_text
return tokenized_text
else:
return self._tokenized_text
def find_quoted_text(self):
"""
Finds all of the quoted statements in the document text.
:return: List of strings enclosed in double-quotations
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Persuasion',
... 'date': '1818', 'filename': 'test_text_0.txt',
... 'filepath': Path(TEST_DATA_DIR,
... 'document_test_files', 'test_text_0.txt')}
>>> document_novel = Document(document_metadata)
>>> document_novel.find_quoted_text()
['"This is a quote"', '"This is my quote"']
"""
text_list = self.text.split()
quotes = []
current_quote = []
quote_in_progress = False
quote_is_paused = False
for word in text_list:
if word[0] == "\"":
quote_in_progress = True
quote_is_paused = False
current_quote.append(word)
elif quote_in_progress:
if not quote_is_paused:
current_quote.append(word)
if word[-1] == "\"":
if word[-2] != ',':
quote_in_progress = False
quote_is_paused = False
quotes.append(' '.join(current_quote))
current_quote = []
else:
quote_is_paused = True
return quotes
def get_count_of_word(self, word):
"""
.. _get-count-of-word:
Returns the number of instances of a word in the text. Not case-sensitive.
If this is your first time running this method, this can be slow.
:param word: word to be counted in text
        :return: Number of occurrences of the word, as an int
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Scarlet Letter',
... 'date': '2018', 'filename': 'test_text_2.txt',
... 'filepath': Path(TEST_DATA_DIR,
... 'document_test_files', 'test_text_2.txt')}
>>> scarlett = Document(document_metadata)
>>> scarlett.get_count_of_word("sad")
4
>>> scarlett.get_count_of_word('ThisWordIsNotInTheWordCounts')
0
"""
# If word_counts were not previously initialized, do it now and store it for the future.
if not self._word_counts_counter:
self._word_counts_counter = Counter(self.get_tokenized_text())
return self._word_counts_counter[word]
def get_count_of_words(self, words):
"""
A helper method for retrieving the number of occurrences of a given set of words within
a Document.
:param words: a list of strings.
:return: a Counter with each word in words keyed to its number of occurrences.
>>> from gender_analysis.text.document import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_filepath = Path(TEST_DATA_DIR, 'document_test_files', 'test_text_9.txt')
>>> document_metadata = {'filename': 'test_text_2.txt', 'filepath': document_filepath}
>>> test_document = Document(document_metadata)
>>> test_document.get_count_of_words(['sad', 'was', 'sadness', 'very'])
Counter({'was': 5, 'sad': 1, 'very': 1, 'sadness': 0})
"""
return Counter({word: self.get_count_of_word(word) for word in words})
def get_wordcount_counter(self):
"""
Returns a counter object of all of the words in the text.
If this is your first time running this method, this can be slow.
:return: Python Counter object
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Scarlet Letter',
... 'date': '2018', 'filename': 'test_text_10.txt',
... 'filepath': Path(TEST_DATA_DIR,
... 'document_test_files', 'test_text_10.txt')}
>>> scarlett = Document(document_metadata)
>>> scarlett.get_wordcount_counter()
Counter({'was': 2, 'convicted': 2, 'hester': 1, 'of': 1, 'adultery': 1})
"""
# If word_counts were not previously initialized, do it now and store it for the future.
if not self._word_counts_counter:
self._word_counts_counter = Counter(self.get_tokenized_text())
return self._word_counts_counter
def words_associated(self, target_word):
"""
.. _words-associated:
Returns a Counter of the words found after a given word.
In the case of double/repeated words, the counter would include the word itself and the next
new word.
        Note: words are always returned in lowercase.
        :param target_word: Single word to search for in the document's text
:return: a Python Counter() object with {associated_word: occurrences}
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Scarlet Letter',
... 'date': '2018', 'filename': 'test_text_11.txt',
... 'filepath': Path(TEST_DATA_DIR,
... 'document_test_files', 'test_text_11.txt')}
>>> scarlett = Document(document_metadata)
>>> scarlett.words_associated("his")
Counter({'cigarette': 1, 'speech': 1})
"""
target_word = target_word.lower()
word_count = Counter()
check = False
text = self.get_tokenized_text()
for word in text:
if check:
word_count[word] += 1
check = False
if word == target_word:
check = True
return word_count
# pylint: disable=line-too-long
def get_word_windows(self, search_terms, window_size=2):
"""
.. _get-word-windows:
Finds all instances of `word` and returns a counter of the words around it.
window_size is the number of words before and after to return, so the total window is
2*window_size + 1.
This is not case sensitive.
:param search_terms: String or list of strings to search for
:param window_size: integer representing number of words to search for in either direction
:return: Python Counter object
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Scarlet Letter',
... 'date': '2018', 'filename': 'test_text_12.txt',
... 'filepath': Path(TEST_DATA_DIR,
... 'document_test_files', 'test_text_12.txt')}
>>> scarlett = Document(document_metadata)
search_terms can be either a string...
>>> scarlett.get_word_windows("his", window_size=2)
Counter({'he': 1, 'lit': 1, 'cigarette': 1, 'and': 1, 'then': 1, 'began': 1, 'speech': 1, 'which': 1})
... or a list of strings.
>>> scarlett.get_word_windows(['purse', 'tears'])
Counter({'her': 2, 'of': 1, 'and': 1, 'handed': 1, 'proposal': 1, 'drowned': 1, 'the': 1})
"""
if isinstance(search_terms, str):
search_terms = [search_terms]
search_terms = set(i.lower() for i in search_terms)
counter = Counter()
for text_window in windowed(self.get_tokenized_text(), 2 * window_size + 1):
if text_window[window_size] in search_terms:
for surrounding_word in text_window:
if surrounding_word not in search_terms:
counter[surrounding_word] += 1
return counter
def get_word_freq(self, word):
"""
.. _get-word-freq:
Returns the frequency of appearance of a word in the document
:param word: str to search for in document
:return: float representing the portion of words in the text that are the parameter word
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Scarlet Letter',
... 'date': '1900', 'filename': 'test_text_2.txt',
... 'filepath': Path(TEST_DATA_DIR,
... 'document_test_files', 'test_text_2.txt')}
>>> scarlett = Document(document_metadata)
>>> frequency = scarlett.get_word_freq('sad')
>>> frequency
0.13333333333333333
"""
word_frequency = self.get_count_of_word(word) / self.word_count
return word_frequency
def get_word_frequencies(self, words):
"""
        A helper method for retrieving the frequencies of a given set of words within a Document.
:param words: a list of strings.
:return: a dictionary of words keyed to float frequencies.
>>> from gender_analysis.text.document import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_filepath = Path(TEST_DATA_DIR, 'document_test_files', 'test_text_9.txt')
>>> document_metadata = {'filename': 'test_text_2.txt', 'filepath': document_filepath}
>>> test_document = Document(document_metadata)
>>> test_document.get_word_frequencies(['peace', 'died', 'foobar'])
{'peace': 0.02702702702702703, 'died': 0.02702702702702703, 'foobar': 0.0}
"""
word_frequencies = {word: self.get_count_of_word(word) / self.word_count for word in words}
return word_frequencies
def get_part_of_speech_tags(self):
"""
.. _get-pos:
Returns the part of speech tags as a list of tuples. The first part of each tuple is the
term, the second one the part of speech tag.
Note: the same word can have a different part of speech tags. In the example below,
see "refuse" and "permit".
:return: List of tuples (term, speech_tag)
>>> from gender_analysis import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_metadata = {'author': '<NAME>', 'title': 'Scarlet Letter',
... 'date': '1900', 'filename': 'test_text_13.txt',
... 'filepath': Path(TEST_DATA_DIR,
... 'document_test_files', 'test_text_13.txt')}
>>> document = Document(document_metadata)
>>> document.get_part_of_speech_tags()[:4]
[('They', 'PRP'), ('refuse', 'VBP'), ('to', 'TO'), ('permit', 'VB')]
>>> document.get_part_of_speech_tags()[-4:]
[('the', 'DT'), ('refuse', 'NN'), ('permit', 'NN'), ('.', '.')]
"""
if self._part_of_speech_tags is not None:
return self._part_of_speech_tags
common.download_nltk_package_if_not_present('tokenizers/punkt')
common.download_nltk_package_if_not_present('taggers/averaged_perceptron_tagger')
text = nltk.word_tokenize(self.text)
pos_tags = nltk.pos_tag(text)
self._part_of_speech_tags = pos_tags
return pos_tags
def get_part_of_speech_words(self, words, remove_swords=True):
"""
A helper method for retrieving the number of occurrences of input words keyed to their
        NLTK tag values (e.g., 'NN' for noun).
:param words: a list of strings.
:param remove_swords: optional boolean, remove stop words from return.
:return: a dictionary keying NLTK tag strings to Counter instances.
>>> from gender_analysis.text.document import Document
>>> from pathlib import Path
>>> from gender_analysis.testing.common import TEST_DATA_DIR
>>> document_filepath = Path(TEST_DATA_DIR, 'document_test_files', 'test_text_9.txt')
>>> document_metadata = {'filename': 'test_text_2.txt', 'filepath': document_filepath}
>>> test_document = Document(document_metadata)
>>> test_document.get_part_of_speech_words(['peace', 'died', 'beautiful', 'foobar'])
{'JJ': Counter({'beautiful': 3}), 'VBD': Counter({'died': 1}), 'NN': Counter({'peace': 1})}
"""
common.download_nltk_package_if_not_present('corpora/stopwords')
stop_words = set(nltk.corpus.stopwords.words('english'))
document_pos_tags = self.get_part_of_speech_tags()
words_set = {word.lower() for word in words}
output = {}
for token, tag in document_pos_tags:
lowered_token = token.lower()
            if remove_swords is True and lowered_token in stop_words:
continue
if token not in words_set:
continue
if tag not in output:
output[tag] = Counter()
output[tag][lowered_token] += 1
return output
def update_metadata(self, new_metadata):
"""
Updates the metadata of the document without requiring a complete reloading
of the text and other properties.
'filename' cannot be updated with this method.
:param new_metadata: dict of new metadata to apply to the document
:return: None
This can be used to correct mistakes in the metadata:
>>> from gender_analysis import Document
>>> from gender_analysis.testing.common import TEST_CORPUS_PATH
>>> from pathlib import Path
>>> metadata = {'filename': 'aanrud_longfrock.txt',
... 'filepath': Path(TEST_CORPUS_PATH, 'aanrud_longfrock.txt'),
... 'date': '2098'}
>>> d = Document(metadata)
>>> new_metadata = {'date': '1903'}
>>> d.update_metadata(new_metadata)
>>> d.date
1903
Or it can be used to add completely new attributes:
>>> new_attribute = {'cookies': 'chocolate chip'}
>>> d.update_metadata(new_attribute)
>>> d.cookies
'chocolate chip'
"""
if not isinstance(new_metadata, dict):
raise ValueError(
f'new_metadata must be a dictionary of metadata keys, not type {type(new_metadata)}'
)
if 'filename' in new_metadata and new_metadata['filename'] != self.filename:
raise KeyError(
'You cannot update the filename of a document; '
f'consider removing {str(self)} from the Corpus object '
'and adding the document again with the updated filename'
)
for key in new_metadata:
if key == 'date':
try:
new_metadata[key] = int(new_metadata[key])
except ValueError as err:
raise ValueError(
f"the metadata field 'date' must be a number for document {self.filename},"
f" not '{new_metadata['date']}'"
) from err
setattr(self, key, new_metadata[key])
|
94077
|
import arcpy
import pandas as pd
import os
fc = r"C:\\temp\\GasPipelineEnterpriseDataManagement\\Databases\\UPDM_UtilityNetwork.gdb\\UtilityNetwork\\PipelineLine"
field_group_name = 'Limit Material By Asset Type'
def view_cav(table, subtype_field):
index = ['fieldGroupName', 'subtype', 'isRetired', 'id']
data = {}
for cav in arcpy.da.ListContingentValues(table):
contingent_value = {k: getattr(cav, k, None) for k in index}
for field in cav.values:
contingent_value[field.name] = dict(CODED_VALUE=field.code,
RANGE=field.range,
ANY='|ANY|',
NULL='<NULL>')[field.type]
data.setdefault(cav.fieldGroupName, []).append(contingent_value)
return [pd.DataFrame(values).set_index(index).rename_axis(index={'subtype': subtype_field}).fillna('<NULL>') for
values in data.values()]
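# Each returned DataFrame holds the contingent values of one field group, indexed by
# (fieldGroupName, <subtype field>, isRetired, id), with '<NULL>' standing in for empty codes.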
desc = arcpy.Describe(fc)
for df in view_cav(fc, desc.subtypeFieldName):
if field_group_name in df.index:
subtypes = set()
valid_combos = []
df = df.reset_index().drop(['fieldGroupName', 'id'], axis=1)
df = df[df['isRetired'] == False].drop(['isRetired'], axis=1)
for row in df.itertuples(index=False):
valid_combos.append("::".join(map(str, row)).replace('<NULL>', ''))
subtypes.add(str(row[0]))
subtypes = sorted(subtypes)
field_list = [f'$feature.{fld}' for fld in df.columns]
func = f'''
// Assigned To: {os.path.basename(fc)}
// Type: Constraint
// Name: {field_group_name}
// Description: Limit value combinations using the fields {', '.join(list(df.columns))}
// Subtypes: All
// Error: 5601
// Error Message: Incompatible types for {', '.join(list(df.columns))}
// Trigger: Insert, Update
// ************* User Variables *************
// This section has the functions and variables that need to be adjusted based on your implementation
var valid_asset_groups = [{', '.join(subtypes)}];
if (indexof(valid_asset_groups, $feature.{desc.subtypeFieldName}) == -1) {{
return true;
}}
var feature_fields = [{', '.join(field_list)}];
var valid_values = {valid_combos};
// ************* End User Variables Section *************
function splice(arr, start, end) {{
var new_arr = [];
var k = 0;
for (var i = start; i < end; i++) {{
new_arr[k++] = arr[i];
}}
return new_arr;
}}
function join_array(a, b) {{
var new_arr = [];
var k = 0;
for (var i in a) {{
new_arr[k++] = a[i];
}}
for (var i in b) {{
new_arr[k++] = b[i];
}}
return new_arr;
}}
var feature_values = [Concatenate(feature_fields, '::')];
var any_sent = '|ANY|';
var fld_count = Count(feature_fields);
for (var i = 0; i < fld_count; i++) {{
var start_arr = splice(feature_fields, 0, i)
start_arr[Count(start_arr)] = any_sent;
var end_arr = splice(feature_fields, i + 1, fld_count)
feature_values[i + 1] = Concatenate(join_array(start_arr, end_arr), '::')
}}
var match_found = false;
for (var i = 0; i < Count(feature_values); i++){{
if (IndexOf(valid_values, feature_values[i]) > -1) {{
match_found = true;
break;
}}
}}
if (match_found == false) {{
return {{"errorMessage": "The selected attributes for {', '.join(list(df.columns))} are not valid."}}
}}
return true;
'''
print(func)
break
|
94079
|
import itertools
class Solution:
def combinationSum(self, k, n):
ls = []
for c in itertools.combinations(range(1, 10), k):
s = sum(c)
            if s == n:
                ls.append(list(c))
return ls
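# Minimal usage sketch (Combination Sum III): choose k distinct digits from 1-9 summing to n.
if __name__ == "__main__":
    assert Solution().combinationSum(3, 9) == [[1, 2, 6], [1, 3, 5], [2, 3, 4]]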
|
94095
|
from reprojection import runSuperGlueSinglePair,image_pair_candidates, runSIFTSinglePair
from ray_dist_loss import preprocess_match, proj_ray_dist_loss_single
import torch
import numpy as np
import os
from random import random
import torchvision.transforms as TF
import matplotlib.pyplot as plt
tol=1e-4
match_num = 4
run_unit_test = lambda args, kwargs, test_name: None if not args.debug else \
test_name(**kwargs)
def unit_test_matches(**kwargs):
msg = "Failed to pass the unit test named matches"
print("Starting Unit Test : matches")
dirname = "_unit_test_matches_result"
# Check whether argument is currently provided.
assert "args" in kwargs.keys(), msg
assert "result" in kwargs.keys(), msg
assert "img_i" in kwargs.keys(), msg
assert "img_j" in kwargs.keys(), msg
assert "img_i_idx" in kwargs.keys(), msg
assert "img_j_idx" in kwargs.keys(), msg
args= kwargs["args"]
result = kwargs["result"]
img_i, img_j = kwargs["img_i"], kwargs["img_j"]
img_i_idx, img_j_idx = kwargs["img_i_idx"], kwargs["img_j_idx"]
kps1, kps2 = result
W = img_i.shape[1]
# Draw matches and save them
assert hasattr(args, "datadir"), msg
scene_name = args.datadir.split("/")[-1]
scene_path = os.path.join(dirname, scene_name)
os.makedirs(scene_path, exist_ok=True)
img_name = "{}_{}.png".format(img_i_idx, img_j_idx)
img_path = os.path.join(scene_path, img_name)
img_cat = torch.cat([img_i, img_j], dim=1)
img_cat_pil = TF.ToPILImage()(img_cat.permute(2, 0, 1))
plt.imshow(img_cat_pil)
i_visualize = np.random.choice(range(len(kps1)), match_num)
for i in i_visualize:
kp1, kp2 = kps1[i].cpu().numpy(), kps2[i].cpu().numpy()
color = (random(), random(), random())
plt.plot([kp1[0], kp2[0]+W], [kp1[1], kp2[1]], c=color, lw=2)
plt.savefig(img_path)
plt.close()
def projected_ray_distance_evaluation(
images,
index_list,
args,
ray_fun,
ray_fun_gt,
H,
W,
mode,
matcher,
gt_intrinsic,
gt_extrinsic,
method,
device,
intrinsic=None,
extrinsic=None,
camera_model=None,
i_map=None,
):
prd_list = []
match_fun = runSuperGlueSinglePair if args.matcher == "superglue" else \
runSIFTSinglePair
extrinsic_gt_numpy = gt_extrinsic[index_list].cpu().numpy()
with torch.no_grad():
feasible_image_pairs = image_pair_candidates(
extrinsic_gt_numpy, args, index_list
)
for img_i in feasible_image_pairs.keys():
for img_j in feasible_image_pairs[img_i]:
if img_i >= img_j:
continue
result = match_fun(
matcher,
images[img_i],
images[img_j],
0,
args
)
kps0_list, kps1_list = preprocess_match(result)
if kps0_list is None and kps1_list is None:
continue
result = kps0_list, kps1_list
kwargs_unit_test = {
"args": args,
"result": result,
"img_i": images[img_i],
"img_j": images[img_j],
"img_i_idx": img_i,
"img_j_idx": img_j
}
run_unit_test(
args, kwargs_unit_test, unit_test_matches
)
if mode != "train":
# Acquiring correct matches using the ground truth camera info
# In the training mode, we don't use the ground truth information.
rays_i_gt = ray_fun_gt(
H=H, W=W,focal=gt_intrinsic[0][0],
extrinsic=gt_extrinsic[img_i], kps_list=kps0_list
)
rays_j_gt = ray_fun_gt(
H=H, W=W,focal=gt_intrinsic[0][0],
extrinsic=gt_extrinsic[img_j], kps_list=kps1_list
)
filter_idx = filter_matches_with_gt(
kps0_list=kps0_list,
kps1_list=kps1_list,
H=H,
W=W,
gt_intrinsic=gt_intrinsic,
gt_extrinsic=gt_extrinsic[[img_i, img_j]],
rays0=rays_i_gt,
rays1=rays_j_gt,
args=args,
device=device,
method=method
)
kps0_list = kps0_list[filter_idx]
kps1_list = kps1_list[filter_idx]
if camera_model is None:
# Evaluate with gt_extrinsic for val,test
# Evaluate with noisy_extrinsic for train
extrinsic_evaluate = gt_extrinsic if mode != "train" else \
extrinsic
rays_i = ray_fun(
H=H, W=W, focal=intrinsic[0][0],
extrinsic=extrinsic_evaluate[img_i], kps_list=kps0_list
)
rays_j = ray_fun(
H=H, W=W, focal=intrinsic[0][0],
extrinsic=extrinsic_evaluate[img_j], kps_list=kps1_list
)
projected_ray_dist, _ = proj_ray_dist_loss_single(
kps0_list=kps0_list, kps1_list=kps1_list, img_idx0=img_i,
img_idx1=img_j, rays0=rays_i, rays1=rays_j, mode=mode,
device=device, H=H, W=W, args=args,
intrinsic=gt_intrinsic, extrinsic=extrinsic_evaluate
)
else:
                    # In the train mode, we use the learned camera_model parameters (indexed via i_map)
                    # instead of the ground-truth extrinsics, which are only used for val/test.
extrinsic_evaluate = gt_extrinsic if mode != "train" else \
None
extrinsic_evaluate_i = gt_extrinsic[img_i] if mode != "train" \
else None
extrinsic_evaluate_j = gt_extrinsic[img_j] if mode != "train" \
else None
camera_idx_i = np.where(i_map == img_i)[0][0] \
if mode == "train" else None
camera_idx_j = np.where(i_map == img_j)[0][0] \
if mode == "train" else None
rays_i = ray_fun(
H=H, W=W, camera_model=camera_model,
extrinsic=extrinsic_evaluate_i, kps_list=kps0_list,
idx_in_camera_param=camera_idx_i
)
rays_j = ray_fun(
H=H, W=W, camera_model=camera_model,
extrinsic=extrinsic_evaluate_j, kps_list=kps1_list,
idx_in_camera_param=camera_idx_j
)
projected_ray_dist, _ = proj_ray_dist_loss_single(
kps0_list=kps0_list, kps1_list=kps1_list, img_idx0=img_i,
img_idx1=img_j, rays0=rays_i, rays1=rays_j, mode=mode,
device=device, H=H, W=W, args=args, i_map=i_map,
camera_model=camera_model, extrinsic=extrinsic_evaluate
)
if not torch.isnan(projected_ray_dist):
prd_list.append(projected_ray_dist.item())
prd_list = torch.tensor(prd_list)
return prd_list.mean()
# Since SuperGlue sometimes fails to acquire reliable matches,
# we filter matches using the ground truth information only when
# evaluating PRD on val/test.
def filter_matches_with_gt(
kps0_list,
kps1_list,
W,
H,
gt_intrinsic,
gt_extrinsic,
rays0,
rays1,
args,
method,
device,
eps=1e-6
):
assert method in ["NeRF", "NeRF++"]
assert kps0_list.dim() == 2 and kps1_list.dim() == 2
gt_intrinsic=gt_intrinsic.clone().detach()
# NeRF is using an opposite coordinate.
if method == "NeRF":
gt_intrinsic[0][0] = -gt_intrinsic[0][0]
rays0_o, rays0_d = rays0
rays1_o, rays1_d = rays1
rays0_o, rays0_d = rays0_o.unsqueeze(0), rays0_d.unsqueeze(0)
rays1_o, rays1_d = rays1_o.unsqueeze(0), rays1_d.unsqueeze(0)
gt_extrinsic_inv = torch.inverse(gt_extrinsic.cpu())
gt_extrinsic_inv = gt_extrinsic_inv.to(device)
rays0_d = rays0_d / (rays0_d.norm(p=2, dim=-1)[:, :, None] + eps)
rays1_d = rays1_d / (rays1_d.norm(p=2, dim=-1)[:, :, None] + eps)
rays0_o_world = torch.cat(
[
rays0_o,
torch.ones((rays0_o.shape[:2]), device=device)[:, :, None]
],
dim=-1
)[:, :, :3]
rays1_o_world = torch.cat(
[
rays1_o,
torch.ones((rays1_o.shape[:2]), device=device)[:, :, None]
],
dim=-1
)[:, :, :3]
rays0_d_world = rays0_d[:, :, :3]
rays1_d_world = rays1_d[:, :, :3]
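    # The einsum expressions below solve the standard closest-point problem between the two
    # rays o0 + t0*d0 and o1 + t1*d1: t0 and t1 parameterize the mutually closest points,
    # with eps guarding the denominator for near-parallel rays (r0_r1 close to +-1).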
r0_r1 = torch.einsum("ijk, ijk -> ij", rays0_d_world, rays1_d_world)
t0 = (
torch.einsum(
"ijk, ijk -> ij", rays0_d_world, rays0_o_world - rays1_o_world
) - r0_r1
* torch.einsum(
"ijk, ijk -> ij", rays1_d_world, rays0_o_world - rays1_o_world
)
) / (r0_r1 ** 2 - 1 + eps)
t1 = (
torch.einsum(
"ijk, ijk -> ij", rays1_d_world, rays1_o_world - rays0_o_world
) - r0_r1
* torch.einsum(
"ijk, ijk -> ij", rays0_d_world, rays1_o_world - rays0_o_world
)
) / (r0_r1 ** 2 - 1 + eps)
p0 = t0[:, :, None] * rays0_d_world + rays0_o_world
p1 = t1[:, :, None] * rays1_d_world + rays1_o_world
p0_4d = torch.cat(
[p0, torch.ones((p0.shape[:2]), device=device)[:, :, None]], dim=-1
)
p1_4d = torch.cat(
[p1, torch.ones((p1.shape[:2]), device=device)[:, :, None]], dim=-1
)
p0_proj_to_im1 = torch.einsum(
"ijk, ipk -> ijp", p0_4d, gt_extrinsic_inv[1:]
)
p1_proj_to_im0 = torch.einsum(
"ijk, ipk -> ijp", p1_4d, gt_extrinsic_inv[:-1]
)
p0_norm_im1 = torch.einsum("ijk, pk -> ijp", p0_proj_to_im1, gt_intrinsic)
p1_norm_im0 = torch.einsum("ijk, pk -> ijp", p1_proj_to_im0, gt_intrinsic)
p0_norm_im1_2d = p0_norm_im1[:, :, :2] / (p0_norm_im1[:, :, 2, None] + eps)
p1_norm_im0_2d = p1_norm_im0[:, :, :2] / (p1_norm_im0[:, :, 2, None] + eps)
# Chirality check: remove rays behind cameras
# First, flatten the correspondences
# Find indices of valid rays
valid_t0 = (t0 > 0).flatten()
valid_t1 = (t1 > 0).flatten()
valid = torch.logical_and(valid_t0, valid_t1)
# Second, select losses that are valid
# When using NeRF++
loss0_list = ((p1_norm_im0_2d - kps0_list) ** 2).sum(-1).flatten()
loss1_list = ((p0_norm_im1_2d - kps1_list) ** 2).sum(-1).flatten()
# Remove cloned tensor after the computation
del gt_intrinsic
return torch.logical_and(
torch.logical_and(loss0_list < 1.0, loss1_list < 1.0), valid
)
|
94102
|
from .utils import (
find_offending_time_points,
temporal_variance_mask,
generate_summarize_tissue_mask,
NuisanceRegressor
)
from .nuisance import (
create_regressor_workflow,
create_nuisance_regression_workflow,
filtering_bold_and_regressors
)
from .bandpass import (
bandpass_voxels
)
from .utils.compcor import (
cosine_filter
)
__all__ = [
'create_regressor_workflow',
'create_nuisance_regression_workflow',
'filtering_bold_and_regressors',
'find_offending_time_points',
'temporal_variance_mask',
'generate_summarize_tissue_mask',
'bandpass_voxels',
'cosine_filter'
]
|
94103
|
import librosa
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
script, content_audio_name, style_audio_name, output_audio_name = argv
N_FFT=2048
def read_audio_spectum(filename):
    x, fs = librosa.load(filename, duration=58.04)  # duration chosen so the spectrogram sizes match conveniently
    S = librosa.stft(x, n_fft=N_FFT)
p = np.angle(S)
S = np.log1p(np.abs(S))
return S, fs
style_audio, style_sr = read_audio_spectum(style_audio_name)
content_audio, content_sr = read_audio_spectum(content_audio_name)
output_audio, output_sr = read_audio_spectum(output_audio_name)
print(style_audio.shape)
print(content_audio.shape)
print(output_audio.shape)
plt.figure(figsize=(15,25))
plt.subplot(1,3,1)
plt.title('Content')
plt.imshow(content_audio[:500,:500])
plt.subplot(1,3,2)
plt.title('Style')
plt.imshow(style_audio[:500,:500])
plt.subplot(1,3,3)
plt.title('Result')
plt.imshow(output_audio[:500,:500])
plt.show()
|
94113
|
import six
class InvalidPaddingError(Exception):
pass
class Padding(object):
"""Base class for padding and unpadding."""
def __init__(self, block_size):
self.block_size = block_size
def pad(self, value):
raise NotImplementedError('Subclasses must implement this!')
def unpad(self, value):
raise NotImplementedError('Subclasses must implement this!')
class PKCS5Padding(Padding):
"""Provide PKCS5 padding and unpadding."""
def pad(self, value):
if not isinstance(value, six.binary_type):
value = value.encode()
padding_length = (self.block_size - len(value) % self.block_size)
padding_sequence = padding_length * six.b(chr(padding_length))
value_with_padding = value + padding_sequence
return value_with_padding
def unpad(self, value):
# Perform some input validations.
# In case of error, we throw a generic InvalidPaddingError()
if not value or len(value) < self.block_size:
# PKCS5 padded output will always be at least 1 block size
raise InvalidPaddingError()
if len(value) % self.block_size != 0:
# PKCS5 padded output will be a multiple of the block size
raise InvalidPaddingError()
if isinstance(value, six.binary_type):
padding_length = value[-1]
if isinstance(value, six.string_types):
padding_length = ord(value[-1])
if padding_length == 0 or padding_length > self.block_size:
raise InvalidPaddingError()
def convert_byte_or_char_to_number(x):
return ord(x) if isinstance(x, six.string_types) else x
if any([padding_length != convert_byte_or_char_to_number(x)
for x in value[-padding_length:]]):
raise InvalidPaddingError()
value_without_padding = value[0:-padding_length]
return value_without_padding
class OneAndZeroesPadding(Padding):
"""Provide the one and zeroes padding and unpadding.
This mechanism pads with 0x80 followed by zero bytes.
For unpadding it strips off all trailing zero bytes and the 0x80 byte.
"""
BYTE_80 = 0x80
BYTE_00 = 0x00
def pad(self, value):
if not isinstance(value, six.binary_type):
value = value.encode()
padding_length = (self.block_size - len(value) % self.block_size)
one_part_bytes = six.b(chr(self.BYTE_80))
zeroes_part_bytes = (padding_length - 1) * six.b(chr(self.BYTE_00))
padding_sequence = one_part_bytes + zeroes_part_bytes
value_with_padding = value + padding_sequence
return value_with_padding
def unpad(self, value):
value_without_padding = value.rstrip(six.b(chr(self.BYTE_00)))
value_without_padding = value_without_padding.rstrip(
six.b(chr(self.BYTE_80)))
return value_without_padding
class ZeroesPadding(Padding):
"""Provide zeroes padding and unpadding.
This mechanism pads with 0x00 except the last byte equals
to the padding length. For unpadding it reads the last byte
and strips off that many bytes.
"""
BYTE_00 = 0x00
def pad(self, value):
if not isinstance(value, six.binary_type):
value = value.encode()
padding_length = (self.block_size - len(value) % self.block_size)
zeroes_part_bytes = (padding_length - 1) * six.b(chr(self.BYTE_00))
last_part_bytes = six.b(chr(padding_length))
padding_sequence = zeroes_part_bytes + last_part_bytes
value_with_padding = value + padding_sequence
return value_with_padding
def unpad(self, value):
if isinstance(value, six.binary_type):
padding_length = value[-1]
if isinstance(value, six.string_types):
padding_length = ord(value[-1])
value_without_padding = value[0:-padding_length]
return value_without_padding
class NaivePadding(Padding):
"""Naive padding and unpadding using '*'.
The class is provided only for backwards compatibility.
"""
CHARACTER = six.b('*')
def pad(self, value):
num_of_bytes = (self.block_size - len(value) % self.block_size)
value_with_padding = value + num_of_bytes * self.CHARACTER
return value_with_padding
def unpad(self, value):
value_without_padding = value.rstrip(self.CHARACTER)
return value_without_padding
PADDING_MECHANISM = {
'pkcs5': PKCS5Padding,
'oneandzeroes': OneAndZeroesPadding,
'zeroes': ZeroesPadding,
'naive': NaivePadding
}
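# Minimal usage sketch (illustrative only, not part of the original module):
# round-trip a short byte string through PKCS5 padding with a 16-byte block.
if __name__ == "__main__":
    padder = PADDING_MECHANISM['pkcs5'](block_size=16)
    padded = padder.pad(b'hello')  # appends 11 bytes of b'\x0b'
    assert len(padded) % 16 == 0
    assert padder.unpad(padded) == b'hello'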
|
94148
|
from . import api
from flask import render_template
@api.route('/', methods=['GET'])
def index():
# example of a route with HTML templating
message = "Hello, World"
entries = []
entries.append({"title":"entry 1", "text":"text 1 ?</>"})
entries.append({"title":"entry 2", "text":"text 2 ?</>"})
return render_template('index.html', message=message, entries=entries)
@api.route('/hello', methods=['GET'])
def hello_world():
return 'Hello, World!'
|
94172
|
from datetime import datetime
from utils import build_accuracy
import tempfile
import tensorflow as tf
import tensorflow.contrib.slim as slim
flags = tf.app.flags
FLAGS = flags.FLAGS
class OneStreamTrainer(object):
def __init__(self, model, logger=None, display_freq=1,
learning_rate=0.0001, num_classes=14, num_epochs=1, num_frames=12, temporal_pooling=False):
self.model = model
self.logger = logger
self.display_freq = display_freq
self.learning_rate = learning_rate
self.num_classes = num_classes
self.num_epochs = num_epochs
self.num_frames = num_frames
self.temporal_pooling = temporal_pooling
self.labels = tf.placeholder(tf.float32, [None, num_classes], 'labels')
if self.temporal_pooling:
expanded_shape = [-1, FLAGS.sample_length * num_frames, num_classes]
self.logits = tf.reduce_mean(tf.reshape(self.model.output, shape=expanded_shape), axis=1)
else:
self.logits = self.model.output
# Extract input model shape
self.shape = self.model.network['input'].get_shape().as_list()
# Define loss
self.cross_loss = tf.losses.softmax_cross_entropy(
onehot_labels=self.labels,
logits=self.logits,
scope='cross_loss'
)
self.loss = tf.losses.get_total_loss()
# Define accuracy
self.accuracy = build_accuracy(self.logits, self.labels)
# Initialize counters and stats
self.global_step = tf.train.create_global_step()
# Define optimizer
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_step = slim.learning.create_train_op(total_loss=self.loss,
optimizer=self.optimizer,
global_step=self.global_step,
variables_to_train=self.model.train_vars)
# Initialize model saver
self.saver = tf.train.Saver(max_to_keep=None)
def _get_optimizer_variables(self, optimizer):
        # Collect the optimizer slot variables (e.g. Adam moments) for the
        # trainable variables, skipping slots that do not exist.
        optimizer_vars = [slot for slot in
                          (optimizer.get_slot(var, name)
                           for name in optimizer.get_slot_names()
                           for var in self.model.train_vars)
                          if slot is not None]
optimizer_vars.extend(list(optimizer._get_beta_accumulators()))
return optimizer_vars
def _init_model(self, session):
if FLAGS.init_checkpoint is not None:
# Initialize global step
print('{}: {} - Initializing global step'.format(datetime.now(), FLAGS.exp_name))
session.run(self.global_step.initializer)
print('{}: {} - Done'.format(datetime.now(), FLAGS.exp_name))
# Initialize optimizer variables
print('{}: {} - Initializing optimizer variables'.format(datetime.now(), FLAGS.exp_name))
optimizer_vars = self._get_optimizer_variables(self.optimizer)
optimizer_init_op = tf.variables_initializer(optimizer_vars)
session.run(optimizer_init_op)
print('{}: {} - Done'.format(datetime.now(), FLAGS.exp_name))
# Initialize model
print('{}: {} - Initializing model'.format(datetime.now(), FLAGS.exp_name))
self.model.init_model(session, FLAGS.init_checkpoint)
print('{}: {} - Done'.format(datetime.now(), FLAGS.exp_name))
elif FLAGS.restore_checkpoint is not None:
# Restore session from checkpoint
self._restore_model(session)
else:
# Initialize all variables
print('{}: {} - Initializing full model'.format(datetime.now(), FLAGS.exp_name))
session.run(tf.global_variables_initializer())
print('{}: {} - Done'.format(datetime.now(), FLAGS.exp_name))
def _restore_model(self, session):
# Restore model
print('{}: {} - Restoring session'.format(datetime.now(), FLAGS.exp_name))
saver = tf.train.Saver()
saver.restore(session, FLAGS.restore_checkpoint)
print('{}: {} - Done'.format(datetime.now(), FLAGS.exp_name))
def train(self, train_data=None, valid_data=None):
# Assert training and validation sets are not None
assert train_data is not None
assert valid_data is not None
# Add trainable variables to the summary
# for var in self.model.train_vars:
# self.logger.log_histogram(var.name, var)
# Add image/sounds to the summary
# self.logger.log_image('input', self.model.network['input'])
# self.logger.log_sound('input', self.model.network['input'])
# Add the losses to summary
self.logger.log_scalar('cross_entropy_loss', self.cross_loss)
self.logger.log_scalar('train_loss', self.loss)
# Add the accuracy to the summary
self.logger.log_scalar('train_accuracy', self.accuracy)
# Merge all summaries together
self.logger.merge_summary()
# Create a re-initializable iterator given the dataset structure
iterator = tf.data.Iterator.from_structure(train_data.data.output_types,
train_data.data.output_shapes)
next_batch = iterator.get_next()
# Create operation for initializing the iterator
training_init_op = iterator.make_initializer(train_data.data)
# Start training session
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True))) as session:
# Initialize model either randomly or with a checkpoint
self._init_model(session)
# Add the model graph to TensorBoard
self.logger.write_graph(session.graph)
start_epoch = int(tf.train.global_step(session, self.global_step) / train_data.total_batches)
best_epoch = -1
best_accuracy = -1.0
best_loss = -1.0
# For each epoch
for epoch in range(start_epoch, start_epoch + self.num_epochs):
# Initialize counters and stats
step = 0
# Initialize iterator over the training set
session.run(training_init_op, feed_dict={train_data.seed: epoch})
# For each mini-batch
while True:
try:
# Retrieve batch according to model needs
input_data, labels_data = self._retrieve_batch(session, next_batch)
# Compute mini-batch error
if step % self.display_freq == 0:
train_loss, train_accuracy, train_summary = session.run(
[self.loss, self.accuracy, self.logger.summary_op],
feed_dict={self.model.network['input']: input_data,
self.labels: labels_data,
self.model.network['keep_prob']: 1.0,
self.model.network['is_training']: False})
print('{}: {} - Iteration: [{:3}]\t Training_Loss: {:6f}\t Training_Accuracy: {:6f}'.format(
datetime.now(), FLAGS.exp_name, step, train_loss, train_accuracy))
self.logger.write_summary(train_summary, tf.train.global_step(session, self.global_step))
# Forward batch through the network
session.run(self.train_step, feed_dict={self.model.network['input']: input_data,
self.labels: labels_data,
self.model.network['keep_prob']: 0.5,
self.model.network['is_training']: True})
# Update counters and stats
step += 1
except tf.errors.OutOfRangeError:
break
# Save model
if FLAGS.save_best_only is False:
self._save_checkpoint(session, epoch)
# Evaluate model on validation set
total_loss, total_accuracy = self._valid(session, valid_data)
print('{}: {} - Epoch: {}\t Validation_Loss: {:6f}\t Validation_Accuracy: {:6f}'.format(datetime.now(),
FLAGS.exp_name,
epoch,
total_loss,
total_accuracy))
self.logger.write_summary(tf.Summary(value=[
tf.Summary.Value(tag="valid_loss", simple_value=total_loss),
tf.Summary.Value(tag="valid_accuracy", simple_value=total_accuracy)
]), epoch)
self.logger.flush_writer()
if total_accuracy >= best_accuracy:
best_epoch = epoch
best_accuracy = total_accuracy
best_loss = total_loss
self._save_checkpoint(session)
print('{}: {} - Best Epoch: {}\t Validation_Loss: {:6f}\t Validation_Accuracy: {:6f}'.format(datetime.now(),
FLAGS.exp_name,
best_epoch,
best_loss,
best_accuracy))
def _save_checkpoint(self, session, epoch=None):
checkpoint_dir = FLAGS.checkpoint_dir
checkpoint_dir = tempfile.mkdtemp() if checkpoint_dir is None or not tf.gfile.Exists(checkpoint_dir) else checkpoint_dir
checkpoint_dir = '{}/{}'.format(checkpoint_dir, FLAGS.exp_name)
model_name = 'model.ckpt' if epoch is None else 'epoch_{}.ckpt'.format(epoch)
if not tf.gfile.Exists(checkpoint_dir):
tf.gfile.MakeDirs(checkpoint_dir)
print('{}: {} - Saving model to {}/{}'.format(datetime.now(), FLAGS.exp_name, checkpoint_dir, model_name))
self.saver.save(session, '{}/{}'.format(checkpoint_dir, model_name))
def _valid(self, session, valid_data):
# Create a re-initializable iterator given the dataset structure
iterator = tf.data.Iterator.from_structure(valid_data.data.output_types,
valid_data.data.output_shapes)
next_batch = iterator.get_next()
# Create operation for initializing the iterator
validation_init_op = iterator.make_initializer(valid_data.data)
# Initialize iterator over the training set
session.run(validation_init_op)
return self._evaluate(session, next_batch)
def _evaluate(self, session, next_batch):
# Initialize counters and stats
loss_sum = 0
accuracy_sum = 0
data_set_size = 0
# For each mini-batch
while True:
try:
# Retrieve batch according to model needs
input_data, labels_data = self._retrieve_batch(session, next_batch)
# Compute batch loss and accuracy
batch_loss, batch_accuracy = session.run([self.loss, self.accuracy],
feed_dict={self.model.network['input']: input_data,
self.labels: labels_data,
self.model.network['keep_prob']: 1.0,
self.model.network['is_training']: False})
# Update counters
data_set_size += labels_data.shape[0]
loss_sum += batch_loss * labels_data.shape[0]
accuracy_sum += batch_accuracy * labels_data.shape[0]
except tf.errors.OutOfRangeError:
break
total_loss = loss_sum / data_set_size
total_accuracy = accuracy_sum / data_set_size
return total_loss, total_accuracy
def _retrieve_batch(self, session, next_batch):
if FLAGS.model == 'ResNet50' or FLAGS.model == 'SeeNet':
data = tf.reshape(next_batch[2], shape=[-1, self.shape[1], self.shape[2], self.shape[3]])
if self.temporal_pooling:
labels = tf.reshape(next_batch[3], shape=[-1, self.num_classes])
else:
# Replicate labels to match the number of frames
multiples = [1, FLAGS.sample_length * self.num_frames]
labels = tf.reshape(tf.tile(next_batch[3], multiples), shape=[-1, self.num_classes])
elif FLAGS.model == 'TemporalResNet50':
data = tf.reshape(next_batch[2], shape=[-1, self.shape[1], self.shape[2], self.shape[3]])
labels = tf.reshape(next_batch[3], shape=[-1, self.num_classes])
elif FLAGS.model == 'DualCamNet' or FLAGS.model == 'DualCamHybridNet':
data = tf.reshape(next_batch[0], shape=[-1, self.shape[1], self.shape[2], self.shape[3]])
if self.temporal_pooling:
labels = tf.reshape(next_batch[3], shape=[-1, self.num_classes])
else:
# Replicate labels to match the number of frames
multiples = [1, FLAGS.sample_length * self.num_frames]
labels = tf.reshape(tf.tile(next_batch[3], multiples), shape=[-1, self.num_classes])
elif FLAGS.model == 'HearNet':
data = tf.reshape(next_batch[1], shape=[-1, self.shape[1], self.shape[2], self.shape[3]])
labels = tf.reshape(next_batch[3], shape=[-1, self.num_classes])
elif FLAGS.model == 'SoundNet5':
data = tf.reshape(next_batch[1], shape=[-1, self.shape[1], self.shape[2], self.shape[3]])
labels = tf.reshape(next_batch[3], shape=[-1, self.num_classes])
else:
raise ValueError('Unknown model type')
return session.run([data, labels])
def test(self, test_data=None):
# Assert testing set is not None
assert test_data is not None
# Create a one-shot iterator
iterator = test_data.data.make_one_shot_iterator()
next_batch = iterator.get_next()
# Start training session
with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))) as session:
# Initialize model either randomly or with a checkpoint if given
self._restore_model(session)
# Evaluate model over the testing set
test_loss, test_accuracy = self._evaluate(session, next_batch)
print('{}: {} - Testing_Loss: {:6f}\t Testing_Accuracy: {:6f}'.format(datetime.now(),
FLAGS.exp_name,
test_loss,
test_accuracy))
return test_loss, test_accuracy
|
94175
|
from django.contrib import admin
from .models import *
admin.site.register(Location)
admin.site.register(Conference)
admin.site.register(Section)
admin.site.register(Speaker)
admin.site.register(Lecture)
admin.site.register(Speech)
admin.site.register(Comment)
# Register your models here.
|
94212
|
from dpipe.predict.functional import *
def test_chain_decorators():
def append(num):
def decorator(func):
def wrapper():
return func() + [num]
return wrapper
return decorator
@append(1)
@append(2)
@append(3)
def f():
return []
chained = chain_decorators(
append(1), append(2), append(3),
predict=lambda: []
)
assert f() == chained()
|
94235
|
import os
import gettext
from . import db
LOCALEDIR = os.path.join(os.path.dirname(__file__), 'locales')
langcodes = [
f for f in os.listdir(LOCALEDIR)
if os.path.isdir(os.path.join(LOCALEDIR, f))
]
translates = {
lang: gettext.translation('main', LOCALEDIR, [lang])
for lang in langcodes
}
languages = {lang: translates[lang].info()['language'] for lang in langcodes}
def I18nHandler(database: db.Database):
def withi18n(func):
async def wrapper(event):
if not event.is_private:
return await func(event=event, _=translates['en'].gettext)
user = await event.get_chat()
translate_telegram = translates[user.lang_code] \
if user.lang_code in translates else translates['en']
            # When a user sends /start to the bot for the first time
if database.get_user_state(user) is None:
return await func(event=event, _=translate_telegram.gettext)
db_lang = database.get_user_lang(user.id)
if db_lang in ['follow', None]:
return await func(event=event, _=translate_telegram.gettext)
return await func(event=event, _=translates[db_lang].gettext)
return wrapper
return withi18n
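# Illustrative usage (names below are hypothetical, not part of this module):
# the returned decorator injects a gettext callable `_` resolved from the
# user's stored language or their Telegram client language, e.g.
#
#   i18n = I18nHandler(database)
#
#   @i18n
#   async def on_start(event, _):
#       await event.respond(_("Welcome!"))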
|
94242
|
from graphviz import Digraph
dot = Digraph(
comment='First flowchart',
name='<NAME>',
filename='hello_world.dot',
)
dot.node('1', 'Inicio')
dot.node('2', '"<NAME>"', shape='invhouse')
#dot.node('2', '"<NAME>"', shapefile='assets/print.svg')
dot.node('3', 'Fin')
dot.node('4', 'Fin 2')
dot.edge('1', '2')
#dot.attr('graph', splines='ortho', nodesep='1')
dot.edge('2', '3') # constraint='false')
dot.edge('2', '4')
dot.format = 'png'
dot.render(view=True)
|
94307
|
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import names
from nltk.stem import WordNetLemmatizer
import glob
import os
import numpy as np
file_path = 'enron1/ham/0007.1999-12-14.farmer.ham.txt'
with open(file_path, 'r') as infile:
ham_sample = infile.read()
print(ham_sample)
file_path = 'enron1/spam/0058.2003-12-21.GP.spam.txt'
with open(file_path, 'r') as infile:
spam_sample = infile.read()
print(spam_sample)
cv = CountVectorizer(stop_words="english", max_features=500)
emails, labels = [], []
file_path = 'enron1/spam/'
for filename in glob.glob(os.path.join(file_path, '*.txt')):
with open(filename, 'r', encoding = "ISO-8859-1") as infile:
emails.append(infile.read())
labels.append(1)
file_path = 'enron1/ham/'
for filename in glob.glob(os.path.join(file_path, '*.txt')):
with open(filename, 'r', encoding = "ISO-8859-1") as infile:
emails.append(infile.read())
labels.append(0)
def letters_only(astr):
return astr.isalpha()
all_names = set(names.words())
lemmatizer = WordNetLemmatizer()
def clean_text(docs):
cleaned_docs = []
for doc in docs:
cleaned_docs.append(' '.join([lemmatizer.lemmatize(word.lower())
for word in doc.split()
if letters_only(word)
and word not in all_names]))
return cleaned_docs
cleaned_emails = clean_text(emails)
term_docs = cv.fit_transform(cleaned_emails)
print(term_docs [0])
feature_mapping = cv.vocabulary_
feature_names = cv.get_feature_names()
def get_label_index(labels):
from collections import defaultdict
label_index = defaultdict(list)
for index, label in enumerate(labels):
label_index[label].append(index)
return label_index
def get_prior(label_index):
""" Compute prior based on training samples
Args:
label_index (grouped sample indices by class)
Returns:
dictionary, with class label as key, corresponding prior as the value
"""
prior = {label: len(index) for label, index in label_index.items()}
total_count = sum(prior.values())
for label in prior:
prior[label] /= float(total_count)
return prior
def get_likelihood(term_document_matrix, label_index, smoothing=0):
""" Compute likelihood based on training samples
Args:
term_document_matrix (sparse matrix)
label_index (grouped sample indices by class)
smoothing (integer, additive Laplace smoothing parameter)
Returns:
dictionary, with class as key, corresponding conditional probability P(feature|class) vector as value
"""
likelihood = {}
for label, index in label_index.items():
likelihood[label] = term_document_matrix[index, :].sum(axis=0) + smoothing
likelihood[label] = np.asarray(likelihood[label])[0]
total_count = likelihood[label].sum()
likelihood[label] = likelihood[label] / float(total_count)
return likelihood
feature_names[:5]
def get_posterior(term_document_matrix, prior, likelihood):
""" Compute posterior of testing samples, based on prior and likelihood
Args:
term_document_matrix (sparse matrix)
prior (dictionary, with class label as key, corresponding prior as the value)
likelihood (dictionary, with class label as key, corresponding conditional probability vector as value)
Returns:
dictionary, with class label as key, corresponding posterior as value
"""
num_docs = term_document_matrix.shape[0]
posteriors = []
for i in range(num_docs):
# posterior is proportional to prior * likelihood
# = exp(log(prior * likelihood))
# = exp(log(prior) + log(likelihood))
posterior = {key: np.log(prior_label) for key, prior_label in prior.items()}
for label, likelihood_label in likelihood.items():
term_document_vector = term_document_matrix.getrow(i)
counts = term_document_vector.data
indices = term_document_vector.indices
for count, index in zip(counts, indices):
posterior[label] += np.log(likelihood_label[index]) * count
# exp(-1000):exp(-999) will cause zero division error,
# however it equates to exp(0):exp(1)
min_log_posterior = min(posterior.values())
for label in posterior:
try:
posterior[label] = np.exp(posterior[label] - min_log_posterior)
except:
# if one's log value is excessively large, assign it infinity
posterior[label] = float('inf')
# normalize so that all sums up to 1
sum_posterior = sum(posterior.values())
for label in posterior:
if posterior[label] == float('inf'):
posterior[label] = 1.0
else:
posterior[label] /= sum_posterior
posteriors.append(posterior.copy())
return posteriors
label_index = get_label_index(labels)
prior = get_prior(label_index)
smoothing = 1
likelihood = get_likelihood(term_docs, label_index, smoothing)
emails_test = [
'''Subject: flat screens
hello ,
please call or contact regarding the other flat screens requested .
<NAME> - eb 3132 b
<NAME> - eb 3132 a
also the sun blocker that was taken away from eb 3131 a .
trisha should two monitors also michael .
thanks
<NAME>''',
'''Subject: having problems in bed ? we can help !
cialis allows men to enjoy a fully normal sex life without having to plan the sexual act .
if we let things terrify us , life will not be worth living .
brevity is the soul of lingerie .
suspicion always haunts the guilty mind .''',
]
cleaned_test = clean_text(emails_test)
term_docs_test = cv.transform(cleaned_test)
posterior = get_posterior(term_docs_test, prior, likelihood)
print(posterior)
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(cleaned_emails, labels, test_size=0.33, random_state=42)
len(X_train), len(Y_train)
len(X_test), len(Y_test)
term_docs_train = cv.fit_transform(X_train)
label_index = get_label_index(Y_train)
prior = get_prior(label_index)
likelihood = get_likelihood(term_docs_train, label_index, smoothing)
term_docs_test = cv.transform(X_test)
posterior = get_posterior(term_docs_test, prior, likelihood)
correct = 0.0
for pred, actual in zip(posterior, Y_test):
if actual == 1:
if pred[1] >= 0.5:
correct += 1
elif pred[0] > 0.5:
correct += 1
print('The accuracy on {0} testing samples is: {1:.1f}%'.format(len(Y_test), correct/len(Y_test)*100))
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB(alpha=1.0, fit_prior=True)
clf.fit(term_docs_train, Y_train)
prediction_prob = clf.predict_proba(term_docs_test)
prediction_prob[0:10]
prediction = clf.predict(term_docs_test)
prediction[:10]
accuracy = clf.score(term_docs_test, Y_test)
print('The accuracy using MultinomialNB is: {0:.1f}%'.format(accuracy*100))
from sklearn.metrics import confusion_matrix
confusion_matrix(Y_test, prediction, labels=[0, 1])
from sklearn.metrics import precision_score, recall_score, f1_score
precision_score(Y_test, prediction, pos_label=1)
recall_score(Y_test, prediction, pos_label=1)
f1_score(Y_test, prediction, pos_label=1)
f1_score(Y_test, prediction, pos_label=0)
from sklearn.metrics import classification_report
report = classification_report(Y_test, prediction)
print(report)
pos_prob = prediction_prob[:, 1]
thresholds = np.arange(0.0, 1.2, 0.1)
true_pos, false_pos = [0]*len(thresholds), [0]*len(thresholds)
for pred, y in zip(pos_prob, Y_test):
for i, threshold in enumerate(thresholds):
if pred >= threshold:
if y == 1:
true_pos[i] += 1
else:
false_pos[i] += 1
else:
break
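# The hard-coded denominators are the class sizes in this particular test
# split: 516 positive (spam) and 1191 negative (ham) samples.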
true_pos_rate = [tp / 516.0 for tp in true_pos]
false_pos_rate = [fp / 1191.0 for fp in false_pos]
import matplotlib.pyplot as plt
plt.figure()
lw = 2
plt.plot(false_pos_rate, true_pos_rate, color='darkorange',
lw=lw)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
from sklearn.metrics import roc_auc_score
roc_auc_score(Y_test, pos_prob)
from sklearn.model_selection import StratifiedKFold
k = 10
k_fold = StratifiedKFold(n_splits=k)
# convert to numpy array for more efficient slicing
cleaned_emails_np = np.array(cleaned_emails)
labels_np = np.array(labels)
max_features_option = [2000, 4000, 8000]
smoothing_factor_option = [0.5, 1.0, 1.5, 2.0]
fit_prior_option = [True, False]
auc_record = {}
for train_indices, test_indices in k_fold.split(cleaned_emails, labels):
X_train, X_test = cleaned_emails_np[train_indices], cleaned_emails_np[test_indices]
Y_train, Y_test = labels_np[train_indices], labels_np[test_indices]
for max_features in max_features_option:
if max_features not in auc_record:
auc_record[max_features] = {}
cv = CountVectorizer(stop_words="english", max_features=max_features)
term_docs_train = cv.fit_transform(X_train)
term_docs_test = cv.transform(X_test)
for smoothing_factor in smoothing_factor_option:
if smoothing_factor not in auc_record[max_features]:
auc_record[max_features][smoothing_factor] = {}
for fit_prior in fit_prior_option:
clf = MultinomialNB(alpha=smoothing_factor, fit_prior=fit_prior)
clf.fit(term_docs_train, Y_train)
prediction_prob = clf.predict_proba(term_docs_test)
pos_prob = prediction_prob[:, 1]
auc = roc_auc_score(Y_test, pos_prob)
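                    # accumulate AUC over the k folds; the report below divides by k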
auc_record[max_features][smoothing_factor][fit_prior] \
= auc + auc_record[max_features][smoothing_factor].get(fit_prior, 0.0)
print(auc_record)
print('max features smoothing fit prior auc')
for max_features, max_feature_record in auc_record.items():
for smoothing, smoothing_record in max_feature_record.items():
for fit_prior, auc in smoothing_record.items():
print(' {0} {1} {2} {3:.4f}'.format(max_features, smoothing, fit_prior, auc/k))
|
94332
|
from bitmovin_api_sdk.encoding.encodings.live.insertable_content.schedule.schedule_api import ScheduleApi
|
94355
|
import contextlib
import os
import re
import shlex
import subprocess
from behave import *
ANSI_ESCAPE = re.compile(r"\x1B\[[0-?]*[ -/]*[@-~]")
def strip_ansi_codes(inp):
return ANSI_ESCAPE.sub("", inp)
@contextlib.contextmanager
def pushd(new_dir):
old_dir = os.getcwd()
os.chdir(new_dir)
yield
os.chdir(old_dir)
@given("a {file_name} file")
def create_file(context, file_name):
with pushd(context.working_directory):
with open(file_name, "w") as f:
f.write(context.text)
@when("running {command}")
def running(context, command):
with pushd(context.working_directory):
try:
output = subprocess.check_output(
shlex.split(command), stderr=subprocess.STDOUT
)
context.exit_code = 0
except subprocess.CalledProcessError as err:
output = err.output
context.exit_code = err.returncode
if isinstance(output, bytes):
output = output.decode("utf-8")
context.output = output
@then("it has an exit code of {exit_code:d}")
def exits_with(context, exit_code):
assert context.exit_code == exit_code, "Expected: %d. Got %d. Output was: %s" % (
exit_code,
context.exit_code,
context.output,
)
@then("it outputs the version")
def outputs_version(context):
with open("VERSION") as v:
expected_version = v.read()
assert context.output == expected_version, "Expected: '%s'. Got: '%s'." % (
expected_version,
context.output,
)
@then("the output is")
@then("it outputs")
def outputs(context):
output = strip_ansi_codes(context.output)
assert output == context.text, "Expected: '%s'. Got: '%s'." % (context.text, output)
@then("the output contains")
def output_contains(context):
output = strip_ansi_codes(context.output)
assert context.text in output, "Expected '%s' to contain '%s'." % (
output,
context.text,
)
@then("the output contains only once")
def output_contains_only_once(context):
output = strip_ansi_codes(context.output)
needle = context.text
idx = output.find(needle)
    assert idx >= 0, "Expected '%s' to contain '%s'." % (output, needle)
assert (
output[idx + 1 :].find(needle) < 0
), "Expected '%s' to contain only once '%s'." % (output, needle)
|
94396
|
import numpy as np
from .qnumber import is_qsparse
__all__ = ['retained_bond_indices', 'split_matrix_svd', 'qr']
def retained_bond_indices(s, tol):
"""
Indices of retained singular values based on given tolerance.
"""
w = np.linalg.norm(s)
if w == 0:
return np.array([], dtype=int)
# normalized squares
s = (s / w)**2
# accumulate values from smallest to largest
sort_idx = np.argsort(s)
s[sort_idx] = np.cumsum(s[sort_idx])
return np.where(s > tol)[0]
def split_matrix_svd(A, q0, q1, tol):
"""
Split a matrix by singular value decomposition,
taking block sparsity structure dictated by quantum numbers into account,
and truncate small singular values based on tolerance.
"""
assert A.ndim == 2
assert len(q0) == A.shape[0]
assert len(q1) == A.shape[1]
assert is_qsparse(A, [q0, -q1])
# find common quantum numbers
qis = np.intersect1d(q0, q1)
if len(qis) == 0:
assert np.linalg.norm(A) == 0
# special case: no common quantum numbers;
# use dummy intermediate dimension 1
u = np.zeros((A.shape[0], 1), dtype=A.dtype)
v = np.zeros((1, A.shape[1]), dtype=A.dtype)
s = np.zeros(1)
# single column of 'u' should have norm 1
if A.shape[0] > 0:
u[0, 0] = 1
# ensure non-zero entry in 'u' formally matches quantum numbers
q = q0[:1]
# 'v' must remain zero matrix to satisfy quantum number constraints
return (u, s, v, q)
# require NumPy arrays for indexing
q0 = np.array(q0)
q1 = np.array(q1)
# sort quantum numbers and arrange entries in A accordingly;
# using mergesort to avoid permutations of identical quantum numbers
idx0 = np.argsort(q0, kind='mergesort')
idx1 = np.argsort(q1, kind='mergesort')
if np.any(idx0 - np.arange(len(idx0))):
# if not sorted yet...
q0 = q0[idx0]
A = A[idx0, :]
if np.any(idx1 - np.arange(len(idx1))):
# if not sorted yet...
q1 = q1[idx1]
A = A[:, idx1]
# maximum intermediate dimension
max_interm_dim = min(A.shape)
# keep track of intermediate dimension
D = 0
# allocate memory for U and V matrices, singular values and
# corresponding intermediate quantum numbers
u = np.zeros((A.shape[0], max_interm_dim), dtype=A.dtype)
v = np.zeros((max_interm_dim, A.shape[1]), dtype=A.dtype)
s = np.zeros(max_interm_dim)
q = np.zeros(max_interm_dim, dtype=q0.dtype)
# for each shared quantum number...
for qn in qis:
# indices of current quantum number
iqn = np.where(q0 == qn)[0]; i0 = iqn[0]; i1 = iqn[-1] + 1
iqn = np.where(q1 == qn)[0]; j0 = iqn[0]; j1 = iqn[-1] + 1
# perform SVD decomposition of current block
usub, ssub, vsub = np.linalg.svd(A[i0:i1, j0:j1], full_matrices=False)
# update intermediate dimension
Dprev = D
D += len(ssub)
u[i0:i1, Dprev:D] = usub
v[Dprev:D, j0:j1] = vsub
s[Dprev:D] = ssub
q[Dprev:D] = qn
assert D <= max_interm_dim
# use actual intermediate dimensions
u = u[:, :D]
v = v[:D, :]
s = s[:D]
q = q[:D]
# truncate small singular values
idx = retained_bond_indices(s, tol)
u = u[:, idx]
v = v[idx, :]
s = s[idx]
q = q[idx]
# undo sorting of quantum numbers
if np.any(idx0 - np.arange(len(idx0))):
u = u[np.argsort(idx0), :]
if np.any(idx1 - np.arange(len(idx1))):
v = v[:, np.argsort(idx1)]
return (u, s, v, q)
def qr(A, q0, q1):
"""
Compute the block-wise QR decompositions of a matrix, taking block sparsity
structure dictated by quantum numbers into account (that is, `A[i, j]` can
only be non-zero if `q0[i] == q1[j]`).
The resulting R matrix is not necessarily upper triangular due to
reordering of entries.
"""
assert A.ndim == 2
assert len(q0) == A.shape[0]
assert len(q1) == A.shape[1]
assert is_qsparse(A, [q0, -q1])
# find common quantum numbers
qis = np.intersect1d(q0, q1)
if len(qis) == 0:
assert np.linalg.norm(A) == 0
# special case: no common quantum numbers;
# use dummy intermediate dimension 1 with all entries in 'R' set to zero
Q = np.zeros((A.shape[0], 1), dtype=A.dtype)
R = np.zeros((1, A.shape[1]), dtype=A.dtype)
# single column of 'Q' should have norm 1
Q[0, 0] = 1
# ensure non-zero entry in 'Q' formally matches quantum numbers
qinterm = q0[:1]
return (Q, R, qinterm)
# require NumPy arrays for indexing
q0 = np.array(q0)
q1 = np.array(q1)
# sort quantum numbers and arrange entries in A accordingly;
# using mergesort to avoid permutations of identical quantum numbers
idx0 = np.argsort(q0, kind='mergesort')
idx1 = np.argsort(q1, kind='mergesort')
if np.any(idx0 - np.arange(len(idx0))):
# if not sorted yet...
q0 = q0[idx0]
A = A[idx0, :]
if np.any(idx1 - np.arange(len(idx1))):
# if not sorted yet...
q1 = q1[idx1]
A = A[:, idx1]
# maximum intermediate dimension
max_interm_dim = min(A.shape)
# keep track of intermediate dimension
D = 0
Q = np.zeros((A.shape[0], max_interm_dim), dtype=A.dtype)
R = np.zeros((max_interm_dim, A.shape[1]), dtype=A.dtype)
# corresponding intermediate quantum numbers
qinterm = np.zeros(max_interm_dim, dtype=q0.dtype)
# for each shared quantum number...
for qn in qis:
# indices of current quantum number
iqn = np.where(q0 == qn)[0]; i0 = iqn[0]; i1 = iqn[-1] + 1
iqn = np.where(q1 == qn)[0]; j0 = iqn[0]; j1 = iqn[-1] + 1
# perform QR decomposition of current block
Qsub, Rsub = np.linalg.qr(A[i0:i1, j0:j1], mode='reduced')
# update intermediate dimension
Dprev = D
D += Qsub.shape[1]
Q[i0:i1, Dprev:D] = Qsub
R[Dprev:D, j0:j1] = Rsub
qinterm[Dprev:D] = qn
assert D <= max_interm_dim
# use actual intermediate dimensions
Q = Q[:, :D]
R = R[:D, :]
qinterm = qinterm[:D]
# undo sorting of quantum numbers
if np.any(idx0 - np.arange(len(idx0))):
Q = Q[np.argsort(idx0), :]
if np.any(idx1 - np.arange(len(idx1))):
R = R[:, np.argsort(idx1)]
return (Q, R, qinterm)
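# Minimal usage sketch (illustrative only, not part of the original module).
# A 3x3 block-sparse matrix with quantum numbers q0 = [0, 0, 1], q1 = [0, 1, 1]
# (entries may only be non-zero where q0[i] == q1[j]); note the relative import
# above means this file is normally exercised from the package's tests.
if __name__ == "__main__":
    A = np.array([[1., 0., 0.],
                  [2., 0., 0.],
                  [0., 3., 4.]])
    q0 = [0, 0, 1]
    q1 = [0, 1, 1]
    Q, R, qinterm = qr(A, q0, q1)
    assert np.allclose(Q @ R, A)
    u, s, v, q = split_matrix_svd(A, q0, q1, tol=0.)
    assert np.allclose((u * s) @ v, A)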
|
94397
|
import copy
import inspect
import itertools
import types
import warnings
from typing import Any, Dict
import numpy as np
from axelrod import _module_random
from axelrod.action import Action
from axelrod.game import DefaultGame
from axelrod.history import History
from axelrod.random_ import RandomGenerator
C, D = Action.C, Action.D
class PostInitCaller(type):
"""Metaclass to be able to handle post __init__ tasks.
If there is a DerivedPlayer class of Player that overrides
_post_init, as follows:
class Player(object, metaclass=PostInitCaller):
def __new__(cls, *args, **kwargs):
print("Player.__new__")
obj = super().__new__(cls)
return obj
def __init__(self):
print("Player.__init__")
def _post_init(self):
print("Player._post_init")
def _post_transform(self):
print("Player._post_transform")
class DerivedPlayer(Player):
def __init__(self):
print("DerivedPlayer.__init__")
super().__init__()
def _post_init(self):
print("DerivedPlayer._post_init")
super()._post_init()
dp = DerivedPlayer()
Then the call order is:
* PostInitCaller.__call__
* Player.__new__
* DerivedPlayer.__init__
* Player.__init__
* DerivedPlayer._post_init
* Player._post_init
* Player._post_transform
See here to learn more: https://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/
"""
def __call__(cls, *args, **kwargs):
# This calls cls.__new__ and cls.__init__
obj = type.__call__(cls, *args, **kwargs)
# Next we do any post init or post transform tasks, like recomputing
# classifiers
        # Note that subclasses inherit the metaclass, and subclasses may override
# or extend __init__ so it's necessary to do these tasks after all the
# __init__'s have run in the case of a post-transform reclassification.
obj._post_init()
obj._post_transform()
return obj
class Player(object, metaclass=PostInitCaller):
"""A class for a player in the tournament.
This is an abstract base class, not intended to be used directly.
"""
name = "Player"
classifier = {} # type: Dict[str, Any]
_reclassifiers = []
def __new__(cls, *args, **kwargs):
"""Caches arguments for Player cloning."""
obj = super().__new__(cls)
obj.init_kwargs = cls.init_params(*args, **kwargs)
return obj
@classmethod
def init_params(cls, *args, **kwargs):
"""
Return a dictionary containing the init parameters of a strategy
(without 'self').
Use *args and **kwargs as value if specified
and complete the rest with the default values.
"""
sig = inspect.signature(cls.__init__)
# The 'self' parameter needs to be removed or the first *args will be
# assigned to it
self_param = sig.parameters.get("self")
new_params = list(sig.parameters.values())
new_params.remove(self_param)
sig = sig.replace(parameters=new_params)
boundargs = sig.bind_partial(*args, **kwargs)
boundargs.apply_defaults()
return boundargs.arguments
def __init__(self):
"""Initial class setup."""
self._history = History()
self.classifier = copy.deepcopy(self.classifier)
self.set_match_attributes()
def _post_init(self):
"""Post initialization tasks such as reclassifying the strategy."""
pass
def _post_transform(self):
"""Handles post transform tasks such as further reclassifying."""
# Reclassify strategy post __init__, if needed.
for (reclassifier, args, kwargs) in self._reclassifiers:
self.classifier = reclassifier(self.classifier, *args, **kwargs)
def __eq__(self, other):
"""
Test if two players are equal, ignoring random seed and RNG state.
"""
if self.__repr__() != other.__repr__():
return False
for attribute in set(
list(self.__dict__.keys()) + list(other.__dict__.keys())
):
value = getattr(self, attribute, None)
other_value = getattr(other, attribute, None)
if attribute in ["_random", "_seed"]:
# Don't compare the random generators.
continue
if isinstance(value, np.ndarray):
if not (np.array_equal(value, other_value)):
return False
elif isinstance(value, types.GeneratorType) or isinstance(
value, itertools.cycle
):
# Split the original generator so it is not touched
generator, original_value = itertools.tee(value)
other_generator, original_other_value = itertools.tee(
other_value
)
if isinstance(value, types.GeneratorType):
setattr(self, attribute, (ele for ele in original_value))
setattr(
other, attribute, (ele for ele in original_other_value)
)
else:
setattr(self, attribute, itertools.cycle(original_value))
setattr(
other, attribute, itertools.cycle(original_other_value)
)
for _ in range(200):
try:
if next(generator) != next(other_generator):
return False
except StopIteration:
break
# Code for a strange edge case where each strategy points at each
# other
elif value is other and other_value is self:
pass
else:
if value != other_value:
return False
return True
def receive_match_attributes(self):
# Overwrite this function if your strategy needs
# to make use of match_attributes such as
# the game matrix, the number of rounds or the noise
pass
def set_match_attributes(self, length=-1, game=None, noise=0):
if not game:
game = DefaultGame
self.match_attributes = {"length": length, "game": game, "noise": noise}
self.receive_match_attributes()
def set_seed(self, seed):
"""Set a random seed for the player's random number generator."""
if seed is None:
warnings.warn(
"Initializing player with seed from Axelrod module random number generator. "
"Results may not be seed reproducible."
)
self._seed = _module_random.random_seed_int()
else:
self._seed = seed
self._random = RandomGenerator(seed=self._seed)
def __repr__(self):
"""The string method for the strategy.
Appends the `__init__` parameters to the strategy's name."""
name = self.name
prefix = ": "
gen = (
value for value in self.init_kwargs.values() if value is not None
)
for value in gen:
try:
if issubclass(value, Player):
value = value.name
except TypeError:
pass
name = "".join([name, prefix, str(value)])
prefix = ", "
return name
def __getstate__(self):
"""Used for pickling. Override if Player contains unpickleable attributes."""
return self.__dict__
def strategy(self, opponent):
"""This is a placeholder strategy."""
raise NotImplementedError()
def clone(self):
"""Clones the player without history, reapplying configuration
parameters as necessary."""
# You may be tempted to re-implement using the `copy` module
# Note that this would require a deepcopy in some cases and there may
# be significant changes required throughout the library.
# Consider overriding in special cases only if necessary
cls = self.__class__
new_player = cls(**self.init_kwargs)
new_player.match_attributes = copy.copy(self.match_attributes)
return new_player
def reset(self):
"""Resets a player to its initial state
This method is called at the beginning of each match (between a pair
of players) to reset a player's state to its initial starting point.
It ensures that no 'memory' of previous matches is carried forward.
"""
# This also resets the history.
self.__init__(**self.init_kwargs)
def update_history(self, play, coplay):
self.history.append(play, coplay)
@property
def history(self):
return self._history
# Properties maintained for legacy API, can refactor to self.history.X
# in 5.0.0 to reduce function call overhead.
@property
def cooperations(self):
return self._history.cooperations
@property
def defections(self):
return self._history.defections
@property
def state_distribution(self):
return self._history.state_distribution
|
94398
|
from django.test import TestCase
from newsletter.models import Subscriber
class NewsletterModelTest(TestCase):
""" Test suite for customer model """
def setUp(self):
""" Set up test database """
Subscriber.objects.create(email='<EMAIL>', confirmed=True)
Subscriber.objects.create(email='<EMAIL>', confirmed=False)
def tearDown(self):
""" Clean up test database """
Subscriber.objects.all().delete()
def test_string_representation(self):
""" Test string representation of model """
email = Subscriber.objects.get(email='<EMAIL>')
self.assertEqual(str(email), '<EMAIL> (confirmed)')
def test_unconfirmed_string_representation(self):
""" Test string representation of unconfirmed model """
email = Subscriber.objects.get(email='<EMAIL>')
self.assertEqual(str(email), '<EMAIL> (not confirmed)')
def test_subscriber_confirmed(self):
""" Test confirmed field """
confirmed = Subscriber.objects.filter(email='<EMAIL>', confirmed=True)
self.assertEqual(confirmed.count(), 1)
def test_subscriber_unconfirmed(self):
""" Test unconfirmed field """
unconfirmed = Subscriber.objects.filter(email='<EMAIL>', confirmed=False)
self.assertEqual(unconfirmed.count(), 0)
def test_subscriber_verbose_name(self):
""" Test verbose name of model """
self.assertEqual(str(Subscriber._meta.verbose_name), 'Subscriber')
def test_subscriber_verbose_name_plural(self):
""" Test verbose name of model """
self.assertEqual(str(Subscriber._meta.verbose_name_plural), 'Subscribers')
|
94403
|
from typing import List
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from chemcharts.core.container.chemdata import ChemData
from chemcharts.core.plots.base_plot import BasePlot, _check_value_input
from chemcharts.core.utils.value_functions import generate_value
from chemcharts.core.utils.enums import PlottingEnum
from chemcharts.core.utils.enums import PlotLabellingEnum
_PE = PlottingEnum
_PLE = PlotLabellingEnum
class HistogramPlot(BasePlot):
def __init__(self):
super().__init__()
def plot(self, chemdata_list: List[ChemData], parameters: dict, settings: dict):
# no warning message for multiple chemdata object inputs since normalisation
# for xlim and ylim is here anyways applied
# checks whether there is a value input
value_input_result = _check_value_input(chemdata_list, "Histogram")
# checks whether there are multiple input objects
if value_input_result: # checks whether _check_value_input function returns 'True'
# lim setting
xlim, ylim, valuelim = self._get_lims(chemdata_list=chemdata_list,
parameters=parameters)
# final path setting
final_path = settings.get(_PE.SETTINGS_PATH, None)
self._prepare_folder(path=final_path)
# temp path setting
temp_folder_path, temp_plots_path_list = self._generate_temp_paths(number_paths=len(chemdata_list))
max_columns = 3
# loop over ChemData objects and generate plots
for idx in range(len(chemdata_list)):
fig, axs = plt.subplots()
value_column, value_name = generate_value(chemdata_list=chemdata_list,
parameters=parameters,
idx=idx)
# TODO fix tanimoto
"""
# include tanimoto_similarity
if selection == "tanimoto_similarity":
value_input = chemdata.get_tanimoto_similarity()
value_name = "Tanimoto Similarity"
elif selection == "value":
value_input = chemdata.get_values()
value_name = parameters.get(_PE.V
else:
raise ValueError(f"Selection input: {selection} is not as expected.")
"""
# generate data frame
scatter_df = pd.DataFrame({_PLE.UMAP_1: chemdata_list[idx].get_embedding().np_array[:, 0],
_PLE.UMAP_2: chemdata_list[idx].get_embedding().np_array[:, 1],
value_name: value_column})
sns.set_context("talk",
font_scale=0.5)
# deal with axs issue (array if multiple input, otherwise not)
if isinstance(axs, np.ndarray):
row_pos = int(idx / max_columns)
col_pos = idx % max_columns
# makes sure that array is 2D, even if only one row
axs = np.atleast_2d(axs)
selected_axis = axs[row_pos, col_pos]
else:
selected_axis = axs
# generate seaborn histplot
sns.histplot(scatter_df[value_name],
element="step",
bins=parameters.get(_PE.PARAMETERS_BINS, 20),
stat="proportion",
kde=True,
color=parameters.get(_PE.PARAMETERS_PLOT_COLOR, "#d11d80"),
ax=selected_axis)
# Setting axs ranges (for this plot only x and y axis ranges from 0 to 1 make sense)
if xlim is not None or ylim is not None:
print("Histogram plot does not support setting arbitrary axis limits.")
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.gcf().set_size_inches(settings.get(_PE.SETTINGS_FIG_SIZE, (7, 7)))
plt.subplots_adjust(top=parameters.get(_PE.PARAMETERS_PLOT_ADJUST_TOP, 0.9))
plt.xlabel(parameters.get(_PE.PARAMETERS_VALUENAME, "Value"), fontsize=10)
name = f"Dataset_{idx}" if chemdata_list[idx].get_name() == "" else chemdata_list[idx].get_name()
plt.suptitle(name,
fontsize=parameters.get(_PE.PARAMETERS_PLOT_TITLE_FONTSIZE, 14))
plt.savefig(temp_plots_path_list[idx],
format=settings.get(_PE.SETTINGS_FIG_FORMAT, 'png'),
dpi=settings.get(_PE.SETTINGS_FIG_DPI, _PE.SETTINGS_FIG_DPI_DEFAULT))
plt.close("all")
self._merge_multiple_plots(subplot_paths=temp_plots_path_list,
merged_path=final_path,
title=parameters.get(_PE.PARAMETERS_PLOT_TITLE, "Histogram ChemCharts Plot"))
self._clear_temp_dir(path=temp_folder_path)
|
94436
|
from django.conf.urls.defaults import *
from satchmo_store.urls import urlpatterns
urlpatterns += patterns('',
(r'test/', include('simple.localsite.urls'))
)
|
94438
|
from scipy.stats import multivariate_normal as normal
import numpy as np
from time import time
from experiments.lnpdfs.create_target_lnpfs import build_target_likelihood_planar_n_link
from sampler.SliceSampling.slice_sampler import slice_sample
num_dimensions = 10
conf_likelihood_var = 4e-2 * np.ones(num_dimensions)
conf_likelihood_var[0] = 1
cart_likelihood_var = np.array([1e-4, 1e-4])
lnpdf = build_target_likelihood_planar_n_link(num_dimensions, conf_likelihood_var, cart_likelihood_var)[0]
prior = normal(np.zeros((num_dimensions)), conf_likelihood_var * np.eye((num_dimensions)))
initial = prior.rvs(1)
def sample(n_samps, sigma, path):
start = time()
[samples, fevals, timestamps] = slice_sample(lnpdf, initial, n_samps, sigma * np.ones(num_dimensions))
timestamps -= start
samples = samples.transpose().reshape(len(timestamps),-1,num_dimensions).copy()
np.savez(path + 'processed_data', samples=samples, fevals=fevals, timestamps = timestamps)
sample(100, 0.1, "slice_test")
print("done")
|
94449
|
import logging
from openeye import oechem, oeszybki, oeomega
from torsion.utils.process_sd_data import get_sd_data, has_sd_data
from torsion.dihedral import get_dihedral
TORSION_LIBRARY = [
'[C,N,c:1][NX3:2][C:3](=[O])[C,N,c,O:4] 0 180', # amides are flipped cis and trans
'[#1:1][NX3H:2][C:3](=[O])[C,N,c,O:4] 0', # primary amides are NOT flipped
'[*:1][C,c:2][OX2:3][*:4] 0 180', # hydroxyls and ethers are rotated 180 degrees
'[H:1][CH3:2]-!@[!#1:3][*:4] 0.1 180', # methyls are rotated 180 degrees
'[H:1][CH3:2]-!@[!#1:3]=[*:4] 0.1 180'
]
MAX_CONFS = 100
class hasDoubleBondO(oechem.OEUnaryAtomPred):
def __call__(self, atom):
for bond in atom.GetBonds():
if bond.GetOrder() == 2 and bond.GetNbr(atom).IsOxygen():
return True
return False
def isAmideRotor(bond):
if bond.GetOrder() != 1:
return False
atomB = bond.GetBgn()
atomE = bond.GetEnd()
pred = hasDoubleBondO()
if atomB.IsCarbon() and atomE.IsNitrogen() and pred(atomB):
return True
if atomB.IsNitrogen() and atomE.IsCarbon() and pred(atomE):
return True
return False
def isMethylRotor(bond):
if bond.GetOrder() != 1:
return False
atomB = bond.GetBgn()
atomE = bond.GetEnd()
if atomB.IsHydrogen() or atomE.IsHydrogen():
return False
def isMethylCarbon(atom):
return atom.GetAtomicNum() == oechem.OEElemNo_C and \
atom.GetHvyDegree() == 1 and \
atom.GetTotalHCount() == 3
return isMethylCarbon(atomB) or isMethylCarbon(atomE)
def isEtherRotor(bond):
if bond.GetOrder() != 1:
return False
atomB = bond.GetBgn()
atomE = bond.GetEnd()
isEtherOxygen = oechem.OEMatchAtom("[OX2][C,c]")
return (atomB.IsCarbon() and isEtherOxygen(atomE)) or (atomE.IsCarbon() and isEtherOxygen(atomB))
def isRotatableBond(bond):
inRing = oechem.OEBondIsInRing()
return (not inRing(bond)) and (
isAmideRotor(bond) or \
isMethylRotor(bond) or \
isEtherRotor(bond)
)
class distance_predicate(oechem.OEUnaryBondPred):
def __init__(self, atom1_idx, atom2_idx):
oechem.OEUnaryBondPred.__init__(self)
self.atom1_idx = atom1_idx
self.atom2_idx = atom2_idx
def __call__(self, bond):
atomB = bond.GetBgn()
atomE = bond.GetEnd()
mol = bond.GetParent()
atom1 = mol.GetAtom(oechem.OEHasAtomIdx(self.atom1_idx))
atom2 = mol.GetAtom(oechem.OEHasAtomIdx(self.atom2_idx))
return max(oechem.OEGetPathLength(atomB, atom1),
oechem.OEGetPathLength(atomE, atom1),
oechem.OEGetPathLength(atomB, atom2),
oechem.OEGetPathLength(atomE, atom2)) <= 3
def configure_omega(library, rotor_predicate, rms_cutoff, energy_window, num_conformers=MAX_CONFS):
opts = oeomega.OEOmegaOptions(oeomega.OEOmegaSampling_Dense)
opts.SetEnumRing(False)
opts.SetEnumNitrogen(oeomega.OENitrogenEnumeration_Off)
opts.SetSampleHydrogens(True)
opts.SetRotorPredicate(rotor_predicate)
opts.SetIncludeInput(False)
opts.SetEnergyWindow(energy_window)
opts.SetMaxConfs(num_conformers)
opts.SetRMSThreshold(rms_cutoff)
conf_sampler = oeomega.OEOmega(opts)
conf_sampler.SetCanonOrder(False)
torlib = conf_sampler.GetTorLib()
# torlib.ClearTorsionLibrary()
for rule in library:
if not torlib.AddTorsionRule(rule):
oechem.OEThrow.Fatal('Failed to add torsion rule: {}'.format(rule))
conf_sampler.SetTorLib(torlib)
return conf_sampler
def gen_starting_confs(mol, torsion_library, num_conformers=MAX_CONFS, rms_cutoff=0.0, energy_window=25):
# Identify the atoms in the dihedral
TAGNAME = 'TORSION_ATOMS_FRAGMENT'
if not has_sd_data(mol, TAGNAME):
raise ValueError("Molecule does not have the SD Data Tag '{}'.".format(TAGNAME))
dihedralAtomIndices = [int(x)-1 for x in get_sd_data(mol, TAGNAME).split()]
inDih = \
oechem.OEOrAtom(oechem.OEOrAtom(oechem.OEHasAtomIdx(dihedralAtomIndices[0]),
oechem.OEHasAtomIdx(dihedralAtomIndices[1])),
oechem.OEOrAtom(oechem.OEHasAtomIdx(dihedralAtomIndices[2]),
oechem.OEHasAtomIdx(dihedralAtomIndices[3]))
)
mol1 = mol.CreateCopy()
mc_mol = oechem.OEMol(mol1)
if num_conformers > 1:
rotor_predicate = oechem.OEOrBond(oechem.OEIsRotor(),
oechem.PyBondPredicate(isRotatableBond))
#Initialize conformer generator and multi-conformer library
conf_generator = configure_omega(torsion_library, rotor_predicate,
rms_cutoff, energy_window, num_conformers)
# Generator conformers
if not conf_generator(mc_mol, inDih):
raise ValueError("Conformers cannot be generated.")
logging.debug("Generated a total of %d conformers for %s.", mc_mol.NumConfs(),
mol.GetTitle())
for conf_no, conf in enumerate(mc_mol.GetConfs()):
conformer_label = mol.GetTitle()+'_' +\
'_'.join(get_sd_data(mol, 'TORSION_ATOMS_ParentMol').split()) +\
'_{:02d}'.format(conf_no)
oechem.OESetSDData(conf, "CONFORMER_LABEL", conformer_label)
conf.SetTitle(conformer_label)
return mc_mol
def get_best_conf(mol, dih, num_points):
"""Drive the primary torsion in the molecule and select the lowest
energy conformer to represent each dihedral angle
"""
delta = 360.0/num_points
angle_list = [2*i*oechem.Pi/num_points for i in range(num_points)]
dih_atoms = [x for x in dih.GetAtoms()]
# Create new output OEMol
title = mol.GetTitle()
tor_mol = oechem.OEMol()
opts = oeszybki.OETorsionScanOptions()
opts.SetDelta(delta)
opts.SetForceFieldType(oeszybki.OEForceFieldType_MMFF94)
opts.SetSolvationType(oeszybki.OESolventModel_NoSolv)
tmp_angle = 0.0
tor = oechem.OETorsion(dih_atoms[0], dih_atoms[1], dih_atoms[2], dih_atoms[3], tmp_angle)
oeszybki.OETorsionScan(tor_mol, mol, tor, opts)
oechem.OECopySDData(tor_mol, mol)
    # if both 0 and 360 degrees were sampled because of rounding,
    # drop the duplicate final conformer
    if tor_mol.NumConfs() > num_points:
        last_conf = None
        for conf in tor_mol.GetConfs():
            last_conf = conf
        tor_mol.DeleteConf(last_conf)
for angle, conf in zip(angle_list, tor_mol.GetConfs()):
angle_deg = int(round(angle*oechem.Rad2Deg))
tor_mol.SetActive(conf)
oechem.OESetTorsion(conf, dih_atoms[0], dih_atoms[1], dih_atoms[2], dih_atoms[3], angle)
conf_name = title + '_{:02d}'.format(conf.GetIdx())
oechem.OESetSDData(conf, 'CONFORMER_LABEL', conf_name)
oechem.OESetSDData(conf, 'TORSION_ANGLE', "{:.0f}".format(angle_deg))
conf.SetDoubleData('TORSION_ANGLE', angle_deg)
conf.SetTitle('{}: Angle {:.0f}'.format(conf_name, angle_deg))
return tor_mol
def get_torsional_confs(mol):
mc_mol = gen_starting_confs(mol, TORSION_LIBRARY, True, 20)
torsion_tag = 'TORSION_ATOMS_FRAGMENT'
torsion_atoms_in_fragment = get_sd_data(mol, torsion_tag).split()
dihedral_atom_indices = [int(x) - 1 for x in torsion_atoms_in_fragment]
dih, _ = get_dihedral(mc_mol, dihedral_atom_indices)
torsional_confs = get_best_conf(mc_mol, dih, 24)
torsional_mols = []
for conf in torsional_confs.GetConfs():
new_mol = oechem.OEMol(conf)
oechem.OECopySDData(new_mol, mol)
torsional_mols.append(new_mol)
return torsional_mols
|
94454
|
import asyncio
import hashlib
import json
import re
import ssl
import websocket
import websockets
class OKCoinWSPublic:
Ticker = None
def __init__(self, pair, verbose):
self.pair = pair
self.verbose = verbose
@asyncio.coroutine
def initialize(self):
TickerFirstRun = True
while True:
if TickerFirstRun or not ws.open:
TickerFirstRun = False
sockpair = re.sub(r'[\W_]+', '', self.pair)
if self.pair[-3:] == 'cny':
url = "wss://real.okcoin.cn:10440/websocket/okcoinapi"
elif self.pair[-3:] == 'usd':
url = "wss://real.okcoin.com:10440/websocket/okcoinapi"
if self.verbose:
print('Connecting to Public OKCoin WebSocket...')
try:
ws = yield from websockets.connect(url)
# Ticker
yield from ws.send("{'event':'addChannel','channel':'ok_" + sockpair + "_ticker'}")
                except Exception:
                    TickerFirstRun = True
                    continue  # retry the connection on the next loop iteration
OKCoinWSPublic.Ticker = yield from ws.recv()
class OKCoinWSPrivate:
TradeOrderID = None
def __init__(self, pair, verbose, api_key='', secret=''):
self.pair = pair
self.verbose = verbose
self.api_key = api_key
self.secret = secret
if self.pair[-3:] == 'cny':
self.url = "wss://real.okcoin.cn:10440/websocket/okcoinapi"
elif self.pair[-3:] == 'usd':
self.url = "wss://real.okcoin.com:10440/websocket/okcoinapi"
if self.verbose:
print('Connecting to Private OKCoin WebSocket...')
notconnected = True
while notconnected:
try:
self.ws = websocket.create_connection(self.url)
notconnected = False
except Exception:
pass
def buildMySign(self, params, secretKey):
sign = ''
for key in sorted(params.keys()):
sign += key + '=' + str(params[key]) + '&'
data = sign + 'secret_key=' + secretKey
return hashlib.md5(data.encode("utf8")).hexdigest().upper()
def userinfo(self):
params = {'api_key': self.api_key}
sign = self.buildMySign(params, self.secret)
try:
self.ws.send("{'event':'addChannel', 'channel':'ok_spot" + self.pair[-3:] + "_userinfo',\
'parameters':{ 'api_key':'" + self.api_key + "', 'sign':'" + sign + "'} }")
info = self.ws.recv()
except (websocket._exceptions.WebSocketTimeoutException,
websocket._exceptions.WebSocketConnectionClosedException, ssl.SSLError,
ConnectionResetError):
self.ws = websocket.create_connection(self.url)
self.ws.send("{'event':'addChannel', 'channel':'ok_spot" + self.pair[-3:] + "_userinfo',\
'parameters':{ 'api_key':'" + self.api_key + "', 'sign':'" + sign + "'} }")
info = self.ws.recv()
return info
def cancelorder(self, order_id):
params = {'api_key': self.api_key,
'symbol': self.pair, 'order_id': order_id}
sign = self.buildMySign(params, self.secret)
try:
try:
                self.ws.send("{'event':'addChannel', 'channel':'ok_spot" + self.pair[-3:]
                             + "_cancel_order', 'parameters':{ 'api_key':'" + self.api_key
                             + "', 'sign':'" + sign + "', 'symbol':'" + self.pair
                             + "', 'order_id':'" + order_id + "'} }")
# Don't muck up userinfo with executed order_id
self.ws.recv()
except (websocket._exceptions.WebSocketTimeoutException,
websocket._exceptions.WebSocketConnectionClosedException, ssl.SSLError,
ConnectionResetError):
self.ws = websocket.create_connection(self.url)
                self.ws.send("{'event':'addChannel', 'channel':'ok_spot" + self.pair[-3:]
                             + "_cancel_order', 'parameters':{ 'api_key':'" + self.api_key
                             + "', 'sign':'" + sign + "', 'symbol':'" + self.pair
                             + "', 'order_id':'" + order_id + "'} }")
# Don't muck up userinfo with executed order_id
self.ws.recv()
# If trading throws an error, we don't store an OrderID so there's no
# order to cancel.
except TypeError:
pass
def trade(self, order, rate, amount):
params = {'api_key': self.api_key, 'symbol': self.pair,
'type': order, 'price': rate, 'amount': amount}
sign = self.buildMySign(params, self.secret)
try:
self.ws.send("{'event':'addChannel','channel':'ok_spot" + self.pair[-3:]
+ "_trade','parameters':{'api_key':'" + self.api_key
+ "','sign':'" + sign + "','symbol':'" + self.pair
+ "','type':'" + order + "','price':'"
+ str(rate) + "','amount':'" + str(amount) + "'}}")
except (websocket._exceptions.WebSocketTimeoutException,
websocket._exceptions.WebSocketConnectionClosedException, ssl.SSLError,
ConnectionResetError):
self.ws = websocket.create_connection(self.url)
self.ws.send("{'event':'addChannel','channel':'ok_spot" + self.pair[-3:]
+ "_trade','parameters':{'api_key':'" + self.api_key
+ "','sign':'" + sign + "','symbol':'" + self.pair
+ "','type':'" + order + "','price':'"
+ str(rate) + "','amount':'" + str(amount) + "'}}")
try:
OKCoinWSPrivate.TradeOrderID = json.loads(
self.ws.recv())[-1]['data']['order_id']
except KeyError:
pass # Some error code instead (probably insufficient balance).
# Subscribes to channel, updates on new trade. Not in use since we store
# the order_id from trade
def realtrades(self):
params = {'api_key': self.api_key}
sign = self.buildMySign(params, self.secret)
self.ws.send("{'event':'addChannel','channel':'okspot_" + self.pair[-3:]
+ "_cancel_order','parameters':{'api_key':'"
+ self.api_key + "','sign':'" + sign + "', 'symbol':'"
+ self.pair + "', 'order_id':'-1'} }")
trades = self.ws.recv()
return trades
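# Hypothetical usage sketch (not part of the original module): the credentials are
# placeholders and 'btc_usd' is an assumed trading pair; a real API key and secret
# are required for the private channels to return data.
if __name__ == '__main__':
    private = OKCoinWSPrivate('btc_usd', verbose=True,
                              api_key='YOUR_API_KEY', secret='YOUR_SECRET')
    print(private.userinfo())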
|
94477
|
from ethereum.utils import sha3, encode_hex
class EphemDB():
def __init__(self, kv=None):
self.reads = 0
self.writes = 0
self.kv = kv or {}
def get(self, k):
self.reads += 1
return self.kv.get(k, None)
def put(self, k, v):
self.writes += 1
self.kv[k] = v
def delete(self, k):
del self.kv[k]
# Hashes of empty subtrees
zerohashes = [b'\x00' * 32]
for i in range(256):
zerohashes.insert(0, sha3(zerohashes[0] + zerohashes[0]))
# Create a new empty tree
def new_tree(db):
return zerohashes[0]
# Convert a binary key into an integer path value
def key_to_path(k):
return int.from_bytes(k, 'big')
tt256m1 = 2**256 - 1
# And convert back
def path_to_key(k):
return (k & tt256m1).to_bytes(32, 'big')
# Read a key from a given tree
def get(db, root, key):
v = root
path = key_to_path(key)
for i in range(0, 256, 4):
if v == zerohashes[i]:
return b'\x00' * 32
child = db.get(v)
if len(child) == 65:
if (path % 2**256) == key_to_path(child[1:33]):
return child[33:]
else:
return b'\x00' * 32
else:
index = (path >> 252) & 15
v = child[32*index: 32*index+32]
path <<= 4
return v
# Make a root hash of a (sub)tree with a single key/value pair
def make_single_key_hash(path, depth, value):
if depth == 256:
return value
elif (path >> 255) & 1:
return sha3(zerohashes[depth+1] + make_single_key_hash(path << 1, depth + 1, value))
else:
return sha3(make_single_key_hash(path << 1, depth + 1, value) + zerohashes[depth+1])
# Hash together 16 elements
def hash_16_els(vals):
assert len(vals) == 16
for _ in range(4):
vals = [sha3(vals[i] + vals[i+1]) for i in range(0, len(vals), 2)]
return vals[0]
# Make a root hash of a (sub)tree with two key/value pairs, and save intermediate nodes in the DB
def make_double_key_hash(db, path1, path2, depth, value1, value2):
if depth == 256:
raise Exception("Cannot fit two values into one slot!")
if ((path1 >> 252) & 15) == ((path2 >> 252) & 15):
children = [zerohashes[depth+4]] * 16
children[(path1 >> 252) & 15] = make_double_key_hash(db, path1 << 4, path2 << 4, depth + 4, value1, value2)
else:
Lkey = ((path1 >> 252) & 15)
L = make_single_key_hash(path1 << 4, depth + 4, value1)
Rkey = ((path2 >> 252) & 15)
R = make_single_key_hash(path2 << 4, depth + 4, value2)
db.put(L, b'\x01' + path_to_key(path1 << 4) + value1)
db.put(R, b'\x01' + path_to_key(path2 << 4) + value2)
children = [zerohashes[depth+4]] * 16
children[Lkey] = L
children[Rkey] = R
h = hash_16_els(children)
db.put(h, b''.join(children))
return h
# Update a tree with a given key/value pair
def update(db, root, key, value):
return _update(db, root, key_to_path(key), 0, value)
def _update(db, root, path, depth, value):
if depth == 256:
return value
# Update an empty subtree: make a single-key subtree
if root == zerohashes[depth]:
k = make_single_key_hash(path, depth, value)
db.put(k, b'\x01' + path_to_key(path) + value)
return k
child = db.get(root)
# Update a single-key subtree: make a double-key subtree
if len(child) == 65:
origpath, origvalue = key_to_path(child[1:33]), child[33:]
return make_double_key_hash(db, path, origpath, depth, value, origvalue)
# Update a multi-key subtree: recurse down
else:
assert len(child) == 512
index = (path >> 252) & 15
new_value = _update(db, child[index*32: index*32+32], path << 4, depth + 4, value)
new_children = [new_value if i == index else child[32*i:32*i+32] for i in range(16)]
h = hash_16_els(new_children)
db.put(h, b''.join(new_children))
return h
def multi_update(db, root, keys, values):
for k, v in zip(keys, values):
root = update(db, root, k, v)
return root
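# Minimal usage sketch (not part of the original module): build an empty tree,
# insert two 32-byte key/value pairs, and read them back.
if __name__ == '__main__':
    db = EphemDB()
    root = new_tree(db)
    k1, v1 = sha3(b'key1'), sha3(b'value1')
    k2, v2 = sha3(b'key2'), sha3(b'value2')
    root = multi_update(db, root, [k1, k2], [v1, v2])
    assert get(db, root, k1) == v1
    assert get(db, root, k2) == v2
    # Keys that were never written read back as 32 zero bytes
    assert get(db, root, sha3(b'missing')) == b'\x00' * 32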
|
94503
|
import torch
class DistributionLayer(torch.nn.Module):
"""A distribution layer for action selection (e.g. for actor-critic)"""
def __init__(self, action_space, input_size):
"""Initializes the distribution layer for the given action space and
input_size (i.e. the output size of the model)
"""
super().__init__()
def forward(self, x):
"""Returns the relevant pytorch distribution output for input x,
which can be used for action selection and distribution data
"""
raise NotImplementedError
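# A minimal concrete sketch (not part of the original snippet): a categorical
# distribution layer for discrete action spaces, assuming `action_space` exposes
# an `n` attribute as Gym's `Discrete` spaces do.
class CategoricalDistributionLayer(DistributionLayer):
    def __init__(self, action_space, input_size):
        super().__init__(action_space, input_size)
        # Map model features to one logit per discrete action
        self.logits = torch.nn.Linear(input_size, action_space.n)
    def forward(self, x):
        # Callers can .sample() actions and query .log_prob() / .entropy()
        # for policy-gradient style losses.
        return torch.distributions.Categorical(logits=self.logits(x))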
|
94507
|
import unittest
from canvas_sdk.exceptions import CanvasAPIError
class TestExceptions(unittest.TestCase):
longMessage = True
def setUp(self):
self.default_api_error = CanvasAPIError()
def test_default_status_for_canvas_api_error(self):
""" Test expected default status for instance of CanvasAPIError """
self.assertEqual(self.default_api_error.status_code, 500)
def test_default_message_for_canvas_api_error(self):
""" Test expected default msg attribute for instance of CanvasAPIError """
self.assertIsNone(self.default_api_error.error_msg)
def test_default_error_json_for_canvas_api_error(self):
""" Test expected default error_json attribute for instance of CanvasAPIError """
self.assertIsNone(self.default_api_error.error_json)
def test_default_str_for_canvas_api_error(self):
""" Test default CanvasAPIError instance represented as a str """
self.assertEqual('500', str(self.default_api_error))
def test_instance_str_for_canvas_api_error(self):
""" Test string representation of CanvasAPIError with custom attributes """
status = 404
error_msg = 'This is a test message'
        error_json = {'errors': 'Some error json'}
api_error = CanvasAPIError(status_code=status, msg=error_msg, error_json=error_json)
self.assertEqual('%d: %s' % (status, error_msg), str(api_error))
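# Added so the test module can be executed directly; the original file may rely
# on an external test runner instead.
if __name__ == '__main__':
    unittest.main()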
|