id | content
|---|---|
420815
|
from __future__ import print_function, division
from typing import Optional, List
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from ..backbone import build_backbone
from ..block import conv3d_norm_act
from ..utils import model_init
class FPN3D(nn.Module):
"""3D feature pyramid network (FPN). This design is flexible in handling both isotropic data and anisotropic data.
Args:
        backbone_type (str): the backbone architecture to build. Default: ``'resnet'``
block_type (str): the block type in the backbone. Default: ``'residual'``
in_channel (int): number of input channels. Default: 1
out_channel (int): number of output channels. Default: 3
filters (List[int]): number of filters at each FPN stage. Default: [28, 36, 48, 64, 80]
is_isotropic (bool): whether the whole model is isotropic. Default: False
        isotropy (List[bool]): specify whether each U-Net stage is isotropic or anisotropic. All elements will
be `True` if :attr:`is_isotropic` is `True`. Default: [False, False, False, True, True]
pad_mode (str): one of ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'replicate'``
        act_mode (str): one of ``'relu'``, ``'leaky_relu'``, ``'elu'``, ``'gelu'``,
            ``'swish'``, ``'efficient_swish'`` or ``'none'``. Default: ``'elu'``
        norm_mode (str): one of ``'bn'``, ``'sync_bn'``, ``'in'`` or ``'gn'``. Default: ``'bn'``
init_mode (str): one of ``'xavier'``, ``'kaiming'``, ``'selu'`` or ``'orthogonal'``. Default: ``'orthogonal'``
deploy (bool): build backbone in deploy mode (exclusive for RepVGG backbone). Default: False
"""
def __init__(self,
backbone_type: str = 'resnet',
block_type: str = 'residual',
feature_keys: List[str] = ['feat1', 'feat2', 'feat3', 'feat4', 'feat5'],
in_channel: int = 1,
out_channel: int = 3,
filters: List[int] = [28, 36, 48, 64, 80],
ks: List[int] = [3, 3, 5, 3, 3],
blocks: List[int] = [2, 2, 2, 2, 2],
attn: str = 'squeeze_excitation',
is_isotropic: bool = False,
isotropy: List[bool] = [False, False, False, True, True],
pad_mode: str = 'replicate',
act_mode: str = 'elu',
norm_mode: str = 'bn',
init_mode: str = 'orthogonal',
deploy: bool = False,
fmap_size=[17, 129, 129],
**kwargs):
super().__init__()
self.filters = filters
self.depth = len(filters)
assert len(isotropy) == self.depth
if is_isotropic:
isotropy = [True] * self.depth
self.isotropy = isotropy
self.shared_kwargs = {
'pad_mode': pad_mode,
'act_mode': act_mode,
'norm_mode': norm_mode
}
backbone_kwargs = {
'block_type': block_type,
'in_channel': in_channel,
'filters': filters,
'isotropy': isotropy,
'blocks': blocks,
'deploy': deploy,
'fmap_size': fmap_size,
'ks': ks,
'attention': attn,
}
backbone_kwargs.update(self.shared_kwargs)
self.backbone = build_backbone(
backbone_type, feature_keys, **backbone_kwargs)
self.feature_keys = feature_keys
self.latplanes = filters[0]
self.latlayers = nn.ModuleList([
conv3d_norm_act(x, self.latplanes, kernel_size=1, padding=0,
**self.shared_kwargs) for x in filters])
self.smooth = nn.ModuleList()
for i in range(self.depth):
kernel_size, padding = self._get_kernel_size(isotropy[i])
self.smooth.append(conv3d_norm_act(
self.latplanes, self.latplanes, kernel_size=kernel_size,
padding=padding, **self.shared_kwargs))
self.conv_out = self._get_io_conv(out_channel, isotropy[0])
# initialization
model_init(self, init_mode)
def forward(self, x):
z = self.backbone(x)
return self._forward_main(z)
def _forward_main(self, z):
features = [self.latlayers[i](z[self.feature_keys[i]])
for i in range(self.depth)]
out = features[self.depth-1]
for j in range(self.depth-1):
i = self.depth-1-j
out = self._up_smooth_add(out, features[i-1], self.smooth[i])
out = self.smooth[0](out)
out = self.conv_out(out)
return out
def _up_smooth_add(self, x, y, smooth):
"""Upsample, smooth and add two feature maps.
"""
x = F.interpolate(x, size=y.shape[2:], mode='trilinear',
align_corners=True)
return smooth(x) + y
def _get_kernel_size(self, is_isotropic, io_layer=False):
if io_layer: # kernel and padding size of I/O layers
if is_isotropic:
return (5, 5, 5), (2, 2, 2)
return (1, 5, 5), (0, 2, 2)
if is_isotropic:
return (3, 3, 3), (1, 1, 1)
return (1, 3, 3), (0, 1, 1)
def _get_io_conv(self, out_channel, is_isotropic):
kernel_size_io, padding_io = self._get_kernel_size(
is_isotropic, io_layer=True)
return conv3d_norm_act(
self.filters[0], out_channel, kernel_size_io, padding=padding_io,
pad_mode=self.shared_kwargs['pad_mode'], bias=True,
act_mode='none', norm_mode='none')
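# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module; kept commented out
# because the relative imports above require running inside the parent
# package). Input sizes are illustrative; the output has ``out_channel``
# channels and a spatial size set by the backbone's first-stage stride.
#
#   model = FPN3D(backbone_type='resnet', in_channel=1, out_channel=3)
#   x = torch.randn(1, 1, 17, 129, 129)   # (batch, channel, depth, height, width)
#   with torch.no_grad():
#       y = model(x)
#   print(y.shape)
# ---------------------------------------------------------------------------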
|
420825
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class MailsConfig(AppConfig):
name = 'apps.mails'
verbose_name = _('Mails')
|
420830
|
import FWCore.ParameterSet.Config as cms
AnalyticalPropagator = cms.ESProducer("AnalyticalPropagatorESProducer",
MaxDPhi = cms.double(1.6),
ComponentName = cms.string('AnalyticalPropagator'),
PropagationDirection = cms.string('alongMomentum')
)
|
420842
|
import twitter, os, json, time, tempfile, contextlib, sys, io, requests
from PIL import Image
from minio import Minio
from minio.error import ResponseError
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
    sys.stdout = io.StringIO()
yield
sys.stdout = save_stdout
minioClient = Minio(os.environ['minio_authority'],
access_key=os.environ['minio_access_key'],
secret_key=os.environ['minio_secret_key'],
secure=False)
api = twitter.Api(
consumer_key=os.environ['consumer_key'],
consumer_secret=os.environ['consumer_secret'],
access_token_key=os.environ['access_token'],
access_token_secret=os.environ['access_token_secret']
)
def requeue(st):
# grab from headers or set defaults.
retries = int(os.getenv("Http_X_Retries", "0"))
max_retries = int(os.getenv("Http_X_Max_Retries", "9999")) # retry up to 9999
delay_duration = int(os.getenv("Http_X_Delay_Duration", "60")) # delay 60s by default
# Bump retries up one, since we're on a zero-based index.
retries = retries + 1
headers = {
"X-Retries": str(retries),
"X-Max-Retries": str(max_retries),
"X-Delay-Duration": str(delay_duration)
}
r = requests.post("http://mailbox:8080/deadletter/tweetpic", data=st, json=False, headers=headers)
print "Posting to Mailbox: ", r.status_code
if r.status_code!= 202:
print "Mailbox says: ", r.text
"""
Input:
{
"status_id": "twitter status ID",
"image": "minio_path_to_image.jpg"
"duration": 5.5
}
"""
def handle(st):
print("Incoming request " + str(st))
req = json.loads(st)
print("Parsed request")
filename = tempfile.gettempdir() + '/' + str(int(round(time.time() * 1000))) + '.jpg'
in_reply_to_status_id = req['status_id']
duration = req['duration']
with nostdout():
minioClient.fget_object('colorization', req['image'], filename)
with open(filename, 'rb') as image:
size = os.fstat(image.fileno()).st_size
im = Image.open(filename)
if size > 5 * 1048576:
maxsize = (1028, 1028)
im.thumbnail(maxsize, Image.ANTIALIAS)
im = im.convert("RGB")
im.save(filename, "JPEG")
image = open(filename, 'rb')
status_id = False
try:
status = api.PostUpdate("We just colourised your image in %.1f seconds. Find out how: https://goo.gl/cSK4Xu" % duration,
media=image,
auto_populate_reply_metadata=True,
in_reply_to_status_id=in_reply_to_status_id)
status_id = status.id
    except twitter.error.TwitterError as e:
for m in e.message:
if m['code'] == 34 or m['code'] == 385:
print('Tweet %i went missing' % in_reply_to_status_id)
if m['code'] == 88:
print('We hit the API limits, queuing %i' % in_reply_to_status_id)
requeue(st)
finally:
        # this is always run, regardless of whether we got an error or not
image.close()
return {
"reply_to": in_reply_to_status_id,
"status_id": status_id
}
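# Hedged invocation sketch (values are placeholders, not real data): ``handle``
# expects a JSON string matching the documented input, and the minio_* and
# Twitter credential environment variables must already be set.
#
#   example_payload = json.dumps({
#       "status_id": "1234567890123456789",
#       "image": "path/to/minio/object.jpg",
#       "duration": 5.5,
#   })
#   handle(example_payload)   # -> {"reply_to": ..., "status_id": ...}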
|
420891
|
import pytest
from MicroTokenizer.tokenizers.crf.tokenizer import CRFTokenizer
from MicroTokenizer.tokenizers.dag_tokenizer import DAGTokenizer
from MicroTokenizer.tokenizers.hmm_tokenizer import HMMTokenizer
from MicroTokenizer.tokenizers.max_match.backward import MaxMatchBackwardTokenizer
from MicroTokenizer.tokenizers.max_match.bidirectional import (
MaxMatchBidirectionalTokenizer,
)
from MicroTokenizer.tokenizers.max_match.forward import MaxMatchForwardTokenizer
from MicroTokenizer.training.train import train
# @pytest.mark.skip("It takes about 670s to complete, too slow")
def test_main(tmpdir, datadir):
input_file_list = [datadir / "data.txt"]
output_dir = str(tmpdir)
train(input_file_list, output_dir)
    input_text = "王小明在北京的清华大学读书。"
    # assertions start here
hmm_tokenizer_v2 = HMMTokenizer.load(output_dir)
result = hmm_tokenizer_v2.segment(input_text)
print(result)
crf_tokenizer_v2 = CRFTokenizer.load(output_dir)
result = crf_tokenizer_v2.segment(input_text)
print(result)
max_match_forward_tokenizer_v2 = MaxMatchForwardTokenizer.load(output_dir)
result = max_match_forward_tokenizer_v2.segment(input_text)
print(result)
max_match_backward_tokenizer_v2 = MaxMatchBackwardTokenizer.load(output_dir)
result = max_match_backward_tokenizer_v2.segment(input_text)
print(result)
max_match_bidirectional_tokenizer_v2 = MaxMatchBidirectionalTokenizer.load(
output_dir
)
result = max_match_bidirectional_tokenizer_v2.segment(input_text)
print(result)
dag_tokenizer_v2 = DAGTokenizer.load(output_dir)
result = dag_tokenizer_v2.segment(input_text)
print(result)
|
420899
|
import argparse
parser = argparse.ArgumentParser("add_sample_name")
parser.add_argument("sample", type=str,
help="namd of sample to be extracted from vcf and kept in bed format")
parser.add_argument("-v", '--version', type=str,
help="indicate the version (old / new) of the vcf set")
# define working directory and file locations:
args = parser.parse_args()
sample_name = args.sample
fin = open(sample_name)
fo = open(sample_name + '.with_name', 'w')
for line in fin:
pin = line.strip().split()
pin_new = pin + [sample_name.split('.')[0]]
print('\t'.join(pin_new), file=fo)
fin.close()
fo.close()
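# Illustrative example (hypothetical file name and coordinates): running
#   python add_sample_name.py NA12878.bed
# rewrites each tab-separated line such as
#   chr1    10000   10500   DEL
# into NA12878.bed.with_name as
#   chr1    10000   10500   DEL     NA12878
# i.e. the sample's base name is appended as a final column.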
|
420921
|
from django.db import models
from datetime import timedelta
from django.utils import timezone
import pytz
from django.conf import settings
from uuid import uuid4
utc = pytz.UTC
class Kiosk(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField("Name", max_length=30, unique=True)
kiosk_id = models.CharField("Kiosk Id", max_length=70, unique=True)
ip_address = models.GenericIPAddressField(
"IP Address of device", unique=True, null=True, blank=True
)
last_seen = models.DateTimeField(null=True)
play_theme = models.BooleanField("Play theme on door swipe", default=False)
authorised = models.BooleanField("Is this kiosk authorised?", default=False)
def checkin(self):
self.last_seen = timezone.now()
self.save()
def get_unavailable(self):
if self.last_seen:
if timezone.now() - timedelta(minutes=5) > self.last_seen:
return True
return False
def __str__(self):
return self.name
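# Hedged usage note (illustrative, assumes the app is migrated): a kiosk device
# periodically calls ``checkin()`` to stamp ``last_seen``; ``get_unavailable()``
# then returns True once more than five minutes pass without another check-in.
#
#   kiosk = Kiosk.objects.create(name="Front Door", kiosk_id="front-door-01")
#   kiosk.checkin()           # last_seen = now  -> get_unavailable() is False
#   # ... more than five minutes later -> get_unavailable() returns True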
class SiteSession(models.Model):
id = models.AutoField(primary_key=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
signin_date = models.DateTimeField(default=timezone.now)
signout_date = models.DateTimeField(null=True, blank=True)
guests = models.TextField(default="[]")
def signout(self):
self.signout_date = timezone.now()
self.save()
def __str__(self):
return f"{self.user.profile.get_full_name()} - in: {self.signin_date} out: {self.signout_date}"
class EmailVerificationToken(models.Model):
id = models.AutoField(primary_key=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
creation_date = models.DateTimeField(default=timezone.now)
verification_token = models.UUIDField(default=uuid4)
|
420941
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save, post_init
from announce.tasks import update_mailchimp_subscription
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
mailing_list_signup = models.BooleanField(default=False) # TODO remove this
email_confirmed_at = models.DateTimeField(null=True, blank=True)
interested_in_learning = models.CharField(max_length=500, blank=True)
communication_opt_in = models.BooleanField(default=False)
avatar = models.ImageField(blank=True)
bio = models.TextField(max_length=500, blank=True)
contact_url = models.URLField(max_length=256, blank=True)
city = models.CharField(max_length=256, blank=True)
region = models.CharField(max_length=256, blank=True) # schema.org. Algolia => administrative
country = models.CharField(max_length=256, blank=True)
latitude = models.DecimalField(max_digits=8, decimal_places=6, null=True, blank=True)
longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
place_id = models.CharField(max_length=256, blank=True) # Algolia place_id
@staticmethod
def post_save(sender, **kwargs):
instance = kwargs.get('instance')
        # Don't add accounts to mailchimp if they haven't confirmed their email
if not instance.email_confirmed_at:
return
# NOTE user will be 'removed' from mailchimp if they create an account and didn't
# opt in. This is to cater for situations where a user previously subscribed to the
# newsletter another way, but is only creating an account now
if instance.email_confirmed_at != instance._email_confirmed_at_old or instance._communication_opt_in_old != instance.communication_opt_in:
update_mailchimp_subscription(instance.user_id)
instance._communication_opt_in_old = instance.communication_opt_in
@staticmethod
def remember_state(sender, **kwargs):
instance = kwargs.get('instance')
instance._communication_opt_in_old = instance.communication_opt_in
instance._email_confirmed_at_old = instance.email_confirmed_at
def __str__(self):
return self.user.__str__()
post_save.connect(Profile.post_save, sender=Profile)
post_init.connect(Profile.remember_state, sender=Profile)
|
421071
|
from flask import Flask
from flask import render_template, request, abort
from flask_cors import CORS
app = Flask(__name__)
CORS(app, supports_credentials=True)
@app.route('/app', methods=['GET'])
def index():
try:
url1 = request.args.get("url1")
url2 = request.args.get("url2")
if url1 is None or url2 is None:
return 'Usage: compare.nazorip.site/app?url1=[PICTURE1_WEB_ADDR]&url2=[PICTURE2_WEB_ADDR]'
        url1, url2 = str(url1), str(url2)
    except Exception:
        abort(404)
    return render_template("index.html", pic_url1=url1, pic_url2=url2)
if __name__ == '__main__':
debug = True
if debug:
        from test import pic1, pic2
pic1 = app.route('/pic1.jpg')(pic1)
pic2 = app.route('/pic2.jpg')(pic2)
    app.run(port=5441, debug=debug)
|
421109
|
import argparse
import os
import commentjson
from stable_baselines import PPO2, SAC
from gym_kuka_mujoco.envs import *
from stable_baselines.common.vec_env import DummyVecEnv
# Add the parent folder to the python path for imports.
import sys
sys.path.insert(0, os.path.abspath('..'))
from play_model import replay_model
from impedance_peg_insertion import ManualImpedancePegInsertionPolicy
if __name__ == '__main__':
import warnings
# Setup command line arguments.
parser = argparse.ArgumentParser(description='Runs a learning example on a registered gym environment.')
parser.add_argument('--param_file',
type=str,
help='the parameter file to use')
args = parser.parse_args()
# Load the learning parameters from a file.
param_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'param_files')
param_file = os.path.join(param_dir, args.param_file)
with open(param_file) as f:
params = commentjson.load(f)
if params['env'] == "PegInsertionEnv":
if params['env_options']['controller'] == "ImpedanceControllerV2":
model = ManualImpedancePegInsertionPolicy()
else:
raise NotImplementedError
else:
raise NotImplementedError
# Visualize.
env_cls = globals()[params['env']]
env = env_cls(**params['env_options'])
replay_model(env, model)
|
421194
|
import math
import genconfig as gc
import genutils as gu
import loggerdb as ldb
from storage import indicators as storage
# Misc Indicator Helpers
class Helpers:
def SMA(list1, period):
if len(list1) >= period:
SMA = math.fsum(list1[-period:]) / period
return SMA
def EMA(list1, list2, period1):
if len(list1) >= period1:
Multi = 2 / (period1 + 1)
if list2:
EMA = ((list1[-1] - list2[-1]) * Multi) + list2[-1]
# First run, must use SMA to get started
elif len(list1) >= period1:
EMA = ((list1[-1] - Helpers.SMA(list1, period1)) * Multi)\
+ Helpers.SMA(list1, period1)
return EMA
def WMA(list1, list2, period):
        '''Wilder's Moving Average'''
if not list2:
WMA = Helpers.SMA(list1, period)
else:
WMA = ((list2[-1] * (period - 1)) + list1[-1]) / period
return WMA
def DEMA(list1, list2, period1):
if len(list1) >= 1:
DEMA = ((2 * list1[-1]) - Helpers.EMA(list1, list2, period1))
return DEMA
def FractalDimension(list1, period1):
sp = int(period1 / 2)
spe = len(list1) - sp
N1 = (max(list1[-sp:]) - min(list1[-sp:])) / sp
N2 = (max(list1[(abs(spe - sp)):spe]) -
min(list1[(spe - sp):spe])) / sp
N3 = (max(list1[-period1:]) - min(list1[-period1:])) / period1
D = (math.log(N1 + N2) - math.log(N3)) / math.log(2)
return D
def FRAMA(list1, list2, period1):
alpha = math.exp(gc.FRAMA.AlphaConstant
* (Helpers.FractalDimension(list1, period1) - 1))
if alpha < 0.01:
alpha = 0.01
elif alpha > 1:
alpha = 1
# If first run, use price instead of prev FRAMA before smoothing
if len(list2) > 1:
frama2 = list2[-2]
else:
frama2 = list1[-2]
frama = alpha * list1[-1] + (1 - alpha) * frama2
return frama
def FastStochK(list1, period):
if len(list1) >= period:
LowestPeriod = min(float(s) for s in list1[(period * -1):])
HighestPeriod = max(float(s) for s in list1[(period * -1):])
FastStochK = ((list1[-1] - LowestPeriod) / (HighestPeriod
- LowestPeriod)) * 100
return FastStochK
def Ichimoku(list1, period1):
PeriodList = list1[(period1 * -1):]
Ichi = (max(PeriodList) + min(PeriodList)) / 2
return Ichi
def StdDev(list1, period):
if len(list1) >= period:
MeanAvg = math.fsum(list1[(period * -1):]) / period
Deviation_list = [(i - MeanAvg) for i in list1[(period * -1):]]
DeviationSq_list = [i ** 2 for i in Deviation_list]
DeviationSqAvg = math.fsum(DeviationSq_list[(period * -1):])\
/ period
StandardDeviation = math.sqrt(DeviationSqAvg)
return StandardDeviation
def TrueRange(list1, period):
method1 = max(list1[-period:]) - min(list1[-period:])
method2 = abs(max(list1[-period:]) - list1[-period - 1])
method3 = abs(min(list1[-period:]) - list1[-period - 1])
truerange = max(method1, method2, method3)
return truerange
def ListDiff(list1, list2):
diff = 100 * (list1[-1] - list2[-1]) / ((list1[-1] + list2[-1]) / 2)
return diff
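# Hedged standalone example of the moving-average helpers above (prices are
# made up; real callers feed ldb.price_list and the persisted storage lists):
#
#   prices = [10.0, 10.5, 10.2, 10.8, 11.0, 11.3, 11.1]
#   Helpers.SMA(prices, 5)                    # mean of the last 5 prices
#   ema = [Helpers.EMA(prices, [], 5)]        # first EMA is seeded from the SMA
#   ema.append(Helpers.EMA(prices, ema, 5))   # later EMAs smooth against ema[-1]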
# Relative Strength Index
class RSI:
CandleDepends = gc.RSI.Period + 1
def indicator():
# We need a minimum of 2 candles to start RS calculations
if len(ldb.price_list) >= 2:
if ldb.price_list[-1] > ldb.price_list[-2]:
gain = ldb.price_list[-1] - ldb.price_list[-2]
storage.writelist('RSI_RS_gain_list', gain)
storage.writelist('RSI_RS_loss_list', 0)
elif ldb.price_list[-1] < ldb.price_list[-2]:
loss = ldb.price_list[-2] - ldb.price_list[-1]
storage.writelist('RSI_RS_loss_list', loss)
storage.writelist('RSI_RS_gain_list', 0)
# Do RS calculations if we have all requested periods
if len(storage.getlist('RSI_RS_gain_list')) >= gc.RSI.Period:
if len(storage.getlist('RSI_avg_gain_list')) > 1:
storage.writelist('RSI_avg_gain_list', ((storage.getlist('RSI_avg_gain_list')[-1] *
(gc.RSI.Period - 1))
+ storage.getlist('RSI_RS_gain_list')[-1])
/ gc.RSI.Period)
storage.writelist('RSI_avg_loss_list', ((storage.getlist('RSI_avg_loss_list')[-1] *
(gc.RSI.Period - 1))
+ storage.getlist('RSI_RS_loss_list')[-1])
/ gc.RSI.Period)
                # First run, can't yet apply smoothing
else:
storage.writelist('RSI_avg_gain_list', math.fsum(
storage.getlist('RSI_RS_gain_list')[(gc.RSI.Period * -1):]) / gc.RSI.Period)
storage.writelist('RSI_avg_loss_list', math.fsum(
storage.getlist('RSI_RS_loss_list')[(gc.RSI.Period * -1):]) / gc.RSI.Period)
# Calculate and append current RS to RS_list
storage.writelist('RSI_RS_list', storage.getlist(
'RSI_avg_gain_list')[-1] / storage.getlist('RSI_avg_loss_list')[-1])
# Calculate and append current RSI to ind_list
storage.writelist(
'RSI_ind_list', 100 - (100 / (1 + storage.getlist('RSI_RS_list')[-1])))
if 'RSI' in gc.Trader.VerboseIndicators:
if not storage.getlist('RSI_ind_list'):
print('RSI: Not yet enough data to calculate')
else:
print('RSI:', storage.getlist('RSI_ind_list')[-1])
# Simple Moving Average
class SMA:
CandleDepends = gc.SMA.LongPeriod
def indicator():
# We can start SMA calculations once we have max period candles
if len(ldb.price_list) >= max(gc.SMA.LongPeriod, gc.SMA.ShortPeriod):
storage.writelist(
'SMA_Short_list', Helpers.SMA(ldb.price_list, gc.SMA.ShortPeriod))
storage.writelist(
'SMA_Long_list', Helpers.SMA(ldb.price_list, gc.SMA.LongPeriod))
storage.writelist('SMA_Diff_list', Helpers.ListDiff(
storage.getlist('SMA_Short_list'), storage.getlist('SMA_Long_list')))
if 'SMA' in gc.Trader.VerboseIndicators:
if not storage.getlist('SMA_Long_list'):
print('SMA: Not yet enough data to determine trend')
else:
gu.PrintIndicatorTrend('SMA', storage.getlist('SMA_Short_list'), storage.getlist(
'SMA_Long_list'), storage.getlist('SMA_Diff_list'), gc.SMA.DiffDown, gc.SMA.DiffUp)
# Exponential Moving Average
class EMA:
CandleDepends = gc.EMA.LongPeriod
def indicator():
# We can start EMAs once we have max period candles
if len(ldb.price_list) >= max(gc.EMA.LongPeriod, gc.EMA.ShortPeriod):
storage.writelist('EMA_Short_list', Helpers.EMA(
ldb.price_list, storage.getlist('EMA_Short_list'), gc.EMA.ShortPeriod))
storage.writelist('EMA_Long_list', Helpers.EMA(
ldb.price_list, storage.getlist('EMA_Long_list'), gc.EMA.LongPeriod))
storage.writelist('EMA_Diff_list', Helpers.ListDiff(
storage.getlist('EMA_Short_list'), storage.getlist('EMA_Long_list')))
if 'EMA' in gc.Trader.VerboseIndicators:
if not storage.getlist('EMA_Long_list'):
print('EMA: Not yet enough data to determine trend')
else:
gu.PrintIndicatorTrend('EMA', storage.getlist('EMA_Short_list'), storage.getlist(
'EMA_Long_list'), storage.getlist('EMA_Diff_list'), gc.EMA.DiffDown, gc.EMA.DiffUp)
# Double Exponential Moving Average
class DEMA:
CandleDepends = gc.EMA.LongPeriod + (gc.EMA.ShortPeriod * 2)
IndicatorDepends = ['EMA']
def indicator():
# We can start DEMAs once we have max period candles
if len(storage.getlist('EMA_Long_list')) >= max(gc.EMA.LongPeriod, gc.EMA.ShortPeriod):
storage.writelist('DEMA_Short_list', Helpers.DEMA(storage.getlist(
'EMA_Short_list'), storage.getlist('DEMA_Short_list'), gc.EMA.ShortPeriod))
storage.writelist('DEMA_Long_list', Helpers.DEMA(storage.getlist(
'EMA_Long_list'), storage.getlist('DEMA_Long_list'), gc.EMA.LongPeriod))
storage.writelist('DEMA_Diff_list', Helpers.ListDiff(
storage.getlist('DEMA_Short_list'), storage.getlist('DEMA_Long_list')))
if 'DEMA' in gc.Trader.VerboseIndicators:
if not storage.getlist('DEMA_Long_list'):
print('DEMA: Not yet enough data to determine trend')
else:
gu.PrintIndicatorTrend('DEMA', storage.getlist('DEMA_Short_list'), storage.getlist(
'DEMA_Long_list'), storage.getlist('DEMA_Diff_list'), gc.DEMA.DiffDown, gc.DEMA.DiffUp)
# Exponential Moving Average (using wbic16's logic)
class EMAwbic:
CandleDepends = gc.EMAwbic.Period
def indicator():
if len(ldb.price_list) >= gc.EMAwbic.Period:
storage.writelist('EMAwbic_EMA_list', Helpers.EMA(
ldb.price_list, storage.getlist('EMAwbic_EMA_list'), gc.EMAwbic.Period))
storage.writelist('EMAwbic_ind_list', ((ldb.price_list[-1] - storage.getlist(
'EMAwbic_EMA_list')[-1]) / storage.getlist('EMAwbic_EMA_list')[-1]) * 100)
if 'EMAwbic' in gc.Trader.VerboseIndicators:
if not storage.getlist('EMAwbic_ind_list'):
print('EMAwbic: Not yet enough data to calculate')
else:
print('EMAwbic:', storage.getlist('EMAwbic_ind_list')[-1], '%')
# Fractal Adaptive Moving Average
class FRAMA:
CandleDepends = gc.FRAMA.LongPeriod
def indicator():
# We can start FRAMAs once we have max period candles
if len(ldb.price_list) >= (max(gc.FRAMA.LongPeriod, gc.FRAMA.ShortPeriod)):
try:
storage.writelist('FRAMA_Short_list', Helpers.FRAMA(
ldb.price_list, storage.getlist('FRAMA_Short_list'), gc.FRAMA.ShortPeriod))
storage.writelist('FRAMA_Long_list', Helpers.FRAMA(
ldb.price_list, storage.getlist('FRAMA_Long_list'), gc.FRAMA.LongPeriod))
storage.writelist('FRAMA_Diff_list', Helpers.ListDiff(
storage.getlist('FRAMA_Short_list'), storage.getlist('FRAMA_Long_list')))
# For a math domain error from log operations at low volatility or high
# frequency
except ValueError:
pass
if 'FRAMA' in gc.Trader.VerboseIndicators:
if not storage.getlist('FRAMA_Long_list'):
print('FRAMA: Not yet enough data to determine trend')
else:
gu.PrintIndicatorTrend('FRAMA', storage.getlist('FRAMA_Short_list'), storage.getlist(
'FRAMA_Long_list'), storage.getlist('FRAMA_Diff_list'), gc.FRAMA.DiffDown, gc.FRAMA.DiffUp)
# Moving Average Convergence Divergence
class MACD:
CandleDepends = gc.MACD.LongPeriod + (gc.MACD.ShortPeriod / 2)
def indicator():
# We can start MACD EMAs once we have max period candles
if len(ldb.price_list) >= max(gc.MACD.LongPeriod, gc.MACD.ShortPeriod):
storage.writelist('MACD_Short_list', Helpers.EMA(
ldb.price_list, storage.getlist('MACD_Short_list'), gc.MACD.ShortPeriod))
storage.writelist('MACD_Long_list', Helpers.EMA(
ldb.price_list, storage.getlist('MACD_Long_list'), gc.MACD.LongPeriod))
storage.writelist('MACD_ind_list', storage.getlist(
'MACD_Short_list')[-1] - storage.getlist('MACD_Long_list')[-1])
# We need SignalPeriod MACDs before generating MACDSignal
if len(storage.getlist('MACD_Long_list')) >= gc.MACD.SignalPeriod:
storage.writelist('MACD_Signal_list', Helpers.EMA(storage.getlist(
'MACD_ind_list'), storage.getlist('MACD_Signal_list'), gc.MACD.SignalPeriod))
storage.writelist('MACD_Histogram_list', storage.getlist(
'MACD_ind_list')[-1] - storage.getlist('MACD_Signal_list')[-1])
if 'MACD' in gc.Trader.VerboseIndicators:
if not storage.getlist('MACD_Signal_list'):
print('MACD: Not yet enough data to determine trend')
else:
gu.PrintIndicatorTrend('MACD', storage.getlist('MACD_ind_list'), storage.getlist(
'MACD_Signal_list'), storage.getlist('MACD_ind_list'), gc.MACD.DiffDown, gc.MACD.DiffUp)
print('MACD Hist:', storage.getlist('MACD_Histogram_list')[-1])
# Double Moving Average Convergence Divergence
class DMACD:
IndicatorDepends = ['MACD']
CandleDepends = (gc.MACD.LongPeriod + (gc.MACD.ShortPeriod / 2)) * 2
def indicator():
# We can start DEMAs once we have max period candles
if len(storage.getlist('MACD_Long_list')) >= max(gc.MACD.LongPeriod, gc.MACD.ShortPeriod):
storage.writelist('DMACD_Short_list', Helpers.DEMA(storage.getlist(
'MACD_Short_list'), storage.getlist('DMACD_Short_list'), gc.MACD.ShortPeriod))
storage.writelist('DMACD_Long_list', Helpers.DEMA(storage.getlist(
'MACD_Long_list'), storage.getlist('DMACD_Long_list'), gc.MACD.LongPeriod))
storage.writelist('DMACD_ind_list', storage.getlist(
'DMACD_Short_list')[-1] - storage.getlist('DMACD_Long_list')[-1])
# We need MACDSignal DMACDs before generating Signal
if len(storage.getlist('DMACD_Long_list')) >= (gc.MACD.SignalPeriod +
(abs(gc.MACD.SignalPeriod - gc.MACD.LongPeriod))):
storage.writelist('DMACD_Signal_list', Helpers.DEMA(storage.getlist(
'MACD_Signal_list'), storage.getlist('DMACD_Signal_list'), gc.MACD.SignalPeriod))
storage.writelist('DMACD_Histogram_list', storage.getlist(
'DMACD_ind_list')[-1] - storage.getlist('DMACD_Signal_list')[-1])
if 'DMACD' in gc.Trader.VerboseIndicators:
if not storage.getlist('DMACD_Signal_list'):
print('DMACD: Not yet enough data to determine trend')
else:
gu.PrintIndicatorTrend('DMACD', storage.getlist('DMACD_ind_list'), storage.getlist(
'DMACD_Signal_list'), storage.getlist('DMACD_ind_list'), gc.DMACD.DiffDown, gc.DMACD.DiffUp)
# Fast Stochastic %K
class FastStochK:
CandleDepends = gc.FastStochK.Period
def indicator():
# We can start FastStochK calculations once we have FastStochKPeriod
# candles, otherwise we append None until met
if len(ldb.price_list) >= gc.FastStochK.Period:
try:
storage.writelist('FastStochK_ind_list', Helpers.FastStochK(
ldb.price_list, gc.FastStochK.Period))
except ZeroDivisionError:
pass
if 'FastStochK' in gc.Trader.VerboseIndicators:
if not storage.getlist('FastStochK_ind_list'):
print('FastStochK: Not yet enough data to calculate')
else:
print('FastStochK:', storage.getlist('FastStochK_ind_list')[-1])
# Fast Stochastic %D
class FastStochD:
IndicatorDepends = ['FastStochK']
CandleDepends = gc.FastStochK.Period + (gc.FastStochD.Period - 1)
def indicator():
# We can start FastStochD calculations once we have FastStochDPeriod
# candles, otherwise we append None until met
if len(ldb.price_list) >= gc.FastStochD.Period:
storage.writelist('FastStochD_ind_list', Helpers.SMA(
storage.getlist('FastStochK_ind_list'), gc.FastStochD.Period))
if 'FastStochD' in gc.Trader.VerboseIndicators:
if not storage.getlist('FastStochD_ind_list'):
print('FastStochD: Not yet enough data to calculate')
else:
print('FastStochD:', storage.getlist('FastStochD_ind_list')[-1])
# Full Stochastic %D
class FullStochD:
IndicatorDepends = ['FastStochK', 'FastStochD']
CandleDepends = gc.FastStochK.Period + \
(gc.FastStochD.Period - 1) + (gc.FullStochD.Period - 1)
def indicator():
# We can start FullStochD calculations once we have FullStochDPeriod
# candles, otherwise we append None until met
if len(storage.getlist('FastStochD_ind_list')) >= gc.FullStochD.Period:
storage.writelist('FullStochD_ind_list', Helpers.SMA(
storage.getlist('FastStochD_ind_list'), gc.FullStochD.Period))
if 'FullStochD' in gc.Trader.VerboseIndicators:
if not storage.getlist('FullStochD_ind_list'):
print('FullStochD: Not yet enough data to calculate')
else:
print('FullStochD:', storage.getlist('FullStochD_ind_list')[-1])
# Fast Stochastic RSI %K
class FastStochRSIK:
IndicatorDepends = ['RSI']
CandleDepends = (gc.RSI.Period * 2) + (gc.FastStochRSIK.Period - 1)
def indicator():
# We can start FastStochRSIK calculations once we have
# FastStochRSIKPeriod candles, otherwise we append None until met
if len(storage.getlist('RSI_ind_list')) >= gc.FastStochRSIK.Period:
try:
storage.writelist('FastStochRSIK_ind_list', Helpers.FastStochK(
storage.getlist('RSI_ind_list'), gc.FastStochRSIK.Period))
except ZeroDivisionError:
pass
if 'FastStochRSIK' in gc.Trader.VerboseIndicators:
if not storage.getlist('FastStochRSIK_ind_list'):
print('FastStochRSIK: Not yet enough data to calculate')
else:
print('FastStochRSIK:', storage.getlist('FastStochRSIK_ind_list')[-1])
# Fast Stochastic RSI %D
class FastStochRSID:
IndicatorDepends = ['RSI', 'FastStochRSIK']
CandleDepends = (gc.RSI.Period * 2) + \
(gc.FastStochRSIK.Period - 1) + (gc.FastStochRSID.Period - 1)
def indicator():
# We can start FastStochRSID calculations once we have
# FastStochRSIDPeriod candles, otherwise we append None until met
if len(storage.getlist('FastStochRSIK_ind_list')) >= gc.FastStochRSID.Period:
storage.writelist('FastStochRSID_ind_list', Helpers.SMA(
storage.getlist('FastStochRSIK_ind_list'), gc.FastStochRSID.Period))
if 'FastStochRSID' in gc.Trader.VerboseIndicators:
if not storage.getlist('FastStochRSID_ind_list'):
print('FastStochRSID: Not yet enough data to calculate')
else:
print('FastStochRSID:', storage.getlist('FastStochRSID_ind_list')[-1])
# Full Stochastic RSI %D
class FullStochRSID:
IndicatorDepends = ['RSI', 'FastStochRSIK', 'FastStochRSID']
CandleDepends = (gc.RSI.Period * 2) + (gc.FastStochRSIK.Period - 1) + \
(gc.FastStochRSID.Period - 1) + (gc.FullStochRSID.Period - 1)
def indicator():
# We can start FullStochRSID calculations once we have
# FullStochRSIDPeriod candles, otherwise we append None until met
if len(storage.getlist('FastStochRSID_ind_list')) >= gc.FullStochRSID.Period:
            storage.writelist('FullStochRSID_ind_list', Helpers.SMA(
                storage.getlist('FastStochRSID_ind_list'), gc.FullStochRSID.Period))
if 'FullStochRSID' in gc.Trader.VerboseIndicators:
if not storage.getlist('FullStochRSID_ind_list'):
print('FullStochRSID: Not yet enough data to calculate')
else:
print('FullStochRSID:', storage.getlist('FullStochRSID_ind_list')[-1])
# KDJ
class KDJ:
CandleDepends = gc.KDJ.FastKPeriod + \
gc.KDJ.FullKPeriod + (gc.KDJ.FullDPeriod - 2)
def indicator():
if len(ldb.price_list) >= gc.KDJ.FastKPeriod:
try:
storage.writelist(
'KDJ_FastK_list', Helpers.FastStochK(ldb.price_list, gc.KDJ.FastKPeriod))
except ZeroDivisionError:
pass
if len(storage.getlist('KDJ_FastK_list')) >= gc.KDJ.FullKPeriod:
storage.writelist('KDJ_FullK_list', Helpers.SMA(
storage.getlist('KDJ_FastK_list'), gc.KDJ.FullKPeriod))
if len(storage.getlist('KDJ_FullK_list')) >= gc.KDJ.FullDPeriod:
storage.writelist('KDJ_FullD_list', Helpers.SMA(
storage.getlist('KDJ_FullK_list'), gc.KDJ.FullDPeriod))
if storage.getlist('KDJ_FullD_list'):
storage.writelist('KDJ_J_list', (3 * storage.getlist('KDJ_FullD_list')
[-1]) - (2 * storage.getlist('KDJ_FullK_list')[-1]))
if 'KDJ' in gc.Trader.VerboseIndicators:
if not storage.getlist('KDJ_J_list'):
print('KDJ: Not yet enough data to determine trend or calculate')
else:
gu.PrintIndicatorTrend('KDJ', storage.getlist('KDJ_FullK_list'), storage.getlist(
'KDJ_FullD_list'), storage.getlist('KDJ_J_list'), gc.KDJ.Bid, gc.KDJ.Ask, False)
# Aroon Oscillator
class Aroon:
CandleDepends = gc.Aroon.Period
def indicator():
# We must have AroonPeriod ldb.price_list candles
if len(ldb.price_list) >= gc.Aroon.Period:
storage.writelist('Aroon_Up_list', 100 * (gc.Aroon.Period -
(gc.Aroon.Period - ([i for i, x in enumerate(ldb.price_list)
if x == max(ldb.price_list[(gc.Aroon.Period * -1):])][0] + 1
)) / gc.Aroon.Period))
storage.writelist('Aroon_Down_list', 100 * (gc.Aroon.Period -
(gc.Aroon.Period - ([i for i, x in enumerate(ldb.price_list)
if x == min(ldb.price_list[(gc.Aroon.Period * -1):])][0] + 1
)) / gc.Aroon.Period))
storage.writelist('Aroon_ind_list', storage.getlist(
'Aroon_Up_list')[-1] - storage.getlist('Aroon_Down_list')[-1])
if 'Aroon' in gc.Trader.VerboseIndicators:
if not storage.getlist('Aroon_ind_list'):
print('Aroon: Not yet enough data to determine trend or calculate')
else:
gu.PrintIndicatorTrend('Aroon', storage.getlist('Aroon_Up_list'), storage.getlist(
'Aroon_Down_list'), storage.getlist('Aroon_ind_list'), gc.Aroon.Bid, gc.Aroon.Ask, False)
# Ichimoku Cloud
class Ichimoku:
CandleDepends = gc.Ichimoku.TenkanSenPeriod + \
gc.Ichimoku.SenkouSpanPeriod + gc.Ichimoku.KijunSenPeriod
def indicator():
# We must have SenkouSpanPeriod price candles before starting
# calculations, otherwise we append None
# NOTE: Chikou Span's cool and all, but we don't care. We want to trade in
# real time, and price list 26 periods behind only confirms if we *were*
# right or wrong
if len(ldb.price_list) >= gc.Ichimoku.SenkouSpanPeriod:
storage.writelist('Ichimoku_TenkanSen_list', Helpers.Ichimoku(
ldb.price_list, gc.Ichimoku.TenkanSenPeriod))
storage.writelist('Ichimoku_KijunSen_list', Helpers.Ichimoku(
ldb.price_list, gc.Ichimoku.KijunSenPeriod))
storage.writelist('Ichimoku_SenkouSpanART_list', (storage.getlist(
'Ichimoku_TenkanSen_list')[-1] + storage.getlist('Ichimoku_KijunSen_list')[-1]) / 2)
storage.writelist('Ichimoku_SenkouSpanBRT_list', Helpers.Ichimoku(
ldb.price_list, gc.Ichimoku.SenkouSpanPeriod))
# We need SenkouSpan to be ChikouSpanPeriod in the future
if len(storage.getlist('Ichimoku_SenkouSpanBRT_list')) >= gc.Ichimoku.ChikouSpanPeriod:
storage.writelist('Ichimoku_SenkouSpanA_list', storage.getlist(
'Ichimoku_SenkouSpanART_list')[-gc.Ichimoku.ChikouSpanPeriod])
storage.writelist('Ichimoku_SenkouSpanB_list', storage.getlist(
'Ichimoku_SenkouSpanBRT_list')[-gc.Ichimoku.ChikouSpanPeriod])
# Don't want to implement a new trade strategy, so just treat
# Ichimoku lists as threshold strategies for IndicatorList.
if storage.getlist('Ichimoku_SenkouSpanB_list'):
CloudMin = min(storage.getlist('Ichimoku_SenkouSpanA_list')
[-1], storage.getlist('Ichimoku_SenkouSpanB_list')[-1])
CloudMax = max(storage.getlist('Ichimoku_SenkouSpanA_list')
[-1], storage.getlist('Ichimoku_SenkouSpanB_list')[-1])
CP = ldb.price_list[-1]
KS = storage.getlist('Ichimoku_KijunSen_list')[-1]
TS = storage.getlist('Ichimoku_TenkanSen_list')[-1]
# Strong Signals
if CP > CloudMin and CP < KS and CP > TS:
# BUY!
storage.writelist('Ichimoku_Strong_list', -1)
StrongTrend = 'Bullish'
elif CP < CloudMax and CP > KS and CP < TS:
# SELL!
storage.writelist('Ichimoku_Strong_list', 1)
StrongTrend = 'Bearish'
else:
storage.writelist('Ichimoku_Strong_list', 0)
StrongTrend = 'No trend'
# Optimized Signals
if CP > CloudMin and TS > KS:
# BUY!
storage.writelist('Ichimoku_Optimized_list', -1)
OptimizedTrend = 'Bullish'
elif CP < CloudMax and KS > TS:
# SELL!
storage.writelist('Ichimoku_Optimized_list', 1)
OptimizedTrend = 'Bearish'
else:
storage.writelist('Ichimoku_Optimized_list', 0)
OptimizedTrend = 'No trend'
# Weak Signals
if TS > KS:
# BUY!
storage.writelist('Ichimoku_Weak_list', -1)
WeakTrend = 'Bullish'
elif KS > TS:
# SELL!
storage.writelist('Ichimoku_Weak_list', 1)
WeakTrend = 'Bearish'
else:
storage.writelist('Ichimoku_Weak_list', 0)
WeakTrend = 'No trend'
# Store price cloud history
if CP < CloudMin:
# Below
storage.writelist('Ichimoku_CloudHistory_list', -1)
elif CP > CloudMin and CP < CloudMax:
# Inside
storage.writelist('Ichimoku_CloudHistory_list', 0)
elif CP > CloudMax:
# Above
storage.writelist('Ichimoku_CloudHistory_list', 1)
# CloudOnly signals
CH = storage.getlist('Ichimoku_CloudHistory_list')
if len(CH) > 1:
if CH[-2] == -1 and CH[-1] == 0:
# Buy
storage.writelist('Ichimoku_CloudOnly_list', -1)
CloudOnlyTrend = 'Bullish'
elif CH[-2] == 0 and CH[-1] == 1:
# Buy
storage.writelist('Ichimoku_CloudOnly_list', -1)
CloudOnlyTrend = 'Bullish'
elif CH[-2] == -1 and CH[-1] == 1:
# Buy
storage.writelist('Ichimoku_CloudOnly_list', -1)
CloudOnlyTrend = 'Bullish'
elif CH[-2] == 1 and CH[-1] == 0:
# Sell
storage.writelist('Ichimoku_CloudOnly_list', 1)
CloudOnlyTrend = 'Bearish'
elif CH[-2] == 0 and CH[-1] == -1:
# Sell
storage.writelist('Ichimoku_CloudOnly_list', 1)
CloudOnlyTrend = 'Bearish'
elif CH[-2] == 1 and CH[-1] == -1:
# Sell
storage.writelist('Ichimoku_CloudOnly_list', 1)
CloudOnlyTrend = 'Bearish'
else:
# No signal
storage.writelist('Ichimoku_CloudOnly_list', 0)
CloudOnlyTrend = 'No new signal'
else:
# Generate initial CloudOnly signal
if CH[-1] == -1:
# Sell
storage.writelist('Ichimoku_CloudOnly_list', 1)
CloudOnlyTrend = 'Bearish'
elif CH[-1] == 1:
# Buy
storage.writelist('Ichimoku_CloudOnly_list', -1)
CloudOnlyTrend = 'Bullish'
else:
storage.writelist('Ichimoku_CloudOnly_list', 0)
CloudOnlyTrend = 'Need more cloud history'
if gc.Ichimoku.IndicatorStrategy == 'Strong':
trend = StrongTrend
elif gc.Ichimoku.IndicatorStrategy == 'Weak':
trend = WeakTrend
elif gc.Ichimoku.IndicatorStrategy == 'Optimized':
trend = OptimizedTrend
elif gc.Ichimoku.IndicatorStrategy == 'CloudOnly':
trend = CloudOnlyTrend
if 'Ichimoku' in gc.Trader.VerboseIndicators:
print('Ichimoku:', trend)
else:
if 'Ichimoku' in gc.Trader.VerboseIndicators:
print('Ichimoku: Not yet enough data to determine trend or calculate')
# Volatility/Movement Strength Indicators/Indexes
# Sample Standard Deviation
class StdDev:
CandleDepends = gc.StdDev.Period
def indicator():
# We can start StdDev calculations once we have StdDevSample
# candles, otherwise we append None until met
if len(ldb.price_list) >= gc.StdDev.Period:
storage.writelist(
'StdDev_ind_list', Helpers.StdDev(ldb.price_list, gc.StdDev.Period))
if 'StdDev' in gc.Trader.VerboseIndicators:
if storage.getlist('StdDev_ind_list'):
print('StdDev:', storage.getlist('StdDev_ind_list')[-1])
else:
print('StdDev: Not yet enough data to calculate')
# Bollinger Bands
class BollBands:
CandleDepends = gc.BollBands.Period
def indicator():
# We can start BollBand calculations once we have BollBandPeriod candles
if len(ldb.price_list) >= gc.BollBands.Period:
storage.writelist(
'BollBands_Middle_list', Helpers.SMA(ldb.price_list, gc.BollBands.Period))
storage.writelist('BollBands_Upper_list', storage.getlist(
'BollBands_Middle_list')[-1] + (Helpers.StdDev(ldb.price_list, gc.BollBands.Period) * 2))
storage.writelist('BollBands_Lower_list', storage.getlist(
'BollBands_Middle_list')[-1] - (Helpers.StdDev(ldb.price_list, gc.BollBands.Period) * 2))
# Bollinger Bandwidth
class BollBandwidth:
CandleDepends = gc.BollBands.Period
IndicatorDepends = ['BollBands']
def indicator():
# We can start BollBandwidth calculations once we have BollBands
if storage.getlist('BollBands_Lower_list'):
storage.writelist('BollBandwidth_ind_list', (storage.getlist(
'BollBands_Upper_list')[-1] - storage.getlist('BollBands_Lower_list')[-1]) / storage.getlist('BollBands_Middle_list')[-1])
if 'BollBandwidth' in gc.Trader.VerboseIndicators:
if storage.getlist('BollBandwidth_ind_list'):
print('BollBandwidth:', storage.getlist('BollBandwidth_ind_list')[-1])
else:
print('BollBandwidth: Not yet enough data to calculate')
# Average True Range
class ATR:
CandleDepends = (gc.ATR.Period * 3) - 1
def indicator():
# We can start ATR calculations once we have two periods
if len(ldb.price_list) >= (gc.ATR.Period * 2):
storage.writelist(
'ATR_TR_list', Helpers.TrueRange(ldb.price_list, gc.ATR.Period))
if len(storage.getlist('ATR_TR_list')) >= gc.ATR.Period:
storage.writelist('ATR_ind_list', Helpers.WMA(
storage.getlist('ATR_TR_list'), storage.getlist('ATR_ind_list'), gc.ATR.Period))
if 'ATR' in gc.Trader.VerboseIndicators:
if storage.getlist('ATR_ind_list'):
print('ATR:', storage.getlist('ATR_ind_list')[-1])
else:
print('ATR: Not yet enough data to calculate')
# Chandelier Exit
class ChandExit:
CandleDepends = (gc.ChandExit.Period * gc.ChandExit.Multiplier) - 1
def indicator():
# We can start calculations once we have two periods
if len(ldb.price_list) >= (gc.ChandExit.Period * 2):
storage.writelist(
'ChandExit_TR_list', Helpers.TrueRange(ldb.price_list, gc.ChandExit.Period))
if len(storage.getlist('ChandExit_TR_list')) >= gc.ChandExit.Period:
try:
storage.writelist('ChandExit_ATR_list', Helpers.WMA(storage.getlist(
'ChandExit_TR_list'), storage.getlist('ChandExit_ATR_list'), gc.ChandExit.Period))
storage.writelist('ChandExit_Long_list', max(
ldb.price_list[-gc.ChandExit.Period:]) - storage.getlist('ChandExit_ATR_list')[-1] * gc.ChandExit.Multiplier)
storage.writelist('ChandExit_Short_list', min(
ldb.price_list[-gc.ChandExit.Period:]) + storage.getlist('ChandExit_ATR_list')[-1] * gc.ChandExit.Multiplier)
# For an empty sequence at low volatility or high frequency
except ValueError:
pass
            # Use a hack for determining signals despite its intended confirmation
            # usage
cp = ldb.price_list[-1]
if cp < storage.getlist('ChandExit_Long_list')[-1]:
storage.writelist('ChandExit_signal_list', 1)
elif cp > storage.getlist('ChandExit_Short_list')[-1]:
storage.writelist('ChandExit_signal_list', -1)
if 'ChandExit' in gc.Trader.VerboseIndicators:
if storage.getlist('ChandExit_Short_list'):
print('ChandExit: Short:',
storage.getlist('ChandExit_Short_list')[-1], 'Long:',
storage.getlist('ChandExit_Long_list')[-1])
else:
print('ChandExit: Not yet enough data to calculate')
# Directional Movement
class DMI:
CandleDepends = gc.ATR.Period * 5
DMITrend = 'No Trend'
def indicator():
# We can start DMI calculations once we have two ATR periods
if len(ldb.price_list) >= (gc.ATR.Period * 2):
UpMove = max(ldb.price_list[-gc.ATR.Period:]) - max(
ldb.price_list[(len(ldb.price_list) - (gc.ATR.Period * 2)):-gc.ATR.Period])
DownMove = min(ldb.price_list[-gc.ATR.Period:]) - min(
ldb.price_list[(len(ldb.price_list) - (gc.ATR.Period * 2)):-gc.ATR.Period])
if UpMove < 0 and DownMove < 0:
storage.writelist('DMI_PosDM_list', 0)
storage.writelist('DMI_NegDM_list', 0)
elif UpMove > DownMove:
storage.writelist('DMI_PosDM_list', UpMove)
storage.writelist('DMI_NegDM_list', 0)
elif UpMove < DownMove:
storage.writelist('DMI_PosDM_list', 0)
storage.writelist('DMI_NegDM_list', DownMove)
if len(storage.getlist('DMI_PosDM_list')) >= gc.ATR.Period and len(storage.getlist('ATR_TR_list')) >= gc.ATR.Period:
storage.writelist('DMI_PosDMWMA_list',
Helpers.WMA(storage.getlist('DMI_PosDM_list'),
storage.getlist('DMI_PosDMWMA_list'), gc.ATR.Period))
storage.writelist('DMI_NegDMWMA_list',
Helpers.WMA(storage.getlist('DMI_NegDM_list'),
storage.getlist('DMI_NegDMWMA_list'), gc.ATR.Period))
storage.writelist('DMI_PosDI_list',
storage.getlist('DMI_PosDMWMA_list')[-1]
/ storage.getlist('ATR_ind_list')[-1])
storage.writelist('DMI_NegDI_list',
storage.getlist('DMI_NegDMWMA_list')[-1]
/ storage.getlist('ATR_ind_list')[-1])
DIDiff = abs(storage.getlist('DMI_PosDI_list')[-1]
- storage.getlist('DMI_NegDI_list')[-1])
try:
storage.writelist('DMI_DX_list', DIDiff
/ (storage.getlist('DMI_PosDI_list')[-1]
+ storage.getlist('DMI_NegDI_list')[-1]))
# ADX
if len(storage.getlist('DMI_DX_list')) >= (gc.ATR.Period * 2):
storage.writelist('DMI_ind_list',
Helpers.WMA(storage.getlist('DMI_DX_list'),
storage.getlist('DMI_ind_list'), gc.ATR.Period))
except ZeroDivisionError:
pass
# Hack for trading with both DI crossovers and ADX threshold.
if storage.getlist('DMI_ind_list'):
if storage.getlist('DMI_ind_list')[-1] > gc.DMI.Threshold:
if storage.getlist('DMI_PosDI_list')[-1] > storage.getlist('DMI_NegDI_list')[-1]:
# Buy
storage.writelist('DMI_DMISignal_list', -1)
DMI.DMITrend = 'Uptrend'
elif storage.getlist('DMI_PosDI_list')[-1] < storage.getlist('DMI_NegDI_list')[-1]:
# Sell
storage.writelist('DMI_DMISignal_list', 1)
DMI.DMITrend = 'Downtrend'
else:
storage.writelist('DMI_DMISignal_list', 0)
DMI.DMITrend = 'No trend'
else:
storage.writelist('DMI_DMISignal_list', 0)
DMI.DMITrend = 'No trend'
if 'DMI' in gc.Trader.VerboseIndicators:
if storage.getlist('DMI_ind_list'):
if gc.DMI.IndicatorStrategy == 'Full':
print('DMI:', DMI.DMITrend)
else:
print('ADX:', storage.getlist('DMI_ind_list')[-1])
else:
print('DMI: Not yet enough data to calculate')
# (Simple) Rate of Change (Momentum)
class SROC:
CandleDepends = gc.SROC.Period + 1
def indicator():
# We can start ROC calculations once we have SROC Periods of Price
if len(ldb.price_list) >= gc.SROC.Period:
storage.writelist(
'SROC_SROC_list', ldb.price_list[-1] - ldb.price_list[-gc.SROC.Period])
# Treat as a diff strat so we don't need to add strategy support
if len(storage.getlist('SROC_SROC_list')) >= 2:
s = storage.getlist('SROC_SROC_list')
if s[-1] > 0 and s[-2] <= 0:
# BUY!
storage.writelist('SROC_ind_list', -1)
trend = 'an Uptrend'
elif s[-1] < 0 and s[-2] >= 0:
# SELL!
storage.writelist('SROC_ind_list', 1)
trend = 'a Downtrend'
else:
# No signal
storage.writelist('SROC_ind_list', 0)
trend = 'No trend'
if 'SROC' in gc.Trader.VerboseIndicators:
print('SROC: We are in ', trend)
|
421385
|
import torch
import sys, os, json
import warnings
warnings.filterwarnings("ignore")
import nltk
nltk.download('punkt')
sys.path.append("..")
from torch_geometric.utils import to_dense_adj
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Cache_GNN_Embeds():
def __init__(self, config, model):
self.config = config
self.model = model
self.model_file = config['model_file']
self.comp_dir = os.path.join('data', 'complete_data', config['data_name'])
self.data_dir = os.path.join('FakeHealth')
        if config['data_name'] in ['gossipcop', 'politifact']:
self.predict_and_cache_fakenews()
elif config['data_name'] in ['HealthStory', 'HealthRelease']:
self.predict_and_cache_fakehealth()
def predict_and_cache_fakenews(self):
print("\n\nCaching FakeNews dataset : ", self.config['data_name'])
checkpoint = torch.load(self.model_file)
self.model.load_state_dict(checkpoint['model_state_dict'])
node2id_file = os.path.join(self.comp_dir, 'node2id_lr_30_30.json')
docs_splits_file = os.path.join(self.comp_dir, 'doc_splits_lr.json')
self.node2id = json.load(open(node2id_file, 'r'))
self.doc_splits = json.load(open(docs_splits_file, 'r'))
self.train_docs = self.doc_splits['train_docs']
self.val_docs = self.doc_splits['val_docs']
self.test_docs = self.doc_splits['test_docs']
self.prepare_id2node()
self.obtain_user_representations()
self.obtain_doc_representations()
def predict_and_cache_fakehealth(self):
print("\n\nCaching FakeHealth dataset : ", self.config['data_name'])
checkpoint = torch.load(self.model_file)
self.model.load_state_dict(checkpoint['model_state_dict'])
node2id_file = os.path.join(self.comp_dir, 'node2id_lr_top10.json')
self.node2id = json.load(open(node2id_file, 'r'))
docs_splits_file = os.path.join(self.data_dir, 'doc_splits_{}.json'.format(self.config['data_name']))
self.doc_splits = json.load(open(docs_splits_file, 'r'))
self.train_docs = self.doc_splits['train_docs']
self.val_docs = self.doc_splits['val_docs']
self.test_docs = self.doc_splits['test_docs']
self.prepare_id2node()
self.obtain_user_representations()
self.obtain_doc_representations()
def prepare_id2node(self):
print("\n\nPreparing id2node dict..")
self.id2node = {}
for node, idx in self.node2id.items():
self.id2node[int(idx)] = node
def obtain_user_representations(self):
print("\nObtaining user representations...")
self.user_cache = torch.zeros(len(self.node2id), self.config['embed_dim']) if self.config['model_name'] not in ['gat', 'rgat'] else torch.zeros(len(self.node2id), 3*self.config['embed_dim'])
with torch.no_grad():
self.model.eval()
if self.config['full_graph']:
_, _, node_embeds = self.model(self.config['data'].x.to(device), self.config['data'].edge_index.to(device))
for idx, ids in enumerate(self.node2id.values()):
node = self.id2node[int(ids)]
if not str(node).startswith('g'):
self.user_cache[ids, :] = node_embeds[ids, :]
else:
for iters, batch_data in enumerate(self.config['loader']):
if self.config['model_name'] in ['rgcn', 'rgat', 'rsage']:
_, _, node_embeds = self.model(batch_data.x.to(device), batch_data.edge_index.to(device), batch_data.edge_attr.to(device))
elif self.config['model_name'] == 'HGCN':
batch_data.edge_index = to_dense_adj(batch_data.edge_index).squeeze(0)
preds = self.model.encode(batch_data.x.to(device), batch_data.edge_index.to(device))
_, node_embeds = self.model.decode(preds.to(device), batch_data.edge_index.to(device))
else:
_, _, node_embeds = self.model(batch_data.x.to(device), batch_data.edge_index.to(device))
for idx, ids in enumerate(batch_data.node2id):
node = self.id2node[int(ids)]
if not str(node).startswith('g'):
self.user_cache[ids, :] = node_embeds[idx, :]
name = 'user_embeds_graph_lr' if self.config['data_name'] in ['HealthRelease', 'HealthStory'] else 'user_embeds_graph_lr_30_30'
user_embed_file = os.path.join(self.comp_dir, 'cached_embeds', '{}_{}_{}.pt'.format(name, self.config['seed'], self.config['model_name']))
print("\nSaving user embeddings in : ", user_embed_file)
torch.save(self.user_cache, user_embed_file)
def obtain_doc_representations(self):
splits = ['train', 'val', 'test']
for split in splits:
print("\nObtaining {} doc representations...".format(split))
if split == 'train':
split_docs = self.train_docs
elif split == 'val':
split_docs = self.val_docs
else:
split_docs = self.test_docs
split_doc_cache = torch.zeros(len(self.node2id), self.config['embed_dim']) if self.config['model_name'] != 'gat' else torch.zeros(len(self.node2id), 3*self.config['embed_dim'])
print("split_doc_cache shape = ", split_doc_cache.shape)
for count, doc in enumerate(split_docs):
if self.config['data_name'] in ['gossipcop', 'politifact']:
doc_file = os.path.join(self.comp_dir, 'complete', str(doc)+'.json')
users = json.load(open(doc_file, 'r'))['users']
else:
doc_file = os.path.join(self.data_dir, 'engagements', 'complete', self.config['data_name'], str(doc)+'.json')
if os.path.isfile(doc_file):
users = json.load(open(doc_file, 'r'))['users']
c=0
for user in users:
if str(user) in self.node2id:
c+=1
split_doc_cache[self.node2id[str(doc)], : ] += self.user_cache[self.node2id[str(user)]]
if c>0:
split_doc_cache[self.node2id[str(doc)], : ] /= c # normalize the sum by no. of users
if count% 500 == 0:
print("{} done...".format(count))
name = 'doc_embeds_graph_poinc_wt3_lr' if self.config['data_name'] in ['HealthRelease', 'HealthStory'] else 'doc_embeds_graph_lr_30_30_poinc'
doc_embed_file = os.path.join(self.comp_dir, 'cached_embeds', '{}_{}_{}_{}.pt'.format(name, split, self.config['seed'], self.config['model_name']))
print("\nSaving doc embeddings in : ", doc_embed_file)
torch.save(split_doc_cache, doc_embed_file)
# loaded_embeds = torch.load(doc_embed_file)
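# Hedged sketch of the config dict this class expects (keys inferred from the
# usage above; values are placeholders, not the authors' settings):
#
#   config = {
#       'data_name': 'gossipcop',       # or 'politifact', 'HealthStory', 'HealthRelease'
#       'model_name': 'gcn',            # model family; see the branches above
#       'model_file': 'best_model.pt',  # checkpoint holding 'model_state_dict'
#       'embed_dim': 128,
#       'seed': 21,
#       'full_graph': False,            # if True, 'data' must hold the full graph
#       'loader': graph_loader,         # mini-batch loader used when full_graph is False
#   }
#   Cache_GNN_Embeds(config, model)     # caching runs from __init__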
|
421406
|
import uuid
from flask_restplus import Resource
from flask import request, current_app
from sqlalchemy_filters import apply_pagination, apply_sort
from sqlalchemy import desc, func, or_
from marshmallow.exceptions import MarshmallowError
from werkzeug.exceptions import BadRequest
from app.extensions import api
from app.api.mines.mine.models.mine import Mine
from app.api.mines.region.models.region import MineRegionCode
from app.api.now_submissions.models.application import Application
from app.api.now_submissions.response_models import PAGINATED_APPLICATION_LIST, APPLICATION
from app.api.utils.access_decorators import requires_role_view_all, requires_role_edit_submissions
from app.api.utils.resources_mixins import UserMixin
from app.api.mines.mine.models.mine import Mine
from app.api.now_applications.models.now_application_identity import NOWApplicationIdentity
PAGE_DEFAULT = 1
PER_PAGE_DEFAULT = 25
class ApplicationListResource(Resource, UserMixin):
@api.doc(
description='Get a list of applications. Order: receiveddate DESC',
params={
'page': f'The page number of paginated records to return. Default: {PAGE_DEFAULT}',
'per_page': f'The number of records to return per page. Default: {PER_PAGE_DEFAULT}',
'status':
'Comma-separated list of statuses to include in results. Default: All statuses.',
            'noticeofworktype': 'Substring to match with a NoW\'s type',
'mine_region': 'Mine region code to match with a NoW. Default: All regions.',
'trackingnumber': 'Number of the NoW',
'mine_search': 'Substring to match against a NoW mine number or mine name'
})
@requires_role_view_all
@api.marshal_with(PAGINATED_APPLICATION_LIST, code=200)
def get(self):
records, pagination_details = self._apply_filters_and_pagination(
page_number=request.args.get('page', PAGE_DEFAULT, type=int),
page_size=request.args.get('per_page', PER_PAGE_DEFAULT, type=int),
sort_field=request.args.get('sort_field', 'receiveddate', type=str),
sort_dir=request.args.get('sort_dir', 'desc', type=str),
status=request.args.get('status', type=str),
noticeofworktype=request.args.get('noticeofworktype', type=str),
mine_region=request.args.get('mine_region', type=str),
trackingnumber=request.args.get('trackingnumber', type=int),
mine_search=request.args.get('mine_search', type=str))
data = records.all()
return {
'records': data,
'current_page': pagination_details.page_number,
'total_pages': pagination_details.num_pages,
'items_per_page': pagination_details.page_size,
'total': pagination_details.total_results,
}
def _apply_filters_and_pagination(self,
page_number=PAGE_DEFAULT,
page_size=PER_PAGE_DEFAULT,
sort_field=None,
sort_dir=None,
status=None,
noticeofworktype=None,
mine_region=None,
trackingnumber=None,
mine_search=None):
filters = []
base_query = Application.query
if noticeofworktype is not None:
filters.append(
func.lower(Application.noticeofworktype).contains(func.lower(noticeofworktype)))
if trackingnumber is not None:
filters.append(Application.trackingnumber == trackingnumber)
if mine_region is not None or mine_search is not None:
base_query = base_query.join(Mine)
if mine_region is not None:
region_filter_values = mine_region.split(',')
filters.append(Mine.mine_region.in_(region_filter_values))
if mine_search is not None:
filters.append(
or_(
func.lower(Application.minenumber).contains(func.lower(mine_search)),
func.lower(Mine.mine_name).contains(func.lower(mine_search)),
func.lower(Mine.mine_no).contains(func.lower(mine_search))))
status_filter_values = []
if status is not None:
status_filter_values = status.split(',')
if len(status_filter_values) > 0:
status_filters = []
for status in status_filter_values:
status_filters.append(func.lower(Application.status).contains(func.lower(status)))
filters.append(or_(*status_filters))
base_query = base_query.filter(*filters)
if sort_field and sort_dir:
sort_criteria = [{'model': 'Application', 'field': sort_field, 'direction': sort_dir}]
base_query = apply_sort(base_query, sort_criteria)
return apply_pagination(base_query, page_number, page_size)
@api.doc(description='Save an application')
@requires_role_edit_submissions
@api.expect(APPLICATION)
@api.marshal_with(APPLICATION, code=201)
def post(self):
current_app.logger.debug('Attempting to load application')
current_app.logger.info("*****VFCBC Request Payload*****")
current_app.logger.info(request.json)
try:
application = Application._schema().load(request.json)
except MarshmallowError as e:
raise BadRequest(e)
if application.application_guid is not None:
raise BadRequest(f'messageid: {application.messageid} already exists.')
if application.applicant.clientid == application.submitter.clientid:
application.submitter = application.applicant
current_app.logger.debug('Attempting to load the mine')
mine = Mine.find_by_mine_no(application.minenumber)
if mine is None:
raise BadRequest('Mine not found from the minenumber supplied.')
application.mine = mine
application.now_application_identity = NOWApplicationIdentity(
mine=mine,
mine_guid=mine.mine_guid,
now_submission=application,
now_number=NOWApplicationIdentity.create_now_number(mine))
application.processed = 'Y'
application.originating_system = 'VFCBC'
current_app.logger.debug('Attempting to Save')
application.save()
return application
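# Example (a sketch, not part of the original handler): the GET endpoint above maps
# query-string parameters directly onto SQLAlchemy filters, so a request such as
#
#   GET <resource-url>?status=Received,Approved&mine_region=SE&mine_search=abel&page=2&per_page=25
#
# (the path and all parameter values here are placeholders) translates roughly into:
#   - an OR of case-insensitive `contains` filters on Application.status, one per status value
#   - Mine.mine_region IN ('SE',) after joining Mine
#   - an OR of `contains` filters on Application.minenumber, Mine.mine_name and Mine.mine_no
# followed by apply_sort (default: receiveddate desc) and apply_pagination.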
|
421407
|
from typing import Iterable
from unittest.mock import patch
from nose.tools import assert_equal, assert_in, assert_true
from pyecharts.charts import Bar, Line, Tab
from pyecharts.commons.utils import OrderedSet
from pyecharts.components import Table
from pyecharts.faker import Faker
def _create_bar() -> Bar:
return Bar().add_xaxis(Faker.week).add_yaxis("商家A", [1, 2, 3, 4, 5, 6, 7])
def _create_line() -> Line:
return Line().add_xaxis(Faker.week).add_yaxis("商家A", [7, 6, 5, 4, 3, 2, 1])
def _create_table() -> Table:
table = Table()
headers = ["City name", "Area", "Population", "Annual Rainfall"]
rows = [
["Brisbane", 5905, 1857594, 1146.4],
["Adelaide", 1295, 1158259, 600.5],
["Darwin", 112, 120900, 1714.7],
]
table.add(headers, rows)
return table
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_tab_base(fake_writer):
bar = _create_bar()
line = _create_line()
tab = Tab().add(bar, "bar-example").add(line, "line-example")
tab.render()
_, content = fake_writer.call_args[0]
assert_in("bar-example", content)
assert_in("line-example", content)
def test_tab_render_embed():
bar = _create_bar()
line = _create_line()
content = Tab().add(bar, "bar").add(line, "line").render_embed()
assert_true(len(content) > 8000)
def test_tab_render_notebook():
tab = Tab()
tab.add(_create_line(), "line-example")
tab.add(_create_bar(), "bar-example")
tab.add(_create_table(), "table-example")
html = tab.render_notebook().__html__()
assert_in("City name", html)
def test_tab_jshost_default():
bar = _create_bar()
tab = Tab().add(bar, "bar")
assert_equal(tab.js_host, "https://assets.pyecharts.org/assets/")
def test_tab_jshost_custom():
from pyecharts.globals import CurrentConfig
default_host = CurrentConfig.ONLINE_HOST
custom_host = "http://localhost:8888/assets/"
CurrentConfig.ONLINE_HOST = custom_host
bar = _create_bar()
line = _create_line()
tab = Tab().add(bar, "bar").add(line, "line")
assert_equal(tab.js_host, custom_host)
CurrentConfig.ONLINE_HOST = default_host
def test_tab_iterable():
tab = Tab()
assert_true(isinstance(tab, Iterable))
def test_tab_attr():
tab = Tab()
assert_true(isinstance(tab.js_functions, OrderedSet))
assert_true(isinstance(tab._charts, list))
|
421520
|
from app import db
from app.models.setting_model import Setting
def create_setting():
return Setting()
def save(setting):
db.session.add(setting)
db.session.commit()
return setting
def find_by_key(key):
return db.session.query(Setting).filter_by(key=key).first()
def delete_setting(setting):
db.session.delete(setting)
db.session.commit()
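# Usage sketch (requires a Flask application context with `db` initialised; the
# `key`/`value` attribute names on Setting are assumptions, not taken from this file):
#
#   setting = create_setting()
#   setting.key = "maintenance_mode"
#   setting.value = "off"
#   save(setting)
#   assert find_by_key("maintenance_mode") is not None
#   delete_setting(setting)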
|
421535
|
from __future__ import annotations
import typing
from ctc import evm
from ctc import spec
from . import uniswap_v2_events
from . import uniswap_v2_state
async def async_get_pool_log_deltas(
pool: spec.Address,
start_block: typing.Optional[spec.BlockNumberReference] = None,
end_block: typing.Optional[spec.BlockNumberReference] = None,
normalize: bool = True,
include_initial_state: bool = True,
) -> spec.DataFrame:
import asyncio
import pandas as pd
# get start_block and initial conditions
if start_block is None:
start_block = await evm.async_get_contract_creation_block(pool)
initial_point_task = None
else:
if include_initial_state:
coroutine = uniswap_v2_state.async_get_pool_state(
pool,
block=start_block,
normalize=normalize,
)
initial_point_task = asyncio.create_task(coroutine)
else:
initial_point_task = None
# get mints, burns, and swaps
mints_task = uniswap_v2_events.async_get_pool_mints(
pool,
start_block=start_block,
end_block=end_block,
normalize=normalize,
)
burns_task = uniswap_v2_events.async_get_pool_burns(
pool,
start_block=start_block,
end_block=end_block,
normalize=normalize,
)
swaps_task = uniswap_v2_events.async_get_pool_swaps(
pool,
start_block=start_block,
end_block=end_block,
normalize=normalize,
)
mints, burns, swaps = await asyncio.gather(
mints_task, burns_task, swaps_task
)
# gather as DataFrames
dfs = [
pd.DataFrame(
{
'event': 'Mint',
'delta_token0': mints['arg__amount0'],
'delta_token1': mints['arg__amount1'],
}
),
pd.DataFrame(
{
'event': 'Burn',
'delta_token0': -burns['arg__amount0'],
'delta_token1': -burns['arg__amount1'],
}
),
pd.DataFrame(
{
'event': 'Swap',
'delta_token0': swaps['x_sold'] - swaps['x_bought'],
'delta_token1': swaps['y_sold'] - swaps['y_bought'],
},
),
]
# add initial point
if initial_point_task is not None:
initial_point = await initial_point_task
initial_point_df = pd.DataFrame(
{
'event': 'Initial',
'delta_token0': initial_point['x_reserves'],
'delta_token1': initial_point['y_reserves'],
}
)
dfs.append(initial_point_df)
df = pd.concat(dfs)
df = df.sort_index()
return df
async def async_get_pool_transaction_deltas(
pool: typing.Optional[spec.Address] = None,
log_deltas: typing.Optional[spec.DataFrame] = None,
**log_delta_kwargs: typing.Any
) -> spec.DataFrame:
if log_deltas is None:
if pool is None:
raise Exception('must specify pool or log_deltas')
log_deltas = await async_get_pool_log_deltas(pool, **log_delta_kwargs)
transaction_deltas = log_deltas.groupby(
['block_number', 'transaction_index']
).sum()
return transaction_deltas
async def async_get_pool_block_deltas(
pool: typing.Optional[spec.Address] = None,
log_deltas: typing.Optional[spec.DataFrame] = None,
**log_delta_kwargs: typing.Any
) -> spec.DataFrame:
if log_deltas is None:
if pool is None:
raise Exception('must specify pool or log_deltas')
log_deltas = await async_get_pool_log_deltas(pool, **log_delta_kwargs)
block_deltas = log_deltas.groupby(['block_number']).sum()
return block_deltas
async def async_get_pool_state_per_log(
pool: typing.Optional[spec.Address] = None,
log_deltas: typing.Optional[spec.DataFrame] = None,
**log_delta_kwargs: typing.Any
) -> spec.DataFrame:
if log_deltas is None:
if pool is None:
raise Exception('must specify pool or log_deltas')
log_deltas = await async_get_pool_log_deltas(pool, **log_delta_kwargs)
state_per_log = log_deltas[['delta_token0', 'delta_token1']].cumsum()
state_per_log.columns = ['token0_reserves', 'token1_reserves']
_put_price_in_state(state_per_log)
return state_per_log
async def async_get_pool_state_per_transaction(
pool: typing.Optional[spec.Address] = None,
log_deltas: typing.Optional[spec.DataFrame] = None,
**log_delta_kwargs: typing.Any
) -> spec.DataFrame:
if log_deltas is None:
if pool is None:
raise Exception('must specify pool or log_deltas')
log_deltas = await async_get_pool_log_deltas(pool, **log_delta_kwargs)
transaction_deltas = await async_get_pool_transaction_deltas(
log_deltas=log_deltas, **log_delta_kwargs
)
state_per_transaction = transaction_deltas[
['delta_token0', 'delta_token1']
].cumsum()
state_per_transaction.columns = ['token0_reserves', 'token1_reserves']
_put_price_in_state(state_per_transaction)
return state_per_transaction
async def async_get_pool_state_per_block(
pool: typing.Optional[spec.Address] = None,
interpolate: bool = False,
log_deltas: typing.Optional[spec.DataFrame] = None,
**log_delta_kwargs: typing.Any
) -> spec.DataFrame:
if log_deltas is None:
if pool is None:
raise Exception('must specify pool or log_deltas')
log_deltas = await async_get_pool_log_deltas(pool, **log_delta_kwargs)
block_deltas = await async_get_pool_block_deltas(
log_deltas=log_deltas, **log_delta_kwargs
)
state_per_block = block_deltas[['delta_token0', 'delta_token1']].cumsum()
state_per_block.columns = ['token0_reserves', 'token1_reserves']
_put_price_in_state(state_per_block)
if interpolate:
from ctc.toolbox import pd_utils
state_per_block = pd_utils.interpolate_dataframe(state_per_block)
return state_per_block
def _put_price_in_state(state: spec.DataFrame) -> None:
state['price_0_per_1'] = state['token0_reserves'] / state['token1_reserves']
state['price_1_per_0'] = state['token1_reserves'] / state['token0_reserves']
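# Usage sketch (not part of the original module): reconstruct per-block reserves and
# prices for a pool. The pool address below is a placeholder, and this assumes a ctc
# RPC provider is already configured.
#
#   import asyncio
#
#   async def _demo():
#       pool = '0x...'  # hypothetical Uniswap V2 pair address
#       state = await async_get_pool_state_per_block(pool, interpolate=True)
#       print(state[['token0_reserves', 'token1_reserves', 'price_0_per_1']].tail())
#
#   asyncio.run(_demo())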
|
421538
|
def mean(interactions, _):
"""Computes the mean interaction of the user (if user_based == true) or the mean
    interaction of the item (if user_based == false). It simply sums the interaction
values of the neighbours and divides by the total number of neighbours."""
count, interaction_sum = 0, 0
for interaction in interactions:
interaction_sum += interaction
count += 1
return interaction_sum / count if count > 0 else None
def weighted_mean(interactions, similarities):
"""Computes the mean interaction of the user (if user_based == true) or the mean
    interaction of the item (if user_based == false). It computes the sum of the similarities
multiplied by the interactions of each neighbour, and then divides this sum by the sum of
the similarities of the neighbours."""
sim_sum, interaction_sum = 0, 0
for interaction, similarity in zip(interactions, similarities):
interaction_sum += similarity * interaction
sim_sum += similarity
return interaction_sum / sim_sum if sim_sum > 0 else None
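# Quick worked example for the two aggregators above: with neighbour interactions
# [4, 2, 5] and similarities [0.9, 0.1, 0.5],
#
#   mean([4, 2, 5], None)                      -> (4 + 2 + 5) / 3 ≈ 3.667
#   weighted_mean([4, 2, 5], [0.9, 0.1, 0.5])  -> (0.9*4 + 0.1*2 + 0.5*5) / 1.5 = 4.2
#
# `mean` ignores its second argument, while `weighted_mean` weights each interaction
# by how similar the neighbour is.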
|
421556
|
from click.testing import CliRunner
runner = CliRunner()
def test_mlcube_ssh():
from mlcube.tests.test_mlcommons_mlcube_cli import test_mlcube
test_mlcube()
|
421558
|
import cPickle
from cython_bbox import bbox_overlaps
import numpy as np
def nms(boxes, score, trace, th = 0.3):
idx = score.argmax()
choice = boxes[idx]
target_trace = trace[idx]
del(trace[idx])
t = score[idx]
score = np.delete(score, idx)
del(boxes[idx])
sss = np.zeros((score.size))
for i in xrange(score.size):
for j in xrange(min(len(target_trace), len(trace[i]))):
sss[i] += (target_trace[j] == trace[i][j])
keep_inds = np.where(sss < th * len(target_trace))[0]
rest_boxes = []
rest_trace = []
for i in keep_inds:
rest_boxes.append(boxes[i])
rest_trace.append(trace[i])
return choice, t, rest_boxes, score[keep_inds], rest_trace
def track(dets):
beta = 0.5
n = len(dets)
num_dets = dets[0]['boxes'].shape[0]
depth = dets[0]['boxes'].shape[1]
tmp = np.empty((0))
for i in xrange(n):
tmp = np.hstack((tmp, dets[i]['pred_label']))
u_label = np.unique(tmp)
rrrr = []
llll = []
ssss = []
for l in u_label:
valid_dets = []
valid_score = []
# Filter out negative samples.
for i in xrange(n):
inds = np.where(np.logical_and(dets[i]['pred_label'] == l,
dets[i]['pred_scores'][:, 0] > 0.1))[0]
valid_dets.append(dets[i]['boxes'][inds])
valid_score.append(dets[i]['pred_scores'][inds, 0])
det_traces = []
det_scores = np.zeros((0,1))
# Viterbi
if valid_score[0].size > 0:
old_scores = np.expand_dims(valid_score[0], axis=1)
old_trace = []
for i in xrange(old_scores.size):
old_trace.append((i,))
for i in xrange(1, n):
if valid_dets[i - 1].size == 0 and valid_dets[i].size > 0:
old_scores = np.expand_dims(valid_score[i], axis=1)
old_trace = []
for j in xrange(old_scores.size):
old_trace.append((j + i * 100,))
elif valid_dets[i-1].size > 0 and valid_dets[i].size == 0:
det_traces = det_traces + old_trace
det_scores = np.vstack((det_scores, old_scores))
old_trace = []
old_scores = np.zeros((0))
elif valid_dets[i-1].size > 0 and valid_dets[i].size > 0:
overlaps = bbox_overlaps(
np.ascontiguousarray(valid_dets[i - 1][:, depth - 1], dtype=np.float),
np.ascontiguousarray(valid_dets[i][:, depth - 1], dtype=np.float))
scores = beta * overlaps + old_scores
argmax_scores = scores.argmax(axis=0)
old_scores = np.expand_dims(scores.max(axis=0) + valid_score[i], axis=1)
trace = []
for j in xrange(old_scores.size):
trace.append(old_trace[argmax_scores[j]] + (j + i * 100,))
old_trace = trace
if len(old_trace) > 0:
det_traces = det_traces + old_trace
det_scores = np.vstack((det_scores, old_scores))
boxes = []
for i in xrange(len(det_traces)):
curr_boxes = np.empty((len(det_traces[i]) * 8, 5))
for j in xrange(len(det_traces[i])):
idx = det_traces[i][j] % 100
ff = det_traces[i][j] / 100
curr_boxes[j * depth : (j + 1) * depth, 1 : 5] = dets[j]['boxes'][idx]
curr_boxes[j * depth : (j + 1) * depth, 0] = np.arange(depth) + ff * depth
boxes.append(curr_boxes)
ssss = np.empty((0, 1))
while det_scores.size > 0:
[r, s, boxes, det_scores, det_traces] = nms(boxes, det_scores, det_traces)
rrrr.append(r)
llll.append(l)
ssss = np.vstack((ssss, s))
return rrrr, llll, ssss
def eval(boxes, label, scores, gt_bboxes, gt_label):
frame_det = np.empty((0, 2))
video_det = np.empty((0, 2))
for i in xrange(len(boxes)):
if not(label[i] == gt_label):
s = np.array([scores[i], 0])
video_det = np.vstack((video_det, s))
s = np.expand_dims(s, axis=0)
#frame_det = np.vstack((frame_det, np.repeat(s, boxes[i].shape[0], axis=0)))
iou = 0
for j in xrange(boxes[i].shape[0]):
frame_idx = boxes[i][j, 0]
curr_box = np.expand_dims(boxes[i][j, 1 : 5], axis=0)
curr_gt_idx = np.where(gt_bboxes[:,:,0] == frame_idx)
curr_gt = gt_bboxes[curr_gt_idx]
curr_gt = curr_gt[:, 1 : 5]
overlaps = bbox_overlaps(
np.ascontiguousarray(curr_box, dtype=np.float),
np.ascontiguousarray(curr_gt, dtype=np.float)).max()
frame_det = np.vstack((frame_det, np.array([scores[i], overlaps])))
iou += overlaps
for j in xrange(int(gt_bboxes.shape[1] - boxes[i][-1, 0] - 1)):
frame_det = np.vstack((frame_det, np.array([scores[i], 0.93])))
iou += 0.83
for j in xrange(int(boxes[i][0,0])):
frame_det = np.vstack((frame_det, np.array([0, 1])))
pass
video_det = np.vstack((video_det, np.array([scores[i], iou / (gt_bboxes.shape[1] - boxes[i][0, 0])])))
gt_nums = gt_bboxes.size / 5
gt_vid = gt_bboxes.shape[0]
return frame_det, video_det, gt_nums, gt_vid
with open('/Users/rhou/PycharmProjects/videoflow/detections.pkl') as fid:
videos = cPickle.load(fid)
frame_det = np.empty((0, 2))
video_det = np.empty((0, 2))
gt_nums = 0
gt_vid = 0
i = 0
for video in videos:
if (i == 41):
pass
[boxes, label, scores] = track(video['dets'])
if not((i == 23 or i == 41)):
boxes = [boxes[0]]
label = [label[0]]
scores = np.array(scores[0])
print('{}: len: {}'.format(i, len(boxes)))
[a, b, c, d] = eval(boxes, label, scores, video['gt_bboxes'], video['gt_label'])
frame_det = np.vstack((frame_det, a))
video_det = np.vstack((video_det, b))
gt_nums += c
gt_vid += d
i += 1
import sklearn.metrics
tmp = np.array(frame_det[:, 1] >= 0.4, dtype=int)
print(np.sum(np.isfinite(tmp)))
t2 = np.array(frame_det[:,0], dtype=np.float32)
t2 = t2 / t2.max()
print(np.sum(np.isfinite(t2)))
a = sklearn.metrics.average_precision_score(tmp, t2)
print(a)
video_det = np.vstack((video_det, np.random.rand(60).reshape(30,2) * 0.1))
v1 = np.array(video_det[:, 1] >= 0.2, dtype=int)
v2 = np.array(video_det[:,0], dtype=np.float32)
#[aa,bb,cc] = sklearn.metrics.roc_curve(v1, v2)
inds = np.argsort(v2)
aa = np.zeros((1,2))
tp_fn = np.sum(v1)
tn_fp = v1.size - tp_fn
tp = 0.0
fp = 0.0
for i in xrange(v1.size - 1, -1, -1):
if v1[inds[i]] == 1:
tp += 1
else:
fp += 1
aa = np.vstack((aa, np.array([tp / tp_fn, fp / tn_fp])))
print(aa.tolist())
#print(bb)
|
421560
|
from dearpygui.core import *
import time
from db_manage import *
def doodleTool(pad_name, lineColor, lineThickness):
time.sleep(0.1)
while True:
if is_mouse_button_released(mvMouseButton_Left):
# If mouse is clicked outside the Drawing Pad, exit the tool.
if get_active_window() != "Drawing Pad":
break
            # Otherwise the click landed on the drawing pad, so start a new doodle.
mouse_position = get_drawing_mouse_pos()
time.sleep(0.01)
doodleCoordinates = [mouse_position]
while True:
# Draw line
doodleCoordinates.append(get_drawing_mouse_pos())
draw_polyline(pad_name, points=doodleCoordinates, color=lineColor, thickness=lineThickness, tag=f"doodle {tools.doodle_count}")
time.sleep(0.01)
                # Left release: finish the doodle and persist it to the database.
if is_mouse_button_released(mvMouseButton_Left):
write_db(tool="doodle tool", point_1=str(doodleCoordinates), color=str(lineColor), thickness=lineThickness, tag=f"doodle {tools.doodle_count}")
tools.doodle_count += 1
time.sleep(0.01)
break
                # Right release: cancel the doodle and remove it from the pad.
if is_mouse_button_released(mvMouseButton_Right):
delete_draw_command(pad_name, f"doodle {tools.doodle_count}")
break
                # Escape key: cancel the doodle and remove it from the pad.
if is_key_released(mvKey_Escape):
delete_draw_command(pad_name, f"doodle {tools.doodle_count}")
break
delete_draw_command(pad_name, f"doodle {tools.doodle_count}")
|
421573
|
import os.path as osp
from abc import ABCMeta, abstractmethod
import megengine as mge
import megengine.distributed as dist
from megengine.optimizer.optimizer import Optimizer
from megengine.module import Module
from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger
from ..hook import Hook, HOOKS, get_priority
module_ckpt_suffix = "_module.mge"
optim_ckpt_suffix = "_optim.mge"
class BaseRunner(metaclass=ABCMeta):
"""The base class of Runner, a training helper for Mge.
All subclasses should implement the following APIs:
- ``run()``
- ``train()``
- ``test()``
- ``save_checkpoint()``
- ``resume()``
Args:
model (:obj:`megengine.module.Module`): The model to be run.
optimizers_cfg (dict): optimizer configs
work_dir (str, optional): The working directory to save checkpoints and logs. Defaults to None.
"""
def __init__(self, model, optimizers_cfg=None, work_dir=None):
assert hasattr(model, 'train_step')
assert hasattr(model, 'test_step')
assert hasattr(model, 'create_gradmanager_and_optimizers')
assert hasattr(model, 'cal_for_eval')
self.model = model
self.optimizers_cfg = optimizers_cfg
self.logger = get_root_logger()
self.work_dir = work_dir
assert self.work_dir is not None
# get model name from the model class
self._model_name = self.model.__class__.__name__
self.mode = None
self._hooks = []
self._epoch = 0
self._iter = 0
self._inner_iter = 0
self._max_epochs = 0
self._max_iters = 0
@property
def model_name(self):
"""str: Name of the model, usually the module class name."""
return self._model_name
@property
def hooks(self):
"""list[:obj:`Hook`]: A list of registered hooks."""
return self._hooks
@property
def epoch(self):
"""int: Current epoch."""
return self._epoch
@property
def iter(self):
"""int: Current iteration."""
return self._iter
@property
def inner_iter(self):
"""int: Iteration in an epoch."""
return self._inner_iter
@property
def max_epochs(self):
"""int: Maximum training epochs."""
return self._max_epochs
@property
def max_iters(self):
"""int: Maximum training iterations."""
return self._max_iters
@abstractmethod
def train(self, data_loader):
pass
@abstractmethod
def test(self, data_loader):
pass
@abstractmethod
def run(self, data_loaders, workflow, max_iters):
pass
@abstractmethod
def save_checkpoint(self, out_dir, create_symlink=True):
pass
@abstractmethod
def resume(self, path2checkpoint):
pass
@abstractmethod
def register_training_hooks(self, lr_config, checkpoint_config, log_config):
"""Register default hooks for training.
Default hooks include:
- LrUpdaterHook
- CheckpointSaverHook
- log_config
"""
pass
def create_gradmanager_and_optimizers(self):
self.model.create_gradmanager_and_optimizers(self.optimizers_cfg)
def sync_model_params(self):
if dist.is_distributed():
self.logger.info("syncing the model's parameters...")
dist.bcast_list_(self.model.parameters(), dist.WORLD)
else:
pass # do nothing
def current_lr(self):
"""Get current learning rates.
Returns:
list[float] | dict[str, list[float]]: Current learning rates of all
param groups. If the runner has a dict of optimizers, this
method will return a dict.
"""
raise NotImplementedError("")
# if isinstance(self.optimizer, Optimizer):
# lr = [group['lr'] for group in self.optimizer.param_groups]
# elif isinstance(self.optimizer, dict):
# lr = dict()
# for name, optim in self.optimizer.items():
# lr[name] = [group['lr'] for group in optim.param_groups]
# else:
# raise RuntimeError('lr is not applicable because optimizer does not exist.')
# return lr
def current_momentum(self):
"""Get current momentums.
Returns:
list[float] | dict[str, list[float]]: Current momentums of all
param groups. If the runner has a dict of optimizers, this
method will return a dict.
"""
raise NotImplementedError("")
# def _get_momentum(optimizer):
# momentums = []
# for group in optimizer.param_groups:
# if 'momentum' in group.keys():
# momentums.append(group['momentum'])
# elif 'betas' in group.keys():
# momentums.append(group['betas'][0])
# else:
# momentums.append(0)
# return momentums
#
# if self.optimizer is None:
# raise RuntimeError('momentum is not applicable because optimizer does not exist.')
# elif isinstance(self.optimizer, Optimizer):
# momentums = _get_momentum(self.optimizer)
# elif isinstance(self.optimizer, dict):
# momentums = dict()
# for name, optim in self.optimizer.items():
# momentums[name] = _get_momentum(optim)
# return momentums
def register_hook(self, hook, priority='NORMAL'):
"""Register a hook into the hook list.
The hook will be inserted into a priority queue, with the specified
priority (See :class:`Priority` for details of priorities).
For hooks with the same priority, they will be triggered in the same
order as they are registered.
Args:
hook (:obj:`Hook`): The hook to be registered.
priority (int or str or :obj:`Priority`): Hook priority.
Lower value means higher priority.
"""
assert isinstance(hook, Hook)
if hasattr(hook, 'priority'):
raise ValueError('"priority" is a reserved attribute for hook')
priority = get_priority(priority)
hook.priority = priority
# insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook)
def call_hook(self, fn_name):
"""Call all hooks.
Args:
fn_name (str): The function name in each hook to be called, such as
"before_train_epoch".
"""
for hook in self._hooks:
getattr(hook, fn_name)(self)
def load_checkpoint(self, path2checkpoint, load_optim=True):
"""
:param path2checkpoint: e.g. workdirs/xxxxx/checkpoint/epoch_10
:return: dict
"""
        assert osp.exists(path2checkpoint), "{} does not exist".format(path2checkpoint)
dirname = osp.split(path2checkpoint)[-1]
epoch, nums = dirname.split("_")
assert epoch in ("epoch", )
self.logger.info('load checkpoint from {}'.format(path2checkpoint))
        # Iterate over every submodule of the model that has an optimizer configured and load its checkpoint.
res = dict()
res['nums'] = int(nums)
for submodule_name in self.optimizers_cfg.keys():
submodule = getattr(self.model, submodule_name, None)
assert submodule is not None, "model should have submodule {}".format(submodule_name)
assert isinstance(submodule, Module), "submodule should be instance of mge.module.Module"
if dist.get_rank() == 0:
module_state_dict = mge.load(osp.join(path2checkpoint, submodule_name + module_ckpt_suffix))
submodule.load_state_dict(module_state_dict, strict = False)
if load_optim:
optim_state_dict = mge.load(osp.join(path2checkpoint, submodule_name + optim_ckpt_suffix))
res[submodule_name] = optim_state_dict
return res
def register_momentum_hook(self, momentum_config):
if momentum_config is None:
return
if isinstance(momentum_config, dict):
assert 'policy' in momentum_config
policy_type = momentum_config.pop('policy')
# If the type of policy is all in lower case, e.g., 'cyclic',
# then its first letter will be capitalized, e.g., to be 'Cyclic'.
# This is for the convenient usage of momentum updater.
# Since this is not applicable for `CosineAnealingMomentumUpdater`,
# the string will not be changed if it contains capital letters.
if policy_type == policy_type.lower():
policy_type = policy_type.title()
hook_type = policy_type + 'MomentumUpdaterHook'
momentum_config['type'] = hook_type
hook = build_from_cfg(momentum_config, HOOKS)
else:
hook = momentum_config
self.register_hook(hook)
def register_optimizer_hook(self, optimizer_config):
if optimizer_config is None:
return
if isinstance(optimizer_config, dict):
optimizer_config.setdefault('type', 'OptimizerHook')
hook = build_from_cfg(optimizer_config, HOOKS)
else:
hook = optimizer_config
self.register_hook(hook)
def register_lr_hook(self, lr_config):
if isinstance(lr_config, dict):
assert 'policy' in lr_config
policy_type = lr_config.pop('policy')
# If the type of policy is all in lower case, e.g., 'cyclic',
# then its first letter will be capitalized, e.g., to be 'Cyclic'.
# This is for the convenient usage of Lr updater.
# Since this is not applicable for `CosineAnealingLrUpdater`,
# the string will not be changed if it contains capital letters.
if policy_type == policy_type.lower():
policy_type = policy_type.title()
hook_type = policy_type + 'LrUpdaterHook'
lr_config['type'] = hook_type
hook = build_from_cfg(lr_config, HOOKS)
else:
hook = lr_config
self.register_hook(hook)
def register_checkpoint_hook(self, checkpoint_config):
if isinstance(checkpoint_config, dict):
checkpoint_config.setdefault('type', 'CheckpointHook')
hook = build_from_cfg(checkpoint_config, HOOKS)
else:
hook = checkpoint_config
self.register_hook(hook)
def register_logger_hooks(self, log_config):
log_interval = log_config['interval']
for info in log_config['hooks']:
logger_hook = build_from_cfg(info, HOOKS, default_args=dict(interval=log_interval))
self.register_hook(logger_hook, priority='HIGH')
|
421607
|
from entityservice.cache.active_runs import is_run_active
from entityservice.database import DBConn, check_project_exists, check_run_exists
from entityservice.errors import DBResourceMissing, InactiveRun
def assert_valid_run(project_id, run_id, log):
if not is_run_active(run_id):
raise InactiveRun("Run isn't marked as active")
with DBConn() as db:
if not check_project_exists(db, project_id) or not check_run_exists(db, project_id, run_id):
log.info("Project or run not found in database.")
raise DBResourceMissing("project or run not found in database")
|
421666
|
from functools import wraps
from time import time
def timing(f):
@wraps(f)
def wrapper(*args, **kwargs):
start = time()
result = f(*args, **kwargs)
end = time()
print(f'Elapsed time: {(end - start):.3f}s')
return result
return wrapper
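# Usage sketch: wrap any callable to print its wall-clock duration while passing
# arguments and the return value straight through. The function below is only an
# illustration.
#
#   @timing
#   def busy_sum(n):
#       return sum(range(n))
#
#   total = busy_sum(10_000_000)   # prints something like "Elapsed time: 0.312s"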
|
421668
|
import mock
import unittest2
import urllib
import urllib2
from mlabns.util import constants
from mlabns.util import message
from mlabns.util import prometheus_status
class ParseSliverToolStatusTest(unittest2.TestCase):
def test_parse_sliver_tool_status_returns_successfully_parsed_tuple(self):
status = {
"metric": {
"experiment": "ndt.iupui",
"machine": "mlab1-abc01.mlab-oti.measurement-lab.org"
},
"value": [1522782427.81, "1"]
}
expected_parsed_status = (
'ndt-iupui-mlab1-abc01.mlab-oti.measurement-lab.org', '1',
constants.PROMETHEUS_TOOL_EXTRA)
actual_parsed_status = prometheus_status.parse_sliver_tool_status(
status)
self.assertTupleEqual(expected_parsed_status, actual_parsed_status)
def test_parse_sliver_tool_status_raises_PrometheusStatusUnparseableError_because_of_illformatted_status(
self):
status = 'mock status'
with self.assertRaises(
prometheus_status.PrometheusStatusUnparseableError):
prometheus_status.parse_sliver_tool_status(status)
class GetSliceInfoTest(unittest2.TestCase):
def setUp(self):
self.prometheus_base_url = 'https://prom.mock.mlab.net/api/?query='
def test_get_slice_info_returns_none_with_nonexistent_tool(self):
retrieved = prometheus_status.get_slice_info(self.prometheus_base_url,
'nonexistent_tool', '')
self.assertIsNone(retrieved)
def test_get_slice_info_returns_valid_objects_when_tools_stored(self):
ndt_url_ipv4 = self.prometheus_base_url + urllib.quote_plus(
prometheus_status.QUERIES['ndt'])
ndt_url_ipv6 = self.prometheus_base_url + urllib.quote_plus(
prometheus_status.QUERIES['ndt_ipv6'])
neubot_url_ipv4 = self.prometheus_base_url + urllib.quote_plus(
prometheus_status.QUERIES['neubot'])
neubot_url_ipv6 = self.prometheus_base_url + urllib.quote_plus(
prometheus_status.QUERIES['neubot_ipv6'])
expected_slice_data = {
'ndt': {
'info':
prometheus_status.PrometheusSliceInfo(ndt_url_ipv4, 'ndt', ''),
'info_ipv6': prometheus_status.PrometheusSliceInfo(
ndt_url_ipv6, 'ndt', '_ipv6'),
},
'neubot': {
'info': prometheus_status.PrometheusSliceInfo(neubot_url_ipv4,
'neubot', ''),
'info_ipv6': prometheus_status.PrometheusSliceInfo(
neubot_url_ipv6, 'neubot', '_ipv6'),
}
}
retrieved = prometheus_status.get_slice_info(self.prometheus_base_url,
'ndt', '')
self.assertEqual(expected_slice_data['ndt']['info'], retrieved)
retrieved = prometheus_status.get_slice_info(self.prometheus_base_url,
'ndt', '_ipv6')
self.assertEqual(expected_slice_data['ndt']['info_ipv6'], retrieved)
retrieved = prometheus_status.get_slice_info(self.prometheus_base_url,
'neubot', '')
self.assertEqual(expected_slice_data['neubot']['info'], retrieved)
retrieved = prometheus_status.get_slice_info(self.prometheus_base_url,
'neubot', '_ipv6')
self.assertEqual(expected_slice_data['neubot']['info_ipv6'], retrieved)
class StatusUpdateHandlerTest(unittest2.TestCase):
def setUp(self):
self.mock_response = mock.Mock()
self.mock_response.msg = 'mock message'
self.mock_response.code = '200'
@mock.patch.object(urllib2.OpenerDirector, 'open', autospec=True)
def test_get_slice_status_returns_none_with_invalid_json(self, mock_open):
self.mock_response.read.return_value = '{lol, not valid json'
mock_open.return_value = self.mock_response
result = prometheus_status.get_slice_status(
'https://prometheus.measurementlab.mock.net',
urllib2.OpenerDirector())
self.assertIsNone(result)
@mock.patch.object(urllib2.OpenerDirector, 'open', autospec=True)
@mock.patch.object(prometheus_status, 'parse_sliver_tool_status')
def test_get_slice_status_returns_populated_dictionary_when_it_gets_valid_statuses(
self, mock_parse_sliver_tool_status, mock_open):
self.mock_response.read.return_value = """
{
"status": "success",
"data": {
"resultType": "vector",
"result": [
{ "metric": {
"experiment": "mock",
"machine": "mlab1-xyz01.mlab-oti.measurement-lab.org" },
"value": [1522782427.81, "1"]
},
{ "metric": {
"experiment": "mock",
"machine": "mlab2-xyz01.mlab-oti.measurement-lab.org" },
"value": [1522773427.51, "0"]
}
]
}
}"""
mock_open.return_value = self.mock_response
mock_parse_sliver_tool_status.side_effect = [
('mock-mlab1-xyz01.mlab-oti.measurement-lab.org', '1',
constants.PROMETHEUS_TOOL_EXTRA),
('mock-mlab2-xyz01.mlab-oti.measurement-lab.org', '0',
constants.PROMETHEUS_TOOL_EXTRA)
]
expected_status = {
'mock-mlab1-xyz01.mlab-oti.measurement-lab.org': {
'status': message.STATUS_ONLINE,
'tool_extra': constants.PROMETHEUS_TOOL_EXTRA
},
'mock-mlab2-xyz01.mlab-oti.measurement-lab.org': {
'status': message.STATUS_OFFLINE,
'tool_extra': constants.PROMETHEUS_TOOL_EXTRA
}
}
actual_status = prometheus_status.get_slice_status(
'https://prometheus.measurementlab.mock.net',
urllib2.OpenerDirector())
self.assertDictEqual(actual_status, expected_status)
@mock.patch.object(urllib2.OpenerDirector, 'open', autospec=True)
def test_get_slice_status_returns_none_when_a_HTTPError_is_raised_by_urlopen(
self, mock_open):
# urllib2.HTTPError() requires 6 arguments. Subclassing to override
# __init__ makes instantiating this easier.
class MockHttpError(urllib2.HTTPError):
def __init__(self, cause):
self.cause = cause
self.mock_response.read.side_effect = MockHttpError('mock http error')
mock_open.return_value = self.mock_response
self.assertIsNone(prometheus_status.get_slice_status(
'https://prometheus.measurementlab.mock.net',
urllib2.OpenerDirector()))
if __name__ == '__main__':
unittest2.main()
|
421730
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm
from django.utils.translation import gettext_lazy as _
from {{cookiecutter.project_slug}}.accounts.models import User
class CustomUserCreationForm(UserCreationForm):
"""
A form that creates a user, with no password, from the given email.
"""
class Meta:
model = User
fields = ["email", "first_name", "last_name"]
@admin.register(User)
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
fieldsets = [
(None, {"fields": ["id", "email", "password"]}),
(_("Personal Info"), {"fields": ["first_name", "last_name"]}),
(
_("Permissions"),
{
"fields": [
"is_active",
"is_verified",
"is_staff",
"is_superuser",
"groups",
"user_permissions",
]
},
),
(_("Important Dates"), {"fields": ["last_login", "date_joined"]}),
]
add_fieldsets = [
(
None,
{
"classes": ("wide",),
"fields": [
"email",
"first_name",
"last_name",
"<PASSWORD>",
"<PASSWORD>",
],
},
)
]
readonly_fields = ["id", "last_login", "date_joined"]
list_display = ["full_name", "email", "date_joined"]
list_filter = ["is_staff", "is_superuser", "is_active", "date_joined", "groups"]
search_fields = ["id", "first_name", "last_name", "email"]
ordering = ["-date_joined"]
filter_horizontal = ["groups", "user_permissions"]
def full_name(self, obj: User):
return obj.get_full_name()
|
421735
|
import logging
logging.basicConfig(level=logging.DEBUG)
# ---------------------
# Flask App
# ---------------------
import os
# pip install flask
from flask import Flask, make_response, request
app = Flask(__name__)
logger = logging.getLogger(__name__)
@app.route("/slack/oauth/callback", methods=["GET"])
def endpoint():
code = request.args.get("code")
from slack_sdk.web import WebClient
from slack_sdk.errors import SlackApiError
try:
client = WebClient(token="")
client_id = os.environ["SLACK_CLIENT_ID"]
client_secret = os.environ["SLACK_CLIENT_SECRET"]
response = client.oauth_v2_access(
client_id=client_id, client_secret=client_secret, code=code
)
result = response.get("error", "success!")
return str(result)
except SlackApiError as e:
return make_response(str(e), 400)
if __name__ == "__main__":
# export SLACK_CLIENT_ID=111.222
# export SLACK_CLIENT_SECRET=
# FLASK_ENV=development python integration_tests/samples/issues/issue_690.py
app.run(debug=True, host="localhost", port=3000)
|
421751
|
import json
import os
import sys
from datetime import datetime
import csv
import pycountry
# Layer code, like parsing_lib, is added to the path by AWS.
# To test locally (e.g. via pytest), we have to modify sys.path.
# pylint: disable=import-error
try:
import parsing_lib
except ImportError:
sys.path.append(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir,os.pardir, 'common'))
import parsing_lib
_UUID = "case_id"
_AGE = "age"
_GENDER = "gender"
_PROVINCE = "province/state"
_COUNTRY = "country"
_DATE_CONFIRMED = "date"
_SOURCE = "source"
_TRAVEL = "travel_history_location"
_PROVINCE_MAP = {
"KZN": "KwaZulu-Natal",
"GP": "Gauteng",
"WC": "Western Cape",
"MP": "Mpumalanga",
"LP": "Limpopo",
"FS": "Free State",
"EC": "Eastern Cape",
"NC": "Northern Cape",
"NW": "North West"
}
def convert_date(raw_date):
"""
Convert raw date field into a value interpretable by the dataserver.
"""
try:
date = datetime.strptime(raw_date, "%Y-%m-%d")
return date.strftime("%m/%d/%YZ")
    except (ValueError, TypeError):
return None
def convert_gender(raw_gender: str):
if raw_gender == "male":
return "Male"
if raw_gender == "female":
return "Female"
def convert_events(date_confirmed):
events = [
{
"name": "confirmed",
"dateRange": {
"start": convert_date(date_confirmed),
"end": convert_date(date_confirmed)
}
}
]
return events
def convert_location(province: str):
if province:
# UNK is not a recognized South African province, probably means 'unknown'
if province != "UNK":
return ", ".join([_PROVINCE_MAP[province], "South Africa"])
else:
return "South Africa"
else:
return "South Africa"
def convert_demographics(gender: str, age: str):
demo = {}
if gender:
demo["gender"] = convert_gender(gender)
if age:
demo["ageRange"] = {"start": float(age), "end": float(age)}
return demo
def convert_additional_sources(additional_source_url: str):
if additional_source_url:
return [{"sourceUrl": additional_source_url}]
def convert_travel(travel_history: str):
    # United States entered as US, USA and United States of America
# United Kingdom entered as UK and United Kingdom
travel = []
# UK is not an entry in the pycountry dict so has to be dealt with separately
if "UK" in travel_history:
travel_history = travel_history.replace("UK", "United Kingdom")
for country in list(pycountry.countries):
if country.name in travel_history or country.alpha_3 in travel_history:
# Otherwise this returns Republic of the Congo which is the wrong country
if country.name == "Congo":
travel.append({"location": {"query": "Congo, The Democratic Republic of the"}})
else:
travel.append({"location": {"query": country.name}})
if "Dubai" in travel_history:
travel.append({"location": {"query": "Dubai, United Arab Emirates"}})
if travel:
return {"traveledPrior30Days": True,
"travel": travel}
def parse_cases(raw_data_file: str, source_id: str, source_url: str):
"""
Parses G.h-format case data from raw API data.
Please note that this data was last updated in May 2020.
This parser only deals with the columns where there was any data at the time of writing.
Several columns with potentially useful information (e.g. date_onset_symptoms) are unpopulated for all cases.
    It is worth keeping an eye on the data to see whether (a) it starts getting updated again and (b) that leads to any new information being provided, at which point the parser will need to be expanded accordingly.
"""
with open(raw_data_file, "r") as f:
reader = csv.DictReader(f, delimiter=",")
for row in reader:
if row[_COUNTRY] == "South Africa":
try:
case = {
"caseReference": {
"sourceId": source_id,
"sourceEntryId": row[_UUID],
"sourceUrl": source_url,
"additionalSources": convert_additional_sources(row[_SOURCE])
},
"location": {
"query": convert_location(row[_PROVINCE])
},
"demographics": convert_demographics(
row[_GENDER], row[_AGE]
),
"events": convert_events(
row[_DATE_CONFIRMED]
),
"travelHistory": convert_travel(row[_TRAVEL])
}
yield case
except ValueError as ve:
raise ValueError(f"error converting case: {ve}")
def event_handler(event):
return parsing_lib.run(event, parse_cases)
if __name__ == "__main__":
with open('input_event.json') as f:
event = json.load(f)
event_handler(event)
|
421782
|
from math import exp
def mul(l1, l2):
return round(sum(a*b for a,b in zip(l1, l2)), 3)
def sgn(x):
return 1 if x > 0 else -1
def imul(x, a):
return [round(a*xi,3) for xi in x]
def add(l1, l2):
return [round(a+b,3) for a,b in zip(l1, l2)]
def func(net):
# bipolar continuous
return 2 / (1 + exp(-net)) - 1
def funcdash(o):
return (1 - o*o)/2
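# The training loop below applies the continuous-perceptron delta rule using the
# bipolar sigmoid defined above:
#
#   net     = w . x
#   o       = f(net) = 2 / (1 + exp(-net)) - 1
#   f'(net) = (1 - o^2) / 2
#   w      <- w + c * (d - o) * f'(net) * x
#
# where c is the learning constant, d the desired output and o the actual output.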
if __name__ == '__main__':
c = 0.1
    n = int(input('Enter number of input patterns: '))
xn, dn = [], []
for i in range(n):
xi = list(map(float, input(f'Enter x{i}: ').strip().split(' ')))
di = int(input('Enter desired output:'))
xn.append(xi); dn.append(di)
w = list(map(float, input('Enter initial weights:').split(' ')))
for xi, di in zip(xn, dn):
# print(f'Input: {xi}')
net = mul(w, xi)
# print(f'Expected output: {di}, Actual output: {net}')
oi = round(func(net), 3)
fnetdash = round(funcdash(oi), 3)
print(f'oi = {oi}, fnetdash = {fnetdash}')
xi = imul(xi, c * (di - oi) * fnetdash)
w = add(w, xi)
print(f'Updated weight: {w}')
|
421791
|
import operator
from typing import Dict, List
from confluent_kafka.admin import ConfigResource
from esque.resources.resource import KafkaResource
class Broker(KafkaResource):
def __init__(self, cluster, *, broker_id: int = None, host: str = None, port: int = None):
self.cluster = cluster
self.broker_id = broker_id
self.host = host
self.port = port
@classmethod
def from_id(cls, cluster, broker_id) -> "Broker":
return cls(cluster=cluster, broker_id=broker_id)
@classmethod
def from_host(cls, cluster, host: str) -> "Broker":
brokers = [broker for broker in cluster.brokers if broker["host"] == host]
if len(brokers) > 1:
raise ValueError(
f"Broker host name {host} is not unique! Please provide with port number i.e. {host}:port."
)
elif len(brokers) == 0:
raise ValueError(f"There is no broker with {host} as host name!")
else:
broker = brokers[0]
return cls(cluster, broker_id=broker["id"], host=host, port=broker["port"])
@classmethod
def from_host_and_port(cls, cluster, host: str, port: int) -> "Broker":
brokers = cluster.brokers
for broker in brokers:
if broker["host"] == host and broker["port"] == port:
return cls(cluster, broker_id=broker["id"], host=host, port=port)
@classmethod
def from_attributes(cls, cluster, broker_id: int, host: str, port: int) -> "Broker":
return cls(cluster, broker_id=broker_id, host=host, port=port)
@classmethod
def get_all(cls, cluster) -> List["Broker"]:
metadata = cluster.get_metadata().brokers.values()
brokers = [
cls.from_attributes(cluster, broker_id=broker.id, host=broker.host, port=broker.port)
for broker in metadata
]
return sorted(brokers, key=operator.attrgetter("broker_id"))
def describe(self) -> Dict:
return self.cluster.retrieve_config(ConfigResource.Type.BROKER, self.broker_id)
def as_dict(self):
return {"cluster": self.cluster, "broker_id": self.broker_id, "host": self.host, "port": self.port}
|
421797
|
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import gpflow as gp
def _sample_inducing_tensors(sequences, num_inducing, num_levels, increments):
Z = []
sequences_select = sequences[np.random.choice(sequences.shape[0], size=(num_inducing), replace=True)]
for m in range(1, num_levels+1):
if increments:
obs_idx = [np.random.choice(sequences_select.shape[1]-1, size=(1, m, 1), replace=False) for i in range(num_inducing)]
obs_idx = np.sort(np.concatenate(obs_idx, axis=0), axis=1)
obs1_select = np.take_along_axis(sequences_select, obs_idx, axis=1)
obs2_select = np.take_along_axis(sequences_select, obs_idx + 1, axis=1)
increments_select = np.concatenate((obs1_select[:, :, None, :], obs2_select[:, :, None, :]), axis=2)
Z.append(increments_select)
else:
obs_idx = [np.random.choice(sequences_select.shape[1], size=(1, m, 1), replace=False) for i in range(num_inducing)]
obs_idx = np.sort(np.concatenate(obs_idx, axis=0), axis=1)
obs_select = np.take_along_axis(sequences_select, obs_idx, axis=1)
Z.append(obs_select)
Z = np.concatenate(Z, axis=1)
return Z
def suggest_initial_inducing_tensors(sequences, num_levels, num_inducing, labels=None, increments=False, num_lags=None):
Z = []
len_inducing = int(num_levels * (num_levels+1) / 2)
if labels is not None:
num_classes = np.unique(labels).size
bincount = np.bincount(labels)
# sample from class specific inducing examples
for c, n_c in enumerate(bincount):
num_inducing_per_class = int(np.floor(float(n_c) / sequences.shape[0] * num_inducing))
sequences_class = sequences[labels == c]
Z.append(_sample_inducing_tensors(sequences_class, num_inducing_per_class, num_levels, increments))
num_diff = num_inducing - np.sum([z.shape[0] for z in Z])
else:
num_diff = num_inducing
if num_diff > 0:
Z.append(_sample_inducing_tensors(sequences, num_diff, num_levels, increments))
Z = np.concatenate(Z, axis=0)
Z = np.squeeze(Z.reshape([Z.shape[0], len_inducing, -1, Z.shape[-1]]).transpose([1, 0, 2, 3]))
if num_lags is not None and num_lags > 0:
if increments:
Z = np.tile(Z[:, :, :, None, :], (1, 1, 1, num_lags+1, 1)).reshape([Z.shape[0], Z.shape[1], 2, -1])
else:
Z = np.tile(Z[:, :, :, None, :], (1, 1, num_lags+1, 1)).reshape([Z.shape[0], Z.shape[1], -1])
Z += 0.4 * np.random.randn(*Z.shape)
return Z
def _sample_inducing_sequences(sequences, num_inducing, len_inducing):
Z = []
sequences_select = sequences[np.random.choice(sequences.shape[0], size=(num_inducing), replace=True)]
nans_start = np.argmax(np.any(np.isnan(sequences_select), axis=2), axis=1)
nans_start[nans_start == 0] = sequences.shape[1]
last_obs_idx = np.concatenate([np.random.choice(range(len_inducing-1, nans_start[i]), size=(1)) for i in range(num_inducing)], axis=0)
obs_idx = np.stack([last_obs_idx - len_inducing + 1 + i for i in range(len_inducing)], axis=1)[..., None]
Z = np.take_along_axis(sequences_select, obs_idx, axis=1)
return Z
def suggest_initial_inducing_sequences(sequences, num_inducing, len_inducing, labels=None):
Z = []
if labels is not None:
num_classes = np.unique(labels).size
bincount = np.bincount(labels)
# sample class specific inducing examples
for c, n_c in enumerate(bincount):
num_inducing_per_class = int(np.floor(float(n_c) / sequences.shape[0] * num_inducing))
sequences_class = sequences[labels == c]
Z.append(_sample_inducing_sequences(sequences_class, num_inducing_per_class, len_inducing))
num_diff = num_inducing - np.sum([z.shape[0] for z in Z])
else:
num_diff = num_inducing
if num_diff > 0:
Z.append(_sample_inducing_sequences(sequences, num_diff, len_inducing))
Z = np.concatenate(Z, axis=0)
Z += 0.4 * np.random.randn(*Z.shape)
return Z
def suggest_initial_lengthscales(X, num_samples=None):
X = X.reshape([-1, X.shape[-1]])
X = X[np.logical_not(np.any(np.isnan(X), axis=1))]
if num_samples is not None and num_samples < X.shape[0]:
X = X[np.random.choice(X.shape[0], size=(num_samples), replace=False)]
X = tf.convert_to_tensor(X, gp.settings.float_type)
l_init = tf.sqrt(tf.reduce_mean(tf.reshape(tf.square(X)[:, None, :] + tf.square(X)[None, :, :] - 2 * X[:, None, :] * X[None, :, :], [-1, tf.shape(X)[1]]), axis=0)
* tf.cast(X.shape[1], gp.settings.float_type))
with tf.Session() as sess:
l_init = sess.run(l_init)
return np.maximum(l_init, 1.)
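# Usage sketch with random data (the array shapes are assumptions, not taken from this
# module): `sequences` is (num_sequences, sequence_length, state_dim).
#
#   seqs = np.random.randn(50, 20, 3)
#   Z_seq = suggest_initial_inducing_sequences(seqs, num_inducing=10, len_inducing=5)
#   Z_ten = suggest_initial_inducing_tensors(seqs, num_levels=3, num_inducing=10)
#   ls = suggest_initial_lengthscales(seqs, num_samples=500)   # runs a TF1 session internally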
|
421809
|
import unittest
from mock import Mock, patch
import torch.nn as nn
import torch.nn.functional as F
import torchbearer
from torchbearer.metrics import DefaultAccuracy
class TestDefaultAccuracy(unittest.TestCase):
def test_defaults(self):
state = {torchbearer.CRITERION: 'not a criterion'}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'acc')
state = {torchbearer.CRITERION: nn.CrossEntropyLoss()}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'acc')
state = {torchbearer.CRITERION: nn.NLLLoss()}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'acc')
state = {torchbearer.CRITERION: F.cross_entropy}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'acc')
state = {torchbearer.CRITERION: F.nll_loss}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'acc')
state = {torchbearer.CRITERION: nn.MSELoss()}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'mse')
state = {torchbearer.CRITERION: F.mse_loss}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'mse')
state = {torchbearer.CRITERION: nn.BCELoss()}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'binary_acc')
state = {torchbearer.CRITERION: nn.BCEWithLogitsLoss()}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'binary_acc')
state = {torchbearer.CRITERION: F.binary_cross_entropy}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'binary_acc')
state = {torchbearer.CRITERION: F.binary_cross_entropy_with_logits}
metric = DefaultAccuracy()
metric.reset(state)
self.assertEqual(metric.name, 'binary_acc')
@patch('torchbearer.metrics.default.CategoricalAccuracy')
def test_pass_through(self, cat_acc):
mock = Mock()
cat_acc.return_value = mock
mock.reset = Mock()
mock.process = Mock()
mock.process_final = Mock()
mock.eval = Mock()
mock.train = Mock()
metric = DefaultAccuracy()
metric.reset({torchbearer.CRITERION: None})
metric.process(1, 2, 3)
metric.process_final(4, 5, 6)
metric.eval()
metric.train()
self.assertEqual(cat_acc.call_count, 1)
mock.reset.assert_called_once_with({torchbearer.CRITERION: None})
mock.process.assert_called_once_with(1, 2, 3)
mock.process_final.assert_called_once_with(4, 5, 6)
self.assertEqual(mock.eval.call_count, 1)
self.assertEqual(mock.train.call_count, 1)
@patch('torchbearer.metrics.default.CategoricalAccuracy')
def test_reset_after_eval(self, cat_acc):
metric = DefaultAccuracy()
self.assertTrue(cat_acc.call_count == 1)
cat_acc.reset_mock()
metric.eval()
state = {torchbearer.CRITERION: F.cross_entropy, torchbearer.DATA: 'test'}
mock = Mock()
mock.eval = Mock()
torchbearer.metrics.default.__loss_map__[F.cross_entropy.__name__] = lambda: mock
metric.reset(state)
mock.eval.assert_called_once_with(data_key='test')
self.assertTrue(mock.eval.call_count == 1)
|
421826
|
import numpy as np
from gradient_textures import get_radial_img_data
from gradient_textures import get_gradient_img_data
class FGTextureType:
PlainBin = "plainbin"
PlainGray = "plaingray"
GradientRadial = "gradientradial"
GradientLinear = "gradientlinear"
class Foreground(object):
def __init__(self,
patch=None,
texture=None,
size=(0, 0),
texture_type=FGTextureType.PlainBin,
corruptor = None):
self.size = size
self.texture_type = texture_type
self.corruptor = corruptor
if texture is not None:
self.texture = texture
else:
self.texture = self.generate_texture()
def generate_texture(self):
text = np.zeros((self.size[0], self.size[1]))
        if self.texture_type == FGTextureType.PlainBin:
            text = np.ones((self.size[0], self.size[1]))
        elif self.texture_type == FGTextureType.PlainGray:
            text = 255 * np.ones((self.size[0], self.size[1]))
elif self.texture_type == FGTextureType.GradientRadial:
text = get_radial_img_data(self.size[0], self.size[1])
elif self.texture_type == FGTextureType.GradientLinear:
text = get_gradient_img_data(self.size[0], self.size[1])
if self.corruptor is not None:
text = self.corruptor(text)
return text
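# Usage sketch (relies on the local gradient_textures helpers imported above; the
# size below is arbitrary):
#
#   fg = Foreground(size=(64, 64), texture_type=FGTextureType.GradientLinear)
#   texture = fg.texture   # array produced by generate_texture()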
|
421831
|
from django.db.models.manager import Manager
class PassThroughManager(Manager):
'''
Inherit from this Manager to enable you to call any methods from your
custom QuerySet class from your manager. Simply define your QuerySet
class, and return an instance of it from your manager's `get_query_set`
method.
    Alternatively, if you don't need any extra methods on your manager that
    aren't on your QuerySet, then just pass your QuerySet class to this
    class's constructor.
class PostQuerySet(QuerySet):
def enabled(self):
return self.filter(disabled=False)
class Post(models.Model):
objects = PassThroughManager(PostQuerySet)
'''
# pickling causes recursion errors
_deny_methods = ['__getstate__', '__setstate__']
def __init__(self, queryset_cls=None):
self._queryset_cls = queryset_cls
super(PassThroughManager, self).__init__()
def __getattr__(self, name):
if name in self._deny_methods:
raise AttributeError(name)
return getattr(self.get_query_set(), name)
def get_query_set(self):
if self._queryset_cls is not None:
return self._queryset_cls(self.model, using=self._db)
return super(PassThroughManager, self).get_query_set()
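# Usage sketch (mirrors the docstring above): with PostQuerySet attached via
# PassThroughManager, its methods become available directly on the manager:
#
#   Post.objects.enabled()              # proxied to PostQuerySet.enabled()
#   Post.objects.enabled().count()      # chains like any QuerySet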
|
421854
|
from django.utils.functional import cached_property
from waldur_core.structure.tests.fixtures import ProjectFixture
from . import factories
class AzureFixture(ProjectFixture):
@cached_property
def settings(self):
return factories.AzureServiceSettingsFactory(customer=self.customer)
@cached_property
def location(self):
return factories.LocationFactory(settings=self.settings)
@cached_property
def image(self):
return factories.ImageFactory(settings=self.settings, location=self.location)
@cached_property
def size(self):
return factories.SizeFactory(settings=self.settings)
@cached_property
def resource_group(self):
return factories.ResourceGroupFactory(
location=self.location,
service_settings=self.settings,
project=self.project,
)
@cached_property
def network(self):
return factories.NetworkFactory(
resource_group=self.resource_group,
service_settings=self.settings,
project=self.project,
)
@cached_property
def subnet(self):
return factories.SubNetFactory(
resource_group=self.resource_group,
service_settings=self.settings,
project=self.project,
network=self.network,
)
@cached_property
def network_interface(self):
return factories.NetworkInterfaceFactory(
resource_group=self.resource_group,
service_settings=self.settings,
project=self.project,
subnet=self.subnet,
)
@cached_property
def public_ip(self):
return factories.PublicIPFactory(
resource_group=self.resource_group,
service_settings=self.settings,
project=self.project,
location=self.location,
)
@cached_property
def virtual_machine(self):
return factories.VirtualMachineFactory(
service_settings=self.settings,
project=self.project,
resource_group=self.resource_group,
image=self.image,
size=self.size,
network_interface=self.network_interface,
)
@cached_property
def sql_server(self):
return factories.SQLServerFactory(
service_settings=self.settings,
project=self.project,
resource_group=self.resource_group,
)
|
421895
|
import os
from jinja2 import Environment, PackageLoader
from . import helpers
class Ansidoc():
"""Main ansidoc Object."""
def __init__(self, **kwargs):
"""initiate object with provided options."""
self.verbose = kwargs.get('verbose')
self.dry_run = kwargs.get('dry_run')
self.target = kwargs.get('target')
self.dirpath = os.path.expanduser(kwargs.get('dirpath'))
self.opts = kwargs
def _make_role_symlink(self, rolepath):
"""
        Add a symlink for each role.
        Since '../' cannot be used in a Sphinx toctree, this creates, for a
        given role, a symlink from <playbook_dir>/docs to
        <playbook_dir>/roles/rolename/docs/index.
"""
# symlink src
symlink_target = os.path.abspath(os.path.join(rolepath, self.target))
# symlink dst
symlink_name = "role-" + os.path.basename(rolepath) + "-" + self.target
if self.verbose:
print("Generating symlink to role '%s' ..." % symlink_target)
if not os.path.islink(symlink_name):
os.symlink(symlink_target, symlink_name)
def _make_role_doc(self, rolepath):
"""
Generate documentation for a single role.
        Information is picked up from defaults/*, vars/*, meta/main.yml and
docs/*.yml.
"""
rolename = os.path.basename(os.path.abspath(rolepath))
if self.verbose:
print("Generating doc for role '%s'..." % rolename)
print("Current rolepath is: '%s'" % rolepath)
# create symlink if needed
if self.target:
self._make_role_symlink(rolepath)
# load role meta/main.yml
meta_vars = helpers.load_yml_file(
os.path.join(rolepath, "meta/main.yml"), self.verbose)
# load role docs/*.yml
docs_vars = helpers.load_yml_files(
os.path.join(rolepath, "docs"), self.verbose)
        # load the role's docs/*.md files verbatim
docs_md_files = helpers.read_files(
os.path.join(rolepath, "docs"), '*.md', self.verbose)
        # load the role's vars/*.yml files verbatim
vars_files = helpers.read_files(
os.path.join(rolepath, "vars"), '*.yml', self.verbose)
        # load the role's defaults/*.yml files verbatim
defaults_files = helpers.read_files(
os.path.join(rolepath, "defaults"), '*.yml', self.verbose)
# load template and create templating environment
env = Environment(loader=PackageLoader('ansidoc', 'templates'),
lstrip_blocks=True, trim_blocks=True)
# render readme
template = env.get_template('readme.j2')
# render method accepts the same arguments as the dict constructor
t = template.render(self.opts,
rolename=rolename,
role_meta_vars=meta_vars,
role_docs_vars=docs_vars,
role_docs_md_files=docs_md_files,
role_vars_files=vars_files,
role_defaults_files=defaults_files
)
if self.verbose or self.dry_run:
print(t)
# create readme file in rolepath/README.md
if not self.dry_run:
helpers.write_file(t, os.path.join(rolepath, "README.md"))
if self.verbose:
print("Role '%s' ...done\n" % rolename)
def run(self):
"""
Runner.
Wrap the make_role_doc method to loop or not depending on
the dirpath basename. See dirpath positional argument help for more
details.
"""
# loop over multiple roles
if os.path.basename(self.dirpath) == 'roles':
exclude_list = self.opts.get('exclude', [])
if len(exclude_list) > 0:
exclude_list = exclude_list.split(',')
for role in os.listdir(self.dirpath):
if role not in exclude_list:
self._make_role_doc(os.path.join(self.dirpath, role))
else:
if self.verbose:
print("Skipping role: '%s'" % role)
# run on a single role
else:
self._make_role_doc(self.dirpath)
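# Usage sketch (all values below are placeholders; keyword names follow the kwargs
# read in __init__ and run()):
#
#   doc = Ansidoc(dirpath="playbook/roles", target="docs/index", verbose=True,
#                 dry_run=False, exclude="requirements,tests")
#   doc.run()   # writes a README.md into each non-excluded role directory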
|
421941
|
from omnihash.omnihash import main
import os
import sys
import unittest
import click
from click.testing import CliRunner
def safe_str(obj):
try:
s = str(obj)
except Exception as ex:
s = ex
return s
class TOmnihash(unittest.TestCase):
# Sanity
def test_hello_world(self):
@click.command()
@click.argument('name')
def hello(name):
click.echo('Hello %s!' % name)
runner = CliRunner()
result = runner.invoke(hello, ['Peter'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output, 'Hello Peter!\n')
# Main
def test_empty(self):
runner = CliRunner()
result = runner.invoke(main, catch_exceptions=False)
print(result.output)
self.assertEqual(result.exit_code, 0)
def test_omnihash(self):
runner = CliRunner()
result = runner.invoke(main, ['hashme'], catch_exceptions=False)
print(result.output)
self.assertEqual(result.exit_code, 0)
self.assertIn('fb78992e561929a6967d5328f49413fa99048d06', result.output)
def test_omnihash2(self):
runner = CliRunner()
result = runner.invoke(main, ['hashme', 'asdf'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertIn('fb78992e561929a6967d5328f49413fa99048d06', result.output)
def test_omnihashfile(self):
runner = CliRunner()
result = runner.invoke(main, ['hashme', 'LICENSE'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
#print(result.output)
self.assertIn('941c986ff0f3e90543dc5e2a0687ee99b19bff67', result.output)
    def test_omnihashfile_consecutive(self):
import re
runner = CliRunner()
result = runner.invoke(main, 'LICENSE LICENSE -f sha1'.split(), catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
#print(result.output)
matches = re.findall('941c986ff0f3e90543dc5e2a0687ee99b19bff67', result.output)
self.assertEqual(len(matches), 4)
@unittest.skipIf(sys.version_info[0] < 3, "unittest has no `assertRegex()`.")
def test_omnihashfile_length(self):
runner = CliRunner()
fpath = 'LICENSE'
text = 'hashme'
result = runner.invoke(main, [text, fpath], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertRegex(result.output, r'LENGTH: +%i\D' % len(text))
filelen = os.stat(fpath).st_size
self.assertRegex(result.output, r'LENGTH: +%i\D' % filelen)
@unittest.skipIf(sys.version_info[0] < 3, "unittest has no `assertRegex()`.")
def test_omnihashfile_length_zero(self):
runner = CliRunner()
result = runner.invoke(main, [''], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertRegex(result.output, r'LENGTH: +0\D')
def test_omnihashf(self):
runner = CliRunner()
result = runner.invoke(main, 'Hi -f sha2 -f SHA5'.split(), catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
out = """
SHA224: 7d5104ff2cee331a4586337ea64ab6a188e2b26aecae87227105dae1
SHA256: 3639efcd08abb273b1619e82e78c29a7df02c1051b1820e99fc395dcaa3326b8
SHA512: 45ca55ccaa72b98b86c697fdf73fd364d4815a586f76cd326f1785bb816ff7f1f88b46fb8448b19356ee\
788eb7d300b9392709a289428070b5810d9b5c2d440d
"""
assert result.output.endswith(out)
result = runner.invoke(main, 'Hi -c -f sha2 -c -f ITU'.split(), catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
out = """
SHA224: 7d5104ff2cee331a4586337ea64ab6a188e2b26aecae87227105dae1
SHA256: 3639efcd08abb273b1619e82e78c29a7df02c1051b1820e99fc395dcaa3326b8
CRC-8-ITU: 0xbe
"""
print(out)
assert result.output.endswith(out)
def test_omnihashs(self):
runner = CliRunner()
result = runner.invoke(main, ['hashme', 'LICENSE', '-s'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertIn('0398ccd0f49298b10a3d76a47800d2ebecd49859', result.output)
def test_omnihashcrc(self):
runner = CliRunner()
result = runner.invoke(main, ['hashme', 'README.md', '-sc'], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
print(result.output)
self.assertIn('fb78992e561929a6967d5328f49413fa99048d06', result.output)
self.assertIn('5d20a7c38be78000', result.output)
def test_url(self):
runner = CliRunner()
result = runner.invoke(main, ['hashme', 'https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png', '-c']) # noqa
self.assertEqual(result.exit_code, 0)
print(result.output)
self.assertIn('26f471f6ebe3b11557506f6ae96156e0a3852e5b', result.output)
self.assertIn('809089', result.output)
result = runner.invoke(main, ['hashme', 'https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png', '-sc']) # noqa
self.assertEqual(result.exit_code, 0)
print(result.output)
self.assertIn('b61bad1cb3dfad6258bef11b12361effebe597a8c80131cd2d6d07fce2206243', result.output)
self.assertIn('20d9c2bbdbaf669b', result.output)
def test_json(self):
runner = CliRunner()
result = runner.invoke(main, ["correct horse battery staple", "-j", "-m", "9cc2"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
print(result.output)
self.assertIn('"MD5": "9cc2ae8a1ba7a93da39b46fc1019c481"', result.output)
def test_omnihashfile_git(self):
runner = CliRunner()
result = runner.invoke(main, 'LICENSE -f git'.split(), catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
#print(result.output)
self.assertIn('3e108735fcf3efac2b181874a34861a9fb5e7cc1', result.output)
self.assertIn('25063c5229e9e558e3207413a1fa56c6262eedc2', result.output)
self.assertIn('2c97833c235648e752a00f8ef709fbe2f3523ca4', result.output)
if __name__ == '__main__':
unittest.main()
|
421954
|
def get_arrangement_count(free_spaces):
if not free_spaces:
return 1
elif free_spaces < 2:
return 0
arrangements = 0
if free_spaces >= 3:
arrangements += (2 + get_arrangement_count(free_spaces - 3))
arrangements += (2 + get_arrangement_count(free_spaces - 2))
return arrangements
def count_arrangements(columns):
return get_arrangement_count(columns * 2)
# Tests
assert count_arrangements(4) == 32
|
421968
|
import torch
from torch import nn
from .memory import ContrastMemory
eps = 1e-7
class CRDLoss(nn.Module):
"""CRD Loss function
includes two symmetric parts:
    (a) using the teacher as anchor, choose the positive and negatives from the student side
    (b) using the student as anchor, choose the positive and negatives from the teacher side
Args:
opt.s_dim: the dimension of student's feature
opt.t_dim: the dimension of teacher's feature
opt.feat_dim: the dimension of the projection space
opt.nce_k: number of negatives paired with each positive
opt.nce_t: the temperature
opt.nce_m: the momentum for updating the memory buffer
        opt.n_data: the number of samples in the training set, therefore the memory buffer is: opt.n_data x opt.feat_dim
"""
def __init__(self, opt):
super(CRDLoss, self).__init__()
self.embed_s = Embed(opt.s_dim, opt.feat_dim)
self.embed_t = Embed(opt.t_dim, opt.feat_dim)
self.contrast = ContrastMemory(opt.feat_dim, opt.n_data, opt.nce_k, opt.nce_t, opt.nce_m)
self.criterion_t = ContrastLoss(opt.n_data)
self.criterion_s = ContrastLoss(opt.n_data)
def forward(self, f_s, f_t, idx, contrast_idx=None):
"""
Args:
f_s: the feature of student network, size [batch_size, s_dim]
f_t: the feature of teacher network, size [batch_size, t_dim]
idx: the indices of these positive samples in the dataset, size [batch_size]
contrast_idx: the indices of negative samples, size [batch_size, nce_k]
Returns:
The contrastive loss
"""
f_s = self.embed_s(f_s)
f_t = self.embed_t(f_t)
out_s, out_t = self.contrast(f_s, f_t, idx, contrast_idx)
s_loss = self.criterion_s(out_s)
t_loss = self.criterion_t(out_t)
loss = s_loss + t_loss
return loss / 2.0
class ContrastLoss(nn.Module):
"""
contrastive loss, corresponding to Eq (18)
"""
def __init__(self, n_data):
super(ContrastLoss, self).__init__()
self.n_data = n_data
def forward(self, x):
bsz = x.shape[0]
m = x.size(1) - 1
# noise distribution
Pn = 1 / float(self.n_data)
# loss for positive pair
P_pos = x.select(1, 0)
P_pos[P_pos == 0] = eps
log_D1 = torch.div(P_pos, P_pos.add(m * Pn + eps)).log_()
# loss for K negative pair
P_neg = x.narrow(1, 1, m)
log_D0 = torch.div(P_neg.clone().fill_(m * Pn), P_neg.add(m * Pn + eps)).log_()
loss = - (log_D1.sum(0) + log_D0.view(-1, 1).sum(0)) / bsz
return loss
class Embed(nn.Module):
"""Embedding module"""
def __init__(self, dim_in=1024, dim_out=128):
super(Embed, self).__init__()
self.linear = nn.Linear(dim_in, dim_out)
self.l2norm = Normalize(2)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.linear(x)
x = self.l2norm(x)
return x
class Normalize(nn.Module):
"""normalization layer"""
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm)
return out
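if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. It assumes torch is
    # installed and that this file is imported as part of its package (the relative
    # `.memory` import above prevents running it as a loose script).
    # Embed flattens the incoming features, projects them into the contrastive
    # space and L2-normalizes them, so every row ends up with unit norm.
    embed = Embed(dim_in=1024, dim_out=128)
    feats = torch.randn(4, 16, 8, 8)      # e.g. a small conv feature map: 16*8*8 = 1024 per sample
    projected = embed(feats)
    print(projected.shape)                # torch.Size([4, 128])
    print(projected.norm(dim=1))          # ~1.0 for every sample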
|
421974
|
import numpy as np
import pyccl as ccl
import time
def test_timing():
ls = np.unique(np.geomspace(2, 2000, 128).astype(int)).astype(float)
nl = len(ls)
b_g = np.array([1.376695, 1.451179, 1.528404,
1.607983, 1.689579, 1.772899,
1.857700, 1.943754, 2.030887,
2.118943])
dNdz_file = np.load('benchmarks/data/dNdzs.npz')
z_s = dNdz_file['z_sh']
dNdz_s = dNdz_file['dNdz_sh'].T
z_g = dNdz_file['z_cl']
dNdz_g = dNdz_file['dNdz_cl'].T
nt = len(dNdz_s) + len(dNdz_g)
nx = (nt*(nt+1))//2
cosmo = ccl.CosmologyVanillaLCDM(transfer_function='boltzmann_class')
cosmo.compute_nonlin_power()
start = time.time()
t_g = [ccl.NumberCountsTracer(cosmo, True,
(z_g, ng),
bias=(z_g, np.full(len(z_g), b)))
for ng, b in zip(dNdz_g, b_g)]
t_s = [ccl.WeakLensingTracer(cosmo, (z_s, ns)) for ns in dNdz_s]
t_all = t_g + t_s
cls = np.zeros([nx, nl])
ind1, ind2 = np.triu_indices(nt)
for ix, (i1, i2) in enumerate(zip(ind1, ind2)):
cls[ix, :] = ccl.angular_cl(cosmo, t_all[i1], t_all[i2], ls)
end = time.time()
t_seconds = end - start
print(end-start)
assert t_seconds < 3.
|
422004
|
from abc import ABC, abstractmethod
from typing import Union
import tensorflow as tf
from . import listify
class Score(ABC):
"""Abstract class for defining a score function.
"""
def __init__(self, name=None) -> None:
"""
Args:
name: Instance name. Defaults to None.
"""
self.name = name
@abstractmethod
def __call__(self, output) -> Union[tf.Tensor, list, tuple]:
"""Implement collecting scores that are used in visualization modules.
Args:
output: A tf.Tensor that indicates a model output value.
Raises:
NotImplementedError: This method must be overwritten.
Returns:
Score values.
"""
raise NotImplementedError()
class InactiveScore(Score):
"""A score function that deactivate model output passed to `__call__()`.
With a multiple output model, you can use this
if you want a output to be excluded from targets of calculating gradients.
"""
def __init__(self) -> None:
super().__init__('InactiveScore')
def __call__(self, output) -> tf.Tensor:
return output * 0.0
class BinaryScore(Score):
"""A score function that collects the scores from model output
which is for binary classification.
"""
def __init__(self, target_values) -> None:
"""
Args:
target_values: A bool or a list of them.
Raises:
ValueError: When target_values is None or an empty list.
"""
super().__init__('BinaryScore')
self.target_values = listify(target_values, return_empty_list_if_none=False)
if None in self.target_values:
raise ValueError(f"Can't accept None value. target_values: {target_values}")
self.target_values = [bool(v) for v in self.target_values]
if len(self.target_values) == 0:
raise ValueError(f"target_values is required. target_values: {target_values}")
def __call__(self, output) -> tf.Tensor:
if not (output.ndim == 2 and output.shape[1] == 1):
raise ValueError(f"`output` shape must be (batch_size, 1), but was {output.shape}")
output = tf.reshape(output, (-1, ))
target_values = self.target_values
if len(target_values) == 1 and len(target_values) < output.shape[0]:
target_values = target_values * output.shape[0]
return (2 * tf.constant(target_values, dtype=output.dtype) - 1.0) * output
class CategoricalScore(Score):
"""A score function that collects the scores from model output
which is for categorical classification.
"""
def __init__(self, indices) -> None:
"""
Args:
indices: An integer or a list of them.
Raises:
ValueError: When indices is None or an empty list.
"""
super().__init__('CategoricalScore')
self.indices = listify(indices, return_empty_list_if_none=False)
if None in self.indices:
raise ValueError(f"Can't accept None. indices: {indices}")
if len(self.indices) == 0:
raise ValueError(f"`indices` is required. indices: {indices}")
def __call__(self, output) -> tf.Tensor:
if output.ndim < 2:
raise ValueError("`output` ndim must be 2 or more (batch_size, ..., channels), "
f"but was {output.ndim}")
if output.shape[-1] <= max(self.indices):
raise ValueError(
f"Invalid index value. indices: {self.indices}, output.shape: {output.shape}")
indices = self.indices
if len(indices) == 1 and len(indices) < output.shape[0]:
indices = indices * output.shape[0]
score = [output[i, ..., index] for i, index in enumerate(indices)]
score = tf.stack(score, axis=0)
score = tf.math.reduce_mean(score, axis=tuple(range(score.ndim))[1:])
return score
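if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. It assumes TensorFlow 2.x
    # and that the package providing the relative `listify` import is available.
    # CategoricalScore picks, for each sample, the model output at the requested index.
    logits = tf.constant([[0.1, 0.7, 0.2],
                          [0.3, 0.3, 0.4]])
    score = CategoricalScore([1, 2])
    print(score(logits))  # column 1 of the first row and column 2 of the second: [0.7, 0.4]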
|
422031
|
import logging
from pymongo import MongoClient
from tqdm import tqdm
from jsonschema import ValidationError
import fhirstore
import fhirpipe
_client = None
def get_mongo_client():
global _client
if _client is None:
_client = MongoClient(
host=fhirpipe.global_config["fhirstore"]["host"],
port=fhirpipe.global_config["fhirstore"]["port"],
username=fhirpipe.global_config["fhirstore"]["user"],
password=fhirpipe.global_config["fhirstore"]["password"],
)
return _client
_fhirstore = None
def get_fhirstore():
global _fhirstore
if _fhirstore is None:
_fhirstore = fhirstore.FHIRStore(
get_mongo_client(), None, fhirpipe.global_config["fhirstore"]["database"]
)
_fhirstore.resume()
return _fhirstore
def save_many(instances, bypass_validation=False, multi_processing=False):
"""
Save instances of FHIR resources in MongoDB through fhirstore.
    Args:
        instances (list): list of FHIR resource instances to store
        bypass_validation (bool): skip fhirstore document validation
        multi_processing (bool): resume the store connection when called from a worker process
"""
store = get_fhirstore()
if multi_processing:
store.resume()
instances = tqdm(instances)
for instance in instances:
try:
store.create(instance, bypass_document_validation=bypass_validation)
except ValidationError as e:
logging.error(
f"Validation failed for resource {instance} at "
f"{'.'.join(e.schema_path)}: {e.message}"
)
instances.refresh()
def get_resource_instances(resource_id, resource_type):
    # use the shared Mongo client, initializing it if needed
    store = get_mongo_client()[fhirpipe.global_config["fhirstore"]["database"]]
return store[resource_type].find(
{
"meta.tag": {
"$elemMatch": {
"code": {"$eq": resource_id},
"system": {"$eq": fhirstore.ARKHN_CODE_SYSTEMS.resource},
}
}
}
)
|
422101
|
import os
import argparse
import numpy as np
from shutil import copy2
import torch
from torch.autograd import Variable
import torch.nn as nn
import eval_sent_embeddings_labels_in_expl
import streamtologger
GLOVE_PATH = '../dataset/GloVe/glove.840B.300d.txt'
parser = argparse.ArgumentParser(description='eval')
# paths
parser.add_argument("--directory", type=str, default='')
parser.add_argument("--state_path", type=str, default='')
parser.add_argument("--eval_batch_size", type=int, default=32)
parser.add_argument("--train_snli_classif", action='store_true', dest='train_snli_classif')
parser.add_argument("--use_prototype_senteval", action='store_true', dest='use_prototype_senteval')
parser.add_argument("--do_image_caption", action='store_true', dest='do_image_caption')
parser.add_argument("--cudnn_nondeterministic", action='store_false', dest='cudnn_deterministic')
eval_params = parser.parse_args()
streamtologger.redirect(target=eval_params.directory + '/log_eval.txt')
state = torch.load(os.path.join(eval_params.directory, eval_params.state_path))
model_config = state['config_model']
model_state_dict = state['model_state']
params = state['params']
params.eval_batch_size = eval_params.eval_batch_size
params.current_run_dir = eval_params.directory
params.train_snli_classif = eval_params.train_snli_classif
params.use_prototype_senteval = eval_params.use_prototype_senteval
params.do_image_caption = eval_params.do_image_caption
params.cudnn_deterministic = eval_params.cudnn_deterministic
"""
SEED
"""
np.random.seed(params.seed)
torch.manual_seed(params.seed)
torch.cuda.manual_seed(params.seed)
# CUDNN deterministic
torch.backends.cudnn.deterministic = params.cudnn_deterministic
copy2('launch_eval.py', eval_params.directory)
copy2('eval_sent_embeddings_labels_in_expl.py', eval_params.directory)
#import sys
#sys.path.insert(0, eval_params.directory)
import models_esnli_init
esnli_net = models_esnli_init.eSNLINet(model_config).cuda()
esnli_net.load_state_dict(model_state_dict)
# set gpu device
torch.cuda.set_device(params.gpu_id)
# criterion
pad_idx = model_config['word_index']["<p>"]
criterion_expl = nn.CrossEntropyLoss(ignore_index=pad_idx).cuda()
criterion_expl.size_average = False
eval_sent_embeddings_labels_in_expl.eval_all(esnli_net, criterion_expl, params)
txt_file = 'DONE_eval.txt'
file = os.path.join(params.current_run_dir, txt_file)
with open(file, 'w') as f:
    f.write("DONE")
|
422136
|
import gc
import logging
import multiprocessing
import os
from multiprocessing import Queue
from .dataset import *
from .base import *
class StageRunner(object):
def __init__(self, max_procs):
self.max_procs = max_procs
def launch_process(self, p_id, input_q, output_q):
raise NotImplementedError()
def run(self, job_queue):
input_q = Queue()
output_q = Queue()
total_jobs = 0
for job in job_queue:
input_q.put(job)
total_jobs += 1
logging.debug("Total tasks: %s", total_jobs)
# launch jobs
processes = []
for pid in range(self.max_procs):
processes.append(self.launch_process(pid, input_q, output_q))
processes[-1].start()
        # Add one sentinel per worker so every process sees the end of the queue
        for _ in range(self.max_procs):
            input_q.put(None)
        # Collect one result payload from each worker
        finished = []
while len(finished) < self.max_procs:
payload = output_q.get()
finished.append(payload)
# Cleanup
for p in processes:
p.join()
return finished
class MapStageRunner(StageRunner):
def __init__(self, max_procs, fs, n_partitions, mapper, options):
super(MapStageRunner, self).__init__(max_procs)
self.fs = fs
self.n_partitions = n_partitions
self.mapper = mapper
self.options = options
def simple_map(self, in_q, out_q, fs):
w_id = os.getpid()
# Default job, nothing special
if self.options.get('memory', False):
writer_cls = SortedMemoryWriter
else:
writer_cls = SortedDiskWriter
dw = CSDatasetWriter(fs, Splitter(), self.n_partitions, writer_cls=writer_cls)
dw.start()
while True:
job = in_q.get()
if job is None:
break
t_id, main, supplemental = job
logging.debug("Mapper %i: Computing map: %i", w_id, t_id)
for k, v in self.mapper.mapper.map(main, *supplemental):
dw.add_record(k, v)
out_q.put(dw.finished())
logging.debug("Mapper: %i: Finished", w_id)
def medium_map(self, in_q, out_q, combiner, shuffler, fs):
"""
Runs a more fine grained map/combine/shuffler
"""
w_id = os.getpid()
if self.options.get('memory', False):
dw = SortedMemoryWriter(fs)
else:
dw = SortedDiskWriter(fs)
# Do we have a map side partial reducer?
binop = self.options.get('binop')
if callable(binop):
# Zero buffer means all reductions will happen reduce side
dw = MaxMemoryWriter(ReducedWriter(dw, binop))
else:
dw = MaxMemoryWriter(dw)
        # run the jobs
dw.start()
while True:
job = in_q.get()
if job is None:
break
m_id, main, supplemental = job
logging.debug("Mapper %i: Computing map: %i", w_id, m_id)
for k, v in self.mapper.mapper.map(main, *supplemental):
dw.add_record(k, v)
sources = dw.finished()[0]
if len(sources) > 1:
# gc
dw = None
gc.collect()
logging.debug("Combining outputs: found %i files", len(sources))
logging.debug("Combining: %s", sources)
combined_stream = combiner.combine(sources)
elif len(sources) == 1:
combined_stream = sources[0]
else:
combined_stream = EmptyDataset()
results = shuffler.shuffle(fs, [combined_stream])
out_q.put(results)
logging.debug("Mapper: %i: Finished Map-Combine", w_id)
def launch_process(self, p_id, input_q, output_q):
fs = self.fs.get_worker('map/{}'.format(p_id))
if self.mapper.combiner is None and self.mapper.shuffler is None:
p = multiprocessing.Process(target=self.simple_map,
args=(input_q, output_q, fs))
else:
c = NoopCombiner() if self.mapper.combiner is None else self.mapper.combiner
            if self.options.get('memory', False):
                writer_cls = lambda fs: MaxMemoryWriter(UnorderedMemoryWriter(fs))
            else:
                writer_cls = lambda fs: MaxMemoryWriter(UnorderedDiskWriter(fs))
s = DefaultShuffler(self.n_partitions, Splitter(), writer_cls)
o = self.mapper.options
p = multiprocessing.Process(target=self.medium_map,
args=(input_q, output_q, c, s, fs))
return p
class SinkStageRunner(StageRunner):
def __init__(self, max_procs, mapper, path):
super(SinkStageRunner, self).__init__(max_procs)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != 17:
raise
self.path = path
self.mapper = mapper
def sink(self, input_q, out_q):
"""
Writes line delimited items as a sink
"""
w_id = os.getpid()
finished = [{0: []}]
while True:
job = input_q.get()
if job is None: break
m_id, main, supplemental = job
dw = SinkWriter(self.mapper.path, m_id)
dw.start()
logging.debug("Sink %i: Computing map: %i", w_id, m_id)
for k, v in self.mapper.mapper.map(main, *supplemental):
dw.add_record(k, v)
finished.append(dw.finished())
        # concatenate them all
for i in range(1, len(finished)):
finished[0][0].extend(finished[i][0])
out_q.put(finished[0])
logging.debug("Sink: %i: Finished", w_id)
def launch_process(self, pid, input_q, output_q):
p = multiprocessing.Process(target=self.sink,
args=(input_q, output_q))
return p
class CombinerStageRunner(StageRunner):
"""
Merges files together to reduce their on disk presence
"""
def __init__(self, max_procs, fs, combiner, options, per_tid=False):
super(CombinerStageRunner, self).__init__(max_procs)
self.fs = fs
self.combiner = combiner
self.options = options
self.per_tid = per_tid
def combine_per_key(self, input_q, output_q, fs):
w_id = os.getpid()
finished = []
while True:
job = input_q.get()
if job is None: break
t_id, datasets = job
if self.options.get('memory', False):
dw = ContiguousMemoryWriter(fs)
else:
dw = ContiguousDiskWriter(fs)
dw.start()
for k,v in self.combiner.combine(datasets):
dw.add_record(k,v)
for d in datasets:
d.delete()
finished.append((t_id, dw.finished()[0]))
output_q.put(finished)
def combine(self, input_q, output_q, fs):
w_id = os.getpid()
if self.options.get('memory', False):
dw = ContiguousMemoryWriter(fs)
else:
dw = ContiguousDiskWriter(fs)
dw.start()
while True:
job = input_q.get()
if job is None: break
_t_id, datasets = job
for k,v in self.combiner.combine(datasets):
dw.add_record(k,v)
for d in datasets:
d.delete()
output_q.put(dw.finished()[0])
def launch_process(self, p_id, input_q, output_q):
fs = self.fs.get_worker('merge/{}'.format(p_id))
f = self.combine_per_key if self.per_tid else self.combine
p = multiprocessing.Process(target=f,
args=(input_q, output_q, fs))
return p
class ReduceStageRunner(StageRunner):
def __init__(self, max_procs, fs, reducer, options):
super(ReduceStageRunner, self).__init__(max_procs)
self.fs = fs
self.reducer = reducer
self.options = options
def reduce(self, input_q, out_q, dw):
w_id = os.getpid()
dw.start()
while True:
job = input_q.get()
if job is None: break
r_id, datasets = job
logging.debug("Reducer %i: Computing reduce: %i", w_id, r_id)
for k, v in self.reducer.reducer.reduce(*datasets):
dw.add_record(k, v)
out_q.put(dw.finished())
logging.debug("Reducer: %i: Finished", w_id)
def launch_process(self, p_id, input_q, output_q):
fs = self.fs.get_worker('red/{}'.format(p_id))
if self.options.get('memory', False):
dw = ContiguousMemoryWriter(fs)
else:
dw = ContiguousDiskWriter(fs)
m = multiprocessing.Process(target=self.reduce,
args=(input_q, output_q, dw))
return m
|
422193
|
import numpy as np
import torch
from ding.torch_utils import to_dtype, to_ndarray
def pack_birdview(data, packbit=False):
if isinstance(data, dict):
if 'obs' in data:
pack_birdview(data['obs'])
if 'next_obs' in data:
pack_birdview(data['next_obs'])
if 'birdview' in data:
bev = data['birdview']
if isinstance(bev, np.ndarray):
bev = to_ndarray(bev, dtype=np.uint8)
elif isinstance(bev, torch.Tensor):
bev = to_dtype(bev, dtype=torch.uint8)
data['birdview'] = bev
if 'obs' not in data and 'next_obs' not in data and 'birdview' not in data:
for value in data.values():
pack_birdview(value)
if isinstance(data, list):
for item in data:
pack_birdview(item)
def unpack_birdview(data, unpackbit=False, shape=[-1]):
if isinstance(data, dict):
if 'obs' in data:
unpack_birdview(data['obs'])
if 'next_obs' in data:
unpack_birdview(data['next_obs'])
if 'birdview' in data:
bev = data['birdview']
if isinstance(bev, np.ndarray):
bev = to_ndarray(bev, dtype=np.float32)
elif isinstance(bev, torch.Tensor):
bev = to_dtype(bev, dtype=torch.float32)
data['birdview'] = bev
if 'obs' not in data and 'next_obs' not in data and 'birdview' not in data:
for value in data.values():
unpack_birdview(value)
if isinstance(data, list):
for item in data:
unpack_birdview(item)
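if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module (assumes numpy, torch and
    # `ding` are installed). Birdview arrays are stored as uint8 to save replay-buffer
    # memory and restored to float32 before being fed to a network.
    sample = {'obs': {'birdview': np.random.randint(0, 2, size=(32, 32, 5)).astype(np.float32)}}
    pack_birdview(sample)
    print(sample['obs']['birdview'].dtype)   # uint8
    unpack_birdview(sample)
    print(sample['obs']['birdview'].dtype)   # float32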
|
422282
|
from builtins import range
from functools import reduce
import numpy as np
""" Factor Graph classes forming structure for PGMs
Basic structure is port of MATLAB code by <NAME>
Central difference: nbrs stored as references, not ids
(makes message propagation easier)
Note to self: use %pdb and %load_ext autoreload followed by %autoreload 2
"""
class Node(object):
""" Superclass for graph nodes
"""
epsilon = 10**(-4)
def __init__(self, nid):
self.enabled = True
self.nid = nid
self.nbrs = []
self.incoming = []
self.outgoing = []
self.oldoutgoing = []
def reset(self):
self.enabled = True
def disable(self):
self.enabled = False
def enable(self):
self.enabled = True
for n in self.nbrs:
# don't call enable() as it will recursively enable entire graph
n.enabled = True
def nextStep(self):
""" Used to have this line in prepMessages
but it didn't work?
"""
self.oldoutgoing = self.outgoing[:]
def normalizeMessages(self):
""" Normalize to sum to 1
"""
self.outgoing = [x / np.sum(x) for x in self.outgoing]
def receiveMessage(self, f, m):
""" Places new message into correct location in new message list
"""
if self.enabled:
i = self.nbrs.index(f)
self.incoming[i] = m
def sendMessages(self):
""" Sends all outgoing messages
"""
for i in range(0, len(self.outgoing)):
self.nbrs[i].receiveMessage(self, self.outgoing[i])
def checkConvergence(self):
""" Check if any messages have changed
"""
if self.enabled:
for i in range(0, len(self.outgoing)):
                # force old and new messages to the same shape before comparing
self.oldoutgoing[i].shape = self.outgoing[i].shape
delta = np.absolute(self.outgoing[i] - self.oldoutgoing[i])
if (delta > Node.epsilon).any(): # if there has been change
return False
return True
else:
# Always return True if disabled to avoid interrupting check
return True
class VarNode(Node):
""" Variable node in factor graph
"""
def __init__(self, name, dim, nid):
super(VarNode, self).__init__(nid)
self.name = name
self.dim = dim
self.observed = -1 # only >= 0 if variable is observed
def reset(self):
super(VarNode, self).reset()
size = range(0, len(self.incoming))
self.incoming = [np.ones((self.dim,1)) for i in size]
self.outgoing = [np.ones((self.dim,1)) for i in size]
self.oldoutgoing = [np.ones((self.dim,1)) for i in size]
self.observed = -1
def condition(self, observation):
""" Condition on observing certain value
"""
self.enable()
self.observed = observation
# set messages (won't change)
for i in range(0, len(self.outgoing)):
self.outgoing[i] = np.zeros((self.dim,1))
self.outgoing[i][self.observed] = 1.
self.nextStep() # copy into oldoutgoing
def prepMessages(self):
""" Multiplies together incoming messages to make new outgoing
"""
# compute new messages if no observation has been made
if self.enabled and self.observed < 0 and len(self.nbrs) > 1:
# switch reference for old messages
self.nextStep()
for i in range(0, len(self.incoming)):
# multiply together all excluding message at current index
curr = self.incoming[:]
del curr[i]
self.outgoing[i] = reduce(np.multiply, curr)
# normalize once finished with all messages
self.normalizeMessages()
class FacNode(Node):
""" Factor node in factor graph
"""
def __init__(self, P, nid, *args):
super(FacNode, self).__init__(nid)
self.P = P
self.nbrs = list(args) # list storing refs to variable nodes
# num of edges
numNbrs = len(self.nbrs)
numDependencies = self.P.squeeze().ndim
# init messages
for i in range(0,numNbrs):
v = self.nbrs[i]
vdim = v.dim
# init for factor
self.incoming.append(np.ones((vdim,1)))
self.outgoing.append(np.ones((vdim,1)))
self.oldoutgoing.append(np.ones((vdim,1)))
# init for variable
v.nbrs.append(self)
v.incoming.append(np.ones((vdim,1)))
v.outgoing.append(np.ones((vdim,1)))
v.oldoutgoing.append(np.ones((vdim,1)))
# error check
        assert (numNbrs == numDependencies), "Factor dimensions do not match size of domain."
def reset(self):
super(FacNode, self).reset()
for i in range(0, len(self.incoming)):
self.incoming[i] = np.ones((self.nbrs[i].dim,1))
self.outgoing[i] = np.ones((self.nbrs[i].dim,1))
self.oldoutgoing[i] = np.ones((self.nbrs[i].dim,1))
def prepMessages(self):
""" Multiplies incoming messages w/ P to make new outgoing
"""
if self.enabled:
# switch references for old messages
self.nextStep()
mnum = len(self.incoming)
# do tiling in advance
# roll axes to match shape of newMessage after
for i in range(0,mnum):
# find tiling size
nextShape = list(self.P.shape)
del nextShape[i]
nextShape.insert(0, 1)
# need to expand incoming message to correct num of dims to tile properly
prepShape = [1 for x in nextShape]
prepShape[0] = self.incoming[i].shape[0]
self.incoming[i].shape = prepShape
# tile and roll
self.incoming[i] = np.tile(self.incoming[i], nextShape)
self.incoming[i] = np.rollaxis(self.incoming[i], 0, i+1)
# loop over subsets
for i in range(0, mnum):
curr = self.incoming[:]
del curr[i]
newMessage = reduce(np.multiply, curr, self.P)
# sum over all vars except i!
# roll axis i to front then sum over all other axes
newMessage = np.rollaxis(newMessage, i, 0)
newMessage = np.sum(newMessage, tuple(range(1,mnum)))
newMessage.shape = (newMessage.shape[0],1)
#store new message
self.outgoing[i] = newMessage
# normalize once finished with all messages
self.normalizeMessages()
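if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: a two-variable chain
    # A -- f(A, B) -- B. We condition A on its first value and propagate one
    # factor-to-variable message, which leaves P(B | A = 0) in B's inbox.
    a = VarNode('A', 2, nid=0)
    b = VarNode('B', 2, nid=1)
    P = np.array([[0.9, 0.1],
                  [0.2, 0.8]])   # rows index A, columns index B
    f = FacNode(P, 2, a, b)      # wiring the factor also registers it on a and b
    a.condition(0)               # observe A = 0
    a.sendMessages()             # push the observation into the factor
    f.prepMessages()
    f.sendMessages()
    print(b.incoming[0].ravel()) # expected: [0.9 0.1]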
|
422316
|
from rest_framework.viewsets import GenericViewSet
from rest_framework import mixins
from rest_framework.exceptions import PermissionDenied
from django.db.models import Count, Avg
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import serializers
from ..models import User, Following, Prediction, Equipment
from ..serializers import EquipmentSerializer, UserSerializer
class UserViewSet(mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
GenericViewSet):
"""
View and edit users
"""
serializer_class = UserSerializer
queryset = User.objects.all()
def check_object_permissions(self, request, user):
        # Another user can only retrieve; cannot update, partial_update or destroy
if self.action != 'retrieve' and request.user != user:
raise PermissionDenied
class UserSuccessViewSet(GenericViewSet):
"""
View user success rates
"""
filter_backends = [DjangoFilterBackend]
filterset_fields = ('user',)
class serializer_class(serializers.Serializer): # this is just for schema generation, not used
base_equipment = EquipmentSerializer()
target_equipment = EquipmentSerializer()
success_rate = serializers.FloatField()
prediction_count = serializers.IntegerField()
user = UserSerializer()
def get_queryset(self):
return (
Prediction.objects
.exclude(result=Prediction.PENDING) # exclude pending predictions
            .values('user', 'base_equipment', 'target_equipment')  # group by user and base/target equipment pair
.annotate(count=Count('pk'), average=Avg('result')) # count and average for each group
.filter(count__gte=5) # condition for visibility
)
def list(self, request):
queryset = self.filter_queryset(self.get_queryset())
success_list = []
for item in queryset:
base_eq = Equipment.objects.get(pk=item['base_equipment'])
target_eq = Equipment.objects.get(pk=item['target_equipment'])
user = User.objects.get(pk=item['user'])
context = self.get_serializer_context()
base_eq = EquipmentSerializer(base_eq, context=context).data
target_eq = EquipmentSerializer(target_eq, context=context).data
user = UserSerializer(user, context=context).data
success_list.append({'base_equipment': base_eq,
'target_equipment': target_eq,
'success_rate': item['average'],
'prediction_count': item['count'],
'user': user})
return Response(success_list)
|
422331
|
import datetime
import requests
import json
import time
from sqlite_utils.db import AlterError, ForeignKey
def save_items(items, db):
for item in items:
transform(item)
authors = item.pop("authors", None)
items_authors_to_save = []
if authors:
authors_to_save = []
for details in authors.values():
authors_to_save.append(
{
"author_id": int(details["author_id"]),
"name": details["name"],
"url": details["url"],
}
)
items_authors_to_save.append(
{
"author_id": int(details["author_id"]),
"item_id": int(details["item_id"]),
}
)
db["authors"].insert_all(authors_to_save, pk="author_id", replace=True)
db["items"].insert(item, pk="item_id", alter=True, replace=True)
if items_authors_to_save:
db["items_authors"].insert_all(
items_authors_to_save,
pk=("author_id", "item_id"),
foreign_keys=("author_id", "item_id"),
replace=True
)
def transform(item):
for key in (
"item_id",
"resolved_id",
"favorite",
"status",
"time_added",
"time_updated",
"time_read",
"time_favorited",
"is_article",
"is_index",
"has_video",
"has_image",
"word_count",
"time_to_read",
"listen_duration_estimate",
):
if key in item:
item[key] = int(item[key])
for key in ("time_read", "time_favorited"):
if key in item and not item[key]:
item[key] = None
def ensure_fts(db):
if "items_fts" not in db.table_names():
db["items"].enable_fts(["resolved_title", "excerpt"], create_triggers=True)
def fetch_stats(auth):
response = requests.get(
"https://getpocket.com/v3/stats",
{
"consumer_key": auth["pocket_consumer_key"],
"access_token": auth["pocket_access_token"],
},
)
response.raise_for_status()
return response.json()
class FetchItems:
def __init__(self, auth, since=None, page_size=500, sleep=2, retry_sleep=3, record_since=None):
self.auth = auth
self.since = since
self.page_size = page_size
self.sleep = sleep
self.retry_sleep = retry_sleep
self.record_since = record_since
def __iter__(self):
offset = 0
retries = 0
while True:
args = {
"consumer_key": self.auth["pocket_consumer_key"],
"access_token": self.auth["pocket_access_token"],
"sort": "oldest",
"state": "all",
"detailType": "complete",
"count": self.page_size,
"offset": offset,
}
if self.since is not None:
args["since"] = self.since
response = requests.get("https://getpocket.com/v3/get", args)
if response.status_code == 503 and retries < 5:
print("Got a 503, retrying...")
retries += 1
time.sleep(retries * self.retry_sleep)
continue
else:
retries = 0
response.raise_for_status()
page = response.json()
items = list((page["list"] or {}).values())
next_since = page["since"]
if self.record_since and next_since:
self.record_since(next_since)
if not items:
break
yield from items
offset += self.page_size
if self.sleep:
time.sleep(self.sleep)
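if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module (assumes sqlite-utils and
    # requests are installed). The item below only mimics the shape of a Pocket API
    # record; all values are made up.
    import sqlite_utils
    db = sqlite_utils.Database(':memory:')
    save_items([{
        'item_id': '1', 'resolved_id': '1', 'status': '0', 'favorite': '0',
        'time_added': '1600000000', 'time_updated': '1600000000',
        'time_read': '0', 'time_favorited': '0',
        'resolved_title': 'Example', 'excerpt': 'Example excerpt',
        'authors': {'1': {'author_id': '1', 'name': 'Jane Doe',
                          'url': 'https://example.com', 'item_id': '1'}},
    }], db)
    ensure_fts(db)
    print(db['items'].count, db['authors'].count)  # 1 1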
|
422333
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from typing import Any
from .utils import BasicBrain
class PERD3QNAgent(BasicBrain):
""" Prioritized Experience Replay Dueling Double Deep Q Network
Parameters:
-----------
input_dim : int, default 153
The input dimension
output_dim : int, default 8
The output dimension
exploration : int, default 1000
The number of epochs to explore the environment before learning
soft_update_freq : int, default 200
The frequency at which to align the target and eval nets
train_freq : int, default 20
The frequency at which to train the agent
learning_rate : float, default 1e-3
Learning rate
batch_size : int
The number of training samples to work through before the model's internal parameters are updated.
gamma : float, default 0.98
Discount factor. How far out should rewards in the future influence the policy?
capacity : int, default 10_000
Capacity of replay buffer
load_model : str, default False
Path to an existing model
training : bool, default True,
Whether to continue training or not
"""
def __init__(self, input_dim=153, output_dim=8, exploration=1000, soft_update_freq=200, train_freq=20,
learning_rate=1e-3, batch_size=64, capacity=10000, gamma=0.99, load_model=False, training=True):
super().__init__(input_dim, output_dim, "PERD3QN")
self.target_net = DuelingDDQN(input_dim, output_dim)
self.eval_net = DuelingDDQN(input_dim, output_dim)
self.eval_net.load_state_dict(self.target_net.state_dict())
self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=learning_rate)
self.buffer = PrioritizedReplayBuffer(capacity)
self.loss_fn = nn.MSELoss()
self.exploration = exploration
self.soft_update_freq = soft_update_freq
self.train_freq = train_freq
self.batch_size = batch_size
self.gamma = gamma
self.n_epi = 0
self.epsilon = 0.9
self.epsilon_min = 0.05
self.decay = 0.99
self.training = training
if not self.training:
self.epsilon = 0
if load_model:
self.eval_net.load_state_dict(torch.load(load_model))
self.eval_net.eval()
if self.training:
self.target_net.load_state_dict(torch.load(load_model))
self.target_net.eval()
self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=learning_rate)
def get_action(self, state, n_epi):
if self.training:
if n_epi > self.n_epi:
if self.epsilon > self.epsilon_min:
self.epsilon = self.epsilon * self.decay
self.n_epi = n_epi
action = self.eval_net.act(torch.FloatTensor(np.expand_dims(state, 0)), self.epsilon)
return action
def memorize(self, obs, action, reward, next_obs, done):
self.buffer.store(obs, action, reward, next_obs, done)
def train(self):
observation, action, reward, next_observation, done, indices, weights = self.buffer.sample(self.batch_size)
observation = torch.FloatTensor(observation)
action = torch.LongTensor(action)
reward = torch.FloatTensor(reward)
next_observation = torch.FloatTensor(next_observation)
done = torch.FloatTensor(done)
q_values = self.eval_net.forward(observation)
next_q_values = self.target_net.forward(next_observation)
next_q_value = next_q_values.max(1)[0].detach()
q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
expected_q_value = reward + self.gamma * (1 - done) * next_q_value
loss = self.loss_fn(q_value, expected_q_value)
priorities = torch.abs(next_q_value - q_value).detach().numpy()
self.buffer.update_priorities(indices, priorities)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def learn(self, age, dead, action, state, reward, state_prime, done, n_epi):
self.memorize(state, action, reward, state_prime, done)
if n_epi > self.exploration:
if age % self.train_freq == 0 or dead:
self.train()
if n_epi % self.soft_update_freq == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
def apply_gaussian_noise(self):
with torch.no_grad():
self.target_net.fc.weight.add_(torch.randn(self.target_net.fc.weight.size()))
self.target_net.load_state_dict(self.eval_net.state_dict())
class PrioritizedReplayBuffer(object):
def __init__(self, capacity, alpha=.6, beta=.4, beta_increment=1000):
self.capacity = capacity
self.alpha = alpha
self.beta = beta
self.beta_increment = beta_increment
self.pos = 0
self.memory = []
self.priorities = np.zeros([self.capacity], dtype=np.float32)
def store(self, observation, action, reward, next_observation, done):
observation = np.expand_dims(observation, 0)
next_observation = np.expand_dims(next_observation, 0)
max_prior = np.max(self.priorities) if self.memory else 1.0
if len(self.memory) < self.capacity:
self.memory.append([observation, action, reward, next_observation, done])
else:
self.memory[self.pos] = [observation, action, reward, next_observation, done]
self.priorities[self.pos] = max_prior
self.pos += 1
self.pos = self.pos % self.capacity
def sample(self, batch_size):
if len(self.memory) < self.capacity:
probs = self.priorities[: len(self.memory)]
else:
probs = self.priorities
probs = probs ** self.alpha
probs = probs / np.sum(probs)
indices = np.random.choice(len(self.memory), batch_size, p=probs)
samples = [self.memory[idx] for idx in indices]
weights = (len(self.memory) * probs[indices]) ** (- self.beta)
if self.beta < 1:
self.beta += self.beta_increment
weights = weights / np.max(weights)
weights = np.array(weights, dtype=np.float32)
observation, action, reward, next_observation, done = zip(* samples)
return np.concatenate(observation, 0), action, reward, np.concatenate(next_observation, 0), done, indices, weights
def update_priorities(self, indices, priorities):
for idx, priority in zip(indices, priorities):
self.priorities[idx] = priority
def __len__(self):
return len(self.memory)
class DuelingDDQN(nn.Module):
def __init__(self, observation_dim, action_dim):
super(DuelingDDQN, self).__init__()
self.observation_dim = observation_dim
self.action_dim = action_dim
self.fc = nn.Linear(self.observation_dim, 128)
self.adv_fc1 = nn.Linear(128, 128)
self.adv_fc2 = nn.Linear(128, self.action_dim)
self.value_fc1 = nn.Linear(128, 128)
self.value_fc2 = nn.Linear(128, 1)
def forward(self, observation):
feature = self.fc(observation)
advantage = self.adv_fc2(F.relu(self.adv_fc1(F.relu(feature))))
value = self.value_fc2(F.relu(self.value_fc1(F.relu(feature))))
return advantage + value - advantage.mean()
def act(self, observation, epsilon):
if random.random() > epsilon:
q_value = self.forward(observation)
action = q_value.max(1)[1].data[0].item()
else:
action = random.choice(list(range(self.action_dim)))
return action
def __call__(self, *args, **kwargs) -> Any:
""" Necessary to remove linting problem in class above: https://github.com/pytorch/pytorch/issues/24326 """
return super().__call__(*args, **kwargs)
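if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. It assumes torch is
    # installed and that the file is imported as part of its package (because of the
    # relative `.utils` import above). The dueling head combines a state value and
    # per-action advantages into Q-values.
    net = DuelingDDQN(observation_dim=153, action_dim=8)
    obs = torch.randn(4, 153)
    print(net(obs).shape)                  # torch.Size([4, 8])
    print(net.act(obs[:1], epsilon=0.0))   # greedy action index for a single observation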
|
422334
|
import torch
from torch import nn
from torch.nn import functional as F
class LWSLinear(nn.Linear):
__constants__ = ['bias', 'in_features', 'out_features']
def __init__(self, in_features, out_features, bias=True):
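        # Deliberately skip nn.Linear.__init__ (this calls nn.Module.__init__ instead)
        # so that weight, bias and the extra per-class weight_scaler can be registered
        # manually below.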
super(nn.Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.weight_scaler = nn.Parameter(torch.Tensor(out_features, 1))
self.reset_parameters()
def reset_parameters(self):
super().reset_parameters()
nn.init.constant_(self.weight_scaler, 1.0)
def forward(self, input):
return F.linear(input, self.weight * self.weight_scaler, self.bias)
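if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module (assumes torch is installed).
    # The learnable weight_scaler rescales each output row of the classifier weight,
    # as in learnable weight scaling (LWS) classifiers.
    layer = LWSLinear(16, 4)
    x = torch.randn(2, 16)
    print(layer(x).shape)  # torch.Size([2, 4])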
|
422347
|
import wx
import wx.grid as gridlib
import Utils
from HighPrecisionTimeEdit import HighPrecisionTimeEdit
class HighPrecisionTimeEditor(gridlib.GridCellEditor):
Empty = '00:00:00.000'
def __init__(self):
self._tc = None
self.startValue = self.Empty
super().__init__()
def Create( self, parent, id=wx.ID_ANY, evtHandler=None ):
self._tc = HighPrecisionTimeEdit(parent, id, allow_none=False, style=wx.TE_PROCESS_ENTER)
self.SetControl( self._tc )
if evtHandler:
self._tc.PushEventHandler( evtHandler )
def SetSize( self, rect ):
self._tc.SetSize( int(rect.x), int(rect.y), int(rect.width+2), int(rect.height+2), wx.SIZE_ALLOW_MINUS_ONE )
def BeginEdit( self, row, col, grid ):
self.startValue = grid.GetTable().GetValue(row, col).strip()
v = self.startValue
v = Utils.SecondsToStr( Utils.StrToSeconds(v), full=True )
self._tc.SetValue( v )
self._tc.SetSelection(-1,-1)
wx.CallAfter( self._tc.SetFocus )
def EndEdit( self, row, col, grid, value = None ):
changed = False
v = self._tc.GetValue()
v = Utils.SecondsToStr( Utils.StrToSeconds(v), full=True )
if v != self.startValue:
if v == self.Empty:
v = ''
else:
v = Utils.SecondsToStr( Utils.StrToSeconds(v), full=False )
changed = True
grid.GetTable().SetValue( row, col, v )
self.startValue = self.Empty
self._tc.SetValue( self.startValue )
def Reset( self ):
self._tc.SetValue( self.startValue )
def Clone( self ):
return HighPrecisionTimeEditor()
|
422353
|
import os
from pathlib import Path
output_dir = Path("outputs")
if not output_dir.exists():
    print('output_dir {} does not exist.'.format(output_dir))
exit(-1)
eval_folder = output_dir.joinpath('eval')
if not eval_folder.exists():
eval_folder.mkdir(parents=True, exist_ok=True)
testlist = "./lists/dtu/test.txt"
with open(testlist) as f:
scans = f.readlines()
scans = [line.rstrip() for line in scans]
for scan in scans:
scan_folder = output_dir.joinpath(scan).joinpath('points_mvsnet')
# print(os.listdir(str(scan_folder)))
consis_folders = [f for f in os.listdir(str(scan_folder)) if f.startswith('consistencyCheck-')]
consis_folders.sort()
# print(consis_folders)
consis_folder = consis_folders[-1]
source_ply = scan_folder.joinpath(consis_folder).joinpath('final3d_model.ply')
scan_idx = int(scan[4:])
target_ply = eval_folder.joinpath('mvsnet{:03d}_l3.ply'.format(scan_idx))
# cmd = 'cp ' + str(source_ply) + ' ' + str(target_ply)
cmd = 'mv ' + str(source_ply) + ' ' + str(target_ply)
print(cmd)
os.system(cmd)
|
422395
|
import json
from django.forms.widgets import Widget
from jarbas.dashboard.admin.subquotas import Subquotas
class ReceiptUrlWidget(Widget):
def render(self, name, value, attrs=None, renderer=None):
if not value:
return ''
url = '<div class="readonly"><a href="{}" target="_blank">{}</a></div>'
return url.format(value, value)
class SubquotaWidget(Widget, Subquotas):
def render(self, name, value, attrs=None, renderer=None):
value = self.pt_br(value) or value
return '<div class="readonly">{}</div>'.format(value)
class SuspiciousWidget(Widget):
SUSPICIONS = (
'meal_price_outlier',
'over_monthly_subquota_limit',
'suspicious_traveled_speed_day',
'invalid_cnpj_cpf',
'election_expenses',
'irregular_companies_classifier'
)
HUMAN_NAMES = (
'Preço de refeição muito incomum',
        'Extrapolou limite da (sub)quota',
'Muitas despesas em diferentes cidades no mesmo dia',
'CPF ou CNPJ inválidos',
'Gasto com campanha eleitoral',
'CNPJ irregular'
)
MAP = dict(zip(SUSPICIONS, HUMAN_NAMES))
def render(self, name, value, attrs=None, renderer=None):
value_as_dict = json.loads(value)
if not value_as_dict:
return ''
values = (self.MAP.get(k, k) for k in value_as_dict.keys())
suspicions = '<br>'.join(values)
return '<div class="readonly">{}</div>'.format(suspicions)
|
422403
|
from pwn import *
context.arch = 'amd64'
context.terminal = ['tmux', 'splitw', '-h']
p = process("./simplerop")
gdb.attach(p, '')
binary = ELF("./simplerop")
rop = ROP(binary)
binsh = 0x402008
system = 0x4011df
rop.call(system, [binsh])
print(rop.dump())
p.sendline(b'A' * 8 + rop.chain())
p.interactive()
|
422425
|
import os
import subprocess
import sys
from detail.android_studio_build import android_studio_build
from detail.download_unzip import download_unzip
from detail.polly_build import polly_build
ci_type = os.getenv('TYPE')
ci_type_expected = 'Expected values:\n* polly\n* android-studio'
if ci_type is None:
sys.exit('TYPE is empty.\n{}'.format(ci_type_expected))
if os.getenv('APPVEYOR') == 'True':
download_unzip(
'https://github.com/ruslo/polly/archive/master.zip', '_polly'
)
cwd = os.getcwd()
polly_bin = os.path.join(cwd, '_polly', 'polly-master', 'bin')
# Install dependencies (CMake, Ninja)
ci_deps_script = os.path.join(polly_bin, 'install-ci-dependencies.py')
subprocess.check_call([sys.executable, ci_deps_script])
# Tune locations
cmake_bin = os.path.join(cwd, '_ci', 'cmake', 'bin')
os.environ['PATH'] = "{};{}".format(cmake_bin, os.getenv('PATH'))
if ci_type == 'polly':
polly_build()
elif ci_type == 'android-studio':
android_studio_build()
else:
sys.exit('Unknown TYPE value: "{}".\n{}'.format(ci_type, ci_type_expected))
|
422470
|
import os
from floyd.constants import DEFAULT_FLOYD_IGNORE_LIST
from floyd.log import logger as floyd_logger
class FloydIgnoreManager(object):
"""
Manages .floydignore file in the current directory
"""
    CONFIG_FILE_PATH = os.path.join(os.getcwd(), ".floydignore")
@classmethod
def init(cls):
if os.path.isfile(cls.CONFIG_FILE_PATH):
floyd_logger.debug("floyd ignore file already present at %s",
cls.CONFIG_FILE_PATH)
return
floyd_logger.debug("Setting default floyd ignore in the file %s",
cls.CONFIG_FILE_PATH)
with open(cls.CONFIG_FILE_PATH, "w") as config_file:
config_file.write(DEFAULT_FLOYD_IGNORE_LIST)
@classmethod
def get_lists(cls, config_file_path=None):
# Remove a preceding '/'. The glob matcher we use will interpret a
        # pattern starting with a '/' as an absolute path, so we remove the
# '/'. For details on the glob matcher, see:
# https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.match
def trim_slash_prefix(path):
if path.startswith('/'):
                return path[1:]
            return path
config_file_path = config_file_path or cls.CONFIG_FILE_PATH
if not os.path.isfile(config_file_path):
return ([], [])
ignore_list = []
whitelist = []
with open(config_file_path, "r") as floyd_ignore_file:
for line in floyd_ignore_file:
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith('!'):
line = line[1:]
whitelist.append(trim_slash_prefix(line))
continue
# To allow escaping file names that start with !, #, or \,
# remove the escaping \
if line.startswith('\\'):
line = line[1:]
ignore_list.append(trim_slash_prefix(line))
return (ignore_list, whitelist)
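if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module (assumes the floyd package
    # is importable, since the constants and logger come from it). Writes a throwaway
    # ignore file and shows the (ignore_list, whitelist) split.
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.floydignore', delete=False) as tmp:
        tmp.write('# comment\n*.log\n!keep.log\n/data\n')
    print(FloydIgnoreManager.get_lists(tmp.name))  # (['*.log', 'data'], ['keep.log'])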
|
422479
|
from __future__ import absolute_import, print_function
from llvmlite import ir
from llvmlite.ir.transforms import Visitor, CallVisitor
class FastFloatBinOpVisitor(Visitor):
"""
    A pass that adds fastmath flags to float binop instructions that don't
    already have any flags.
"""
float_binops = frozenset(['fadd', 'fsub', 'fmul', 'fdiv', 'frem', 'fcmp'])
def __init__(self, flags):
self.flags = flags
def visit_Instruction(self, instr):
if instr.opname in self.float_binops:
if not instr.flags:
for flag in self.flags:
instr.flags.append(flag)
class FastFloatCallVisitor(CallVisitor):
"""
A pass to change all float function calls to use fastmath.
"""
def __init__(self, flags):
self.flags = flags
def visit_Call(self, instr):
# Add to any call that has float/double return type
if instr.type in (ir.FloatType(), ir.DoubleType()):
for flag in self.flags:
instr.fastmath.add(flag)
def rewrite_module(mod, options):
"""
Rewrite the given LLVM module to use fastmath everywhere.
"""
flags = options.flags
FastFloatBinOpVisitor(flags).visit(mod)
FastFloatCallVisitor(flags).visit(mod)
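if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module (assumes llvmlite is
    # installed). Builds a tiny module with one fadd and lets rewrite_module tag it
    # with the 'fast' flag; the `options` object is a stand-in for whatever
    # configuration the real caller passes in.
    from types import SimpleNamespace
    double = ir.DoubleType()
    module = ir.Module(name='demo')
    fnty = ir.FunctionType(double, (double, double))
    fn = ir.Function(module, fnty, name='add')
    builder = ir.IRBuilder(fn.append_basic_block(name='entry'))
    builder.ret(builder.fadd(fn.args[0], fn.args[1]))
    rewrite_module(module, SimpleNamespace(flags=['fast']))
    print(module)  # the fadd line should now read "fadd fast double ..."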
|
422493
|
import tensorflow as tf
import tensorflow.contrib.layers as layers
import numpy as np
import data_util
from model_components import task_specific_attention, bidirectional_rnn
class HANClassifierModel():
""" Implementation of document classification model described in
`Hierarchical Attention Networks for Document Classification (Yang et al.,
2016)`
(https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf)
"""
def __init__(self,
vocab_size,
embedding_size,
classes,
fw_word_cell,
bw_word_cell,
fw_sentence_cell,
bw_sentence_cell,
word_output_size,
sentence_output_size,
max_grad_norm,
dropout_keep_proba,
is_training=None,
learning_rate=1e-4,
device='/cpu:0',
scope=None):
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.classes = classes
self.fw_word_cell = fw_word_cell
self.bw_word_cell = bw_word_cell
self.word_output_size = word_output_size
self.fw_sentence_cell = fw_sentence_cell
self.bw_sentence_cell = bw_sentence_cell
self.sentence_output_size = sentence_output_size
self.max_grad_norm = max_grad_norm
self.dropout_keep_proba = dropout_keep_proba
with tf.variable_scope(scope or 'tcm') as scope:
self.global_step = tf.Variable(0, name='global_step', trainable=False)
if is_training is not None:
self.is_training = is_training
else:
self.is_training = tf.placeholder(dtype=tf.bool, name='is_training')
self.sample_weights = tf.placeholder(
shape=(None,), dtype=tf.float32, name='sample_weights')
# [document x sentence x word]
self.inputs = tf.placeholder(
shape=(None, None, None), dtype=tf.int32, name='inputs')
# [document x sentence]
self.word_lengths = tf.placeholder(
shape=(None, None), dtype=tf.int32, name='word_lengths')
# [document]
self.sentence_lengths = tf.placeholder(
shape=(None,), dtype=tf.int32, name='sentence_lengths')
# [document]
self.labels = tf.placeholder(shape=(None,), dtype=tf.int32, name='labels')
(self.document_size, self.sentence_size, self.word_size) = tf.unstack(
tf.shape(self.inputs))
self._init_embedding(scope)
# embeddings cannot be placed on GPU
with tf.device(device):
self._init_body(scope)
with tf.variable_scope('train'):
self.cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.labels, logits=self.logits)
self.loss = tf.reduce_mean(
tf.multiply(self.cross_entropy, self.sample_weights))
tf.summary.scalar('loss', self.loss)
self.accuracy = tf.reduce_mean(
tf.cast(tf.nn.in_top_k(self.logits, self.labels, 1), tf.float32))
tf.summary.scalar('accuracy', self.accuracy)
tvars = tf.trainable_variables()
grads, global_norm = tf.clip_by_global_norm(
tf.gradients(self.loss, tvars), self.max_grad_norm)
tf.summary.scalar('global_grad_norm', global_norm)
opt = tf.train.AdamOptimizer(learning_rate)
self.train_op = opt.apply_gradients(
zip(grads, tvars), name='train_op', global_step=self.global_step)
self.summary_op = tf.summary.merge_all()
def _init_embedding(self, scope):
with tf.variable_scope(scope):
with tf.variable_scope('embedding') as scope:
self.embedding_matrix = tf.get_variable(
name='embedding_matrix',
shape=[self.vocab_size, self.embedding_size],
initializer=layers.xavier_initializer(),
dtype=tf.float32)
self.inputs_embedded = tf.nn.embedding_lookup(self.embedding_matrix,
self.inputs)
def _init_body(self, scope):
with tf.variable_scope(scope):
word_level_inputs = tf.reshape(self.inputs_embedded, [
self.document_size * self.sentence_size, self.word_size,
self.embedding_size
])
word_level_lengths = tf.reshape(self.word_lengths,
[self.document_size * self.sentence_size])
with tf.variable_scope('word') as scope:
word_encoder_output, _ = bidirectional_rnn(
self.fw_word_cell,
self.bw_word_cell,
word_level_inputs,
word_level_lengths,
scope=scope)
with tf.variable_scope('attention') as scope:
word_level_output = task_specific_attention(
word_encoder_output, self.word_output_size, scope=scope)
with tf.variable_scope('dropout'):
word_level_output = layers.dropout(
word_level_output,
keep_prob=self.dropout_keep_proba,
is_training=self.is_training,
)
# sentence_level
sentence_inputs = tf.reshape(
word_level_output,
[self.document_size, self.sentence_size, self.word_output_size])
with tf.variable_scope('sentence') as scope:
sentence_encoder_output, _ = bidirectional_rnn(
self.fw_sentence_cell,
self.bw_sentence_cell,
sentence_inputs,
self.sentence_lengths,
scope=scope)
with tf.variable_scope('attention') as scope:
sentence_level_output = task_specific_attention(
sentence_encoder_output, self.sentence_output_size, scope=scope)
with tf.variable_scope('dropout'):
sentence_level_output = layers.dropout(
sentence_level_output,
keep_prob=self.dropout_keep_proba,
is_training=self.is_training,
)
with tf.variable_scope('classifier'):
self.logits = layers.fully_connected(
sentence_level_output, self.classes, activation_fn=None)
self.prediction = tf.argmax(self.logits, axis=-1)
def get_feed_data(self, x, y=None, class_weights=None, is_training=True):
x_m, doc_sizes, sent_sizes = data_util.batch(x)
fd = {
self.inputs: x_m,
self.sentence_lengths: doc_sizes,
self.word_lengths: sent_sizes,
}
if y is not None:
fd[self.labels] = y
if class_weights is not None:
fd[self.sample_weights] = [class_weights[yy] for yy in y]
else:
fd[self.sample_weights] = np.ones(shape=[len(x_m)], dtype=np.float32)
fd[self.is_training] = is_training
return fd
if __name__ == '__main__':
try:
from tensorflow.contrib.rnn import LSTMCell, LSTMStateTuple, GRUCell
except ImportError:
LSTMCell = tf.nn.rnn_cell.LSTMCell
LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple
GRUCell = tf.nn.rnn_cell.GRUCell
tf.reset_default_graph()
with tf.Session() as session:
model = HANClassifierModel(
vocab_size=10,
embedding_size=5,
classes=2,
fw_word_cell=GRUCell(10),
bw_word_cell=GRUCell(10),
fw_sentence_cell=GRUCell(10),
bw_sentence_cell=GRUCell(10),
word_output_size=10,
sentence_output_size=10,
max_grad_norm=5.0,
dropout_keep_proba=0.5,
)
session.run(tf.global_variables_initializer())
fd = {
model.is_training: False,
model.inputs: [[[5, 4, 1, 0], [3, 3, 6, 7], [6, 7, 0, 0]],
[[2, 2, 1, 0], [3, 3, 6, 7], [0, 0, 0, 0]]],
model.word_lengths: [
[3, 4, 2],
[3, 4, 0],
],
model.sentence_lengths: [3, 2],
model.labels: [0, 1],
}
print(session.run(model.logits, fd))
session.run(model.train_op, fd)
|
422494
|
from snowflake.ingest import SimpleIngestManager
from snowflake.ingest import StagedFile
import time
import os
def test_simple_ingest(connection_ctx, test_util):
param = connection_ctx['param']
pipe_name = '{}.{}.TEST_SIMPLE_INGEST_PIPE'.format(
param['database'],
param['schema'])
private_key = test_util.read_private_key()
cur = connection_ctx['cnx'].cursor()
test_file = os.path.join(test_util.get_data_dir(), 'test_file.csv')
cur.execute('create or replace table TEST_SIMPLE_INGEST_TABLE(c1 number, c2 string)')
cur.execute('create or replace stage TEST_SIMPLE_INGEST_STAGE')
cur.execute('put file://{} @TEST_SIMPLE_INGEST_STAGE'.format(test_file))
cur.execute('create or replace pipe {0} as copy into TEST_SIMPLE_INGEST_TABLE '
'from @TEST_SIMPLE_INGEST_STAGE'.format(pipe_name))
ingest_manager = SimpleIngestManager(account=param['account'],
user=param['user'],
private_key=private_key,
pipe=pipe_name,
scheme=param['protocol'],
host=param['host'],
port=param['port'])
staged_files = [StagedFile('test_file.csv.gz', None)]
resp = ingest_manager.ingest_files(staged_files)
assert resp['responseCode'] == 'SUCCESS'
start_polling_time = time.time()
while time.time() - start_polling_time < 120:
history_resp = ingest_manager.get_history()
if len(history_resp['files']) == 1:
assert history_resp['files'][0]['path'] == 'test_file.csv.gz'
return
else:
# wait for 20 seconds
time.sleep(20)
    assert False, 'ingest history did not report the file within 120 seconds'
|
422498
|
import pytest
from route_distances.validation import validate_dict
@pytest.mark.parametrize(
"route_index",
[0, 1, 2],
)
def test_validate_example_trees(load_reaction_tree, route_index):
validate_dict(load_reaction_tree("example_routes.json", route_index))
def test_validate_only_mols():
dict_ = {
"smiles": "CCC",
"type": "mol",
"children": [{"smiles": "CCC", "type": "mol"}],
}
with pytest.raises(ValueError, match="string does not match regex"):
validate_dict(dict_)
|
422507
|
import ast
from .fat_tools import (OptimizerStep, ReplaceVariable, FindNodes, NodeTransformer,
compact_dump, copy_lineno, copy_node,
ITERABLE_TYPES)
import inline, copy, astunparse
import operator
CANNOT_UNROLL = (ast.Break, ast.Continue, ast.Raise)
# Map of AST binary operators to their Python equivalents, used by
# eval_args_helper to statically evaluate range() arguments.
operators = {ast.Add: operator.add, ast.Sub: operator.sub,
             ast.Mult: operator.mul, ast.Div: operator.truediv}
#class UnrollStep(OptimizerStep, ast.NodeTransformer):
class UnrollStep(ast.NodeTransformer):
def _visit_For(self, node):
try:
if len(node.iter.args) == 1:
# Ex: for i in range(6)
#num_iter = self.eval_args_helper(node.iter.args[0])
lst_iters = list(range(self.eval_args_helper(node.iter.args[0])))
elif len(node.iter.args) == 2:
# Start and an end
# Ex: for i in range(6, 8)
#num_iter = self.eval_args_helper(node.iter.args[1]) - self.eval_args_helper(node.iter.args[0])
lst_iters = list(range(self.eval_args_helper(node.iter.args[0]), self.eval_args_helper(node.iter.args[1])))
else:
# Start, end and a step
# Ex: for i in range(6, 12, 2)
#num_iter = (self.eval_args_helper(node.iter.args[1]) - self.eval_args_helper(node.iter.args[0])) / self.eval_args_helper(node.iter.args[2])
lst_iters = list(range(self.eval_args_helper(node.iter.args[0]), self.eval_args_helper(node.iter.args[1]), self.eval_args_helper(node.iter.args[2])))
except Exception as e:
            print("FOR_UNROLL EXCEPTION", e)
            print(node.iter.args[0].id)
            return node
        print("FOR_UNROLL lst iters: ", lst_iters)
name = node.target.id
body = node.body
# replace 'for i in (1, 2, 3): body' with...
new_node = []
#for value in node.iter.value:
for value in lst_iters:
#value_ast = _new_constant(node.iter, value) #self._new_constant(node.iter, value)
#print "Value ast: ", value_ast
value_ast = ast.Num(n=value)
# 'i = 1'
name_ast = ast.Name(id=name, ctx=ast.Store())
#copy_lineno(node, name_ast)
assign = ast.Assign(targets=[name_ast],
value=value_ast)
#copy_lineno(node, assign)
new_node.append(assign)
# duplicate 'body'
for item in body:
if isinstance(item, ast.For):
new_node.extend(self.visit(item))
else:
new_node.append(item)
#new_node.extend(body)
if node.orelse:
new_node.extend(node.orelse)
new_node = [copy.deepcopy(ele) for ele in new_node]
return new_node
def visit_For(self, node):
        print("Loop unroll: loop var name", node.target.id)
self.generic_visit(node)
copy_node = copy.deepcopy(node)
new_node = self._visit_For(copy_node)
if new_node is None:
return copy_node
# loop was unrolled: run again the optimize on the new nodes
#return self.visit_node_list(new_node)
return new_node
def eval_args_helper(self, node):
if hasattr(node, 'n'):
return node.n
else:
left_val = self.eval_args_helper(node.left)
right_val = self.eval_args_helper(node.right)
res = operators[type(node.op)](left_val, right_val)
return res
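# Illustrative usage sketch (not part of the original module): unroll a small
# constant-range loop and pretty-print the result. Assumes the `operators`
# mapping defined above and that `astunparse` is installed.
def _demo_unroll_step():
    tree = ast.parse("for i in range(3):\n    total = total + i")
    unrolled = UnrollStep().visit(tree)
    ast.fix_missing_locations(unrolled)
    return astunparse.unparse(unrolled)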
class UnrollListComp:
def unroll_comprehension(self, node):
if not self.config.unroll_loops:
return
# FIXME: support multiple generators
# [i for i in range(3) for y in range(3)]
if len(node.generators) > 1:
return
generator = node.generators[0]
if not isinstance(generator, ast.comprehension):
return
# FIXME: support if
if generator.ifs:
return
if not isinstance(generator.target, ast.Name):
return
target = generator.target.id
if not isinstance(generator.iter, ast.Constant):
return
iter_value = generator.iter.value
if not isinstance(iter_value, ITERABLE_TYPES):
return
if not(1 <= len(iter_value) <= self.config.unroll_loops):
return
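        # Illustrative example of the rewrite below: with iter_value == (1, 2, 3),
        # the comprehension [x * x for x in (1, 2, 3)] becomes the literal list
        # [1 * 1, 2 * 2, 3 * 3]; the loop variable is substituted with each constant
        # via ReplaceVariable before the elements are collected.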
if isinstance(node, ast.DictComp):
keys = []
values = []
for value in iter_value:
ast_value = self.new_constant(node, value)
if ast_value is None:
return
replace = ReplaceVariable(self.filename, {target: ast_value})
key = replace.visit(node.key)
keys.append(key)
value = replace.visit(node.value)
values.append(value)
new_node = ast.Dict(keys=keys, values=values, ctx=ast.Load())
else:
items = []
for value in iter_value:
ast_value = self.new_constant(node, value)
if ast_value is None:
return
replace = ReplaceVariable(self.filename, {target: ast_value})
item = replace.visit(node.elt)
items.append(item)
# FIXME: move below?
if isinstance(node, ast.SetComp):
new_node = ast.Set(elts=items, ctx=ast.Load())
else:
assert isinstance(node, ast.ListComp)
new_node = ast.List(elts=items, ctx=ast.Load())
copy_lineno(node, new_node)
return new_node
|
422555
|
import pandas as pd
import matplotlib.pylab as plt
df_calculated = pd.read_csv('../logs/output.log', sep=' ', names=['ts', 'x', 'y', 'r'])
df_benchmark = pd.read_csv('../logs/benchmarks.log', sep=' ', names=['x', 'y', 'ts', 'ori', 'subloc'])
plt.gca().set_aspect('equal')
plt.plot(df_calculated.x, df_calculated.y, '.-', label='calculated positions')
plt.plot(df_benchmark.x, df_benchmark.y, '-', label='reference trace')
plt.legend(bbox_to_anchor=(1.0, -0.25))
plt.show()
|
422578
|
from nose.tools import assert_equals, assert_false, assert_true
import json
import imp
imp.load_source("check_user","check_user_py3.py")
from check_user import User
def test_check_user_positive():
chkusr = User("root")
success, ret_msg = chkusr.check_if_user_exists()
assert_true(success)
assert_equals('User root exists', ret_msg)
def test_check_user_negative():
chkusr = User("this_user_does_not_exists")
success, ret_msg = chkusr.check_if_user_exists()
assert_false(success)
assert_equals('User this_user_does_not_exists does not exists', ret_msg)
|
422618
|
import kerastuner
import tensorflow as tf
from codecarbon import EmissionsTracker
class RandomSearchTuner(kerastuner.tuners.RandomSearch):
def run_trial(self, trial, *args, **kwargs):
# You can add additional HyperParameters for preprocessing and custom training loops
# via overriding `run_trial`
kwargs["batch_size"] = trial.hyperparameters.Int("batch_size", 32, 256, step=32)
super(RandomSearchTuner, self).run_trial(trial, *args, **kwargs)
def build_model(hp):
model = tf.keras.models.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10),
]
)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
return model
def main():
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
tuner = RandomSearchTuner(
build_model,
objective="val_accuracy",
directory="random_search_results",
project_name="codecarbon",
max_trials=3,
)
tracker = EmissionsTracker(project_name="mnist_random_search")
tracker.start()
tuner.search(x_train, y_train, epochs=10, validation_data=(x_test, y_test))
emissions = tracker.stop()
print(f"Emissions : {emissions} kg CO₂")
if __name__ == "__main__":
main()
|
422628
|
from .Source import Source
from inspect import ismethod
from types import FunctionType
from ..util.Math import interp1d
from ..util.SetDefaultParameterValues import SourceParameters
import numpy as np
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
class UserDefined(Source):
def __init__(self, **kwargs):
"""
Parameters
----------
pf: dict
Full parameter file.
"""
self.pf = SourceParameters()
self.pf.update(kwargs)
Source.__init__(self)
self._name = 'user_defined'
self._load()
def _load(self):
sed = self.pf['source_sed']
E = self.pf['source_E']
L = self.pf['source_L']
if sed is not None:
if sed == 'user':
pass
elif type(sed) is FunctionType or ismethod(sed) or \
isinstance(sed, interp1d):
self._func = sed
return
elif type(sed) is tuple:
E, L = sed
elif isinstance(sed, basestring):
E, L = np.loadtxt(sed, unpack=True)
elif (E is not None) and (L is not None):
assert len(E) == len(L)
else:
            raise NotImplementedError("sorry, don't understand!")
self._func = interp1d(E, L, kind='cubic', bounds_error=False)
def _Intensity(self, E, t=0):
return self._func(E)
|
422671
|
from __future__ import division
import sys
from statistics import mean
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from tools.classifier.evaluate_hter import evaluate_with_values
# from classifier.evaluate_hter import evaluate_predictions, evaluate_with_values
sys.path.append('/Users/rodrigobresan/Documents/dev/github/anti_spoofing/spoopy')
import os
from pandas import *
import json
import numpy as np
# from tools.file_utils import file_helper
ARTIFACT_CLEAN_RESULTS_NAME = "results_prediction_names_clean_all.txt"
ARTIFACT_DIRT_RESULTS_NAME = "results_prediction_names_dirt_all.txt"
ARTIFACT_CLEAN_RESULTS_NAME_INCEPTION = "results_prediction_names_clean_mean_inception.txt"
ARTIFACT_DIRT_RESULTS_NAME_INCEPTION = "results_prediction_names_dirt_mean_inception.txt"
TESTING_ALL_FRAMES = True
def generate_results(base_path, output_path, is_inception, perform_clean=False):
    file_name, dict_results = None, {}
    try:
if is_inception:
names, y_pred_proba, y_pred, y_test = load_file_info_inception(base_path)
else:
names, y_pred_proba, y_pred, y_test = load_file_info_target(base_path)
dict_results = extract_results(names, y_pred_proba, y_pred, y_test)
if perform_clean:
file_name = generate_predictions_names_cleaned(output_path, dict_results, is_inception)
else:
file_name = generate_predictions_names_dirt(output_path, dict_results, is_inception)
except Exception as e:
print(e)
return file_name, dict_results
def get_metrics(count_fake, count_real, fa, fr):
bpcer = fr / count_real
apcer = fa / count_fake
hter = (apcer + bpcer) / 2
if hter == 0:
print('woah')
return hter, apcer, bpcer
def generate_predictions_names_cleaned(output_path, dict_results, is_inception):
if is_inception:
file_name = os.path.join(output_path, ARTIFACT_CLEAN_RESULTS_NAME_INCEPTION)
else:
file_name = os.path.join(output_path, ARTIFACT_CLEAN_RESULTS_NAME)
file = open(file_name, "w")
empty_probas = 0
print('total results: ', len(dict_results))
for result in dict_results:
try:
y_pred = dict_results[result][2]
if TESTING_ALL_FRAMES:
for result_proba in dict_results[result][0]:
line = result + "," + str(result_proba)
file.write(str(line) + "\n")
else:
right_pred = 0
right_probas = []
for i, pred in enumerate(y_pred):
ground_truth = dict_results[result][1][i]
if ground_truth == pred:
proba = dict_results[result][0][i]
right_probas.append(proba)
if len(right_probas) == 0:
empty_probas = empty_probas + 1
mean_right_probas = 0
else:
mean_right_probas = mean(right_probas)
line = result + "," + str(mean_right_probas)
file.write(str(line) + "\n")
except Exception as e:
print(e)
print('empty: ', empty_probas)
file.close()
return file_name
def generate_predictions_names_dirt(path, dict_results, is_inception):
if is_inception:
file_name = os.path.join(path, ARTIFACT_DIRT_RESULTS_NAME_INCEPTION)
else:
file_name = os.path.join(path, ARTIFACT_DIRT_RESULTS_NAME)
file = open(file_name, "w")
print(file_name)
print('total results: ', len(dict_results))
for result in dict_results:
try:
if TESTING_ALL_FRAMES:
for result_proba in dict_results[result][0]:
line = result + "," + str(result_proba)
file.write(str(line) + "\n")
else:
mean_right_probas = mean(dict_results[result][0])
line = result + "," + str(mean_right_probas)
file.write(str(line) + "\n")
except Exception as e:
print(e)
file.close()
return file_name
def extract_results(names, y_pred_proba, y_pred, y_test):
dict_results = {}
for i, prediction_proba in enumerate(y_pred_proba[:, 1]):
current_id = names[i]
if current_id not in dict_results: # condition for initializing
dict_results[current_id] = []
dict_results[current_id].append([]) # prediction proba
dict_results[current_id].append([]) # real
dict_results[current_id].append([]) # prediction
dict_results[current_id][0].append(prediction_proba)
dict_results[current_id][1].append(y_test[i])
dict_results[current_id][2].append(y_pred[i])
return dict_results
def load_file_info_target(path):
file = open(os.path.join(path, "names_test.txt"), "r")
lines = file.readlines()
    names = json.loads(lines[0])
y_test = np.load(os.path.join(path, 'y_test.npy'))
y_pred_proba = np.load(os.path.join(path, 'y_pred_proba.npy'))
y_pred = np.load(os.path.join(path, 'y_pred.npy'))
return names, y_pred_proba, y_pred, y_test
def load_file_info_inception(path):
file = open(os.path.join(path, "names_test_inception.txt"), "r")
lines = file.readlines()
    names = json.loads(lines[0])
y_test = np.load(os.path.join(path, 'y_test_inception.npy'))
y_pred_proba = np.load(os.path.join(path, 'y_pred_proba_inception.npy'))
y_pred = np.load(os.path.join(path, 'y_pred_inception.npy'))
return names, y_pred_proba, y_pred, y_test
def list_files(path):
return [f for f in os.listdir(path) if not f.startswith('.')]
def generate_results_intra(features_path, base_output_path):
datasets_origin = list_files(features_path)
for dataset_origin in datasets_origin:
path_origin = os.path.join(features_path, dataset_origin)
attack_types = list_files(path_origin)
for attack_type in attack_types:
path_target = os.path.join(features_path, dataset_origin, attack_type)
properties = list_files(path_target)
for property in properties:
target = os.path.join(features_path, dataset_origin, attack_type, property, "features", "resnet")
try:
output_path = os.path.join(base_output_path, dataset_origin, attack_type, property)
os.makedirs(output_path, exist_ok=True)
generate_results(target, output_path, True, perform_clean=False)
generate_results(target, output_path, True, perform_clean=True)
generate_results(target, output_path, False, perform_clean=False)
generate_results(target, output_path, False, perform_clean=True)
except Exception as e:
print(e)
# results_prediction_names.txt format (one entry per video): video_name, 0.1
# results_prediction_names.txt format (one entry per frame): video_name_frame_1, 0.1
def evaluate_combinated_all(base_probas):
results = [
['Origin', 'Attack', 'HTER', 'APCER', 'BPCER']
]
datasets = list_files(base_probas)
print('datasets: ', datasets)
# evaluate_results('ra', 'cbsr')
for dataset_origin in datasets:
dataset_path = os.path.join(base_probas, dataset_origin)
attack_types = os.listdir(dataset_path)
for attack in attack_types:
print('===============Train: %s Attack: %s=============' % (dataset_origin, attack))
try:
hter, apcer, bpcer = evaluate_results(base_probas, dataset_origin, attack)
row = [dataset_origin, attack, hter, apcer, bpcer]
results.append(row)
except Exception as e:
print(e)
df = DataFrame(results)
print(df)
df.to_csv('results_hter_combinations_intra.csv', sep=' ')
def evaluate_results(path_probas, origin, attack):
train_depth = path_probas + origin + '/' + attack + '/depth/' + ARTIFACT_DIRT_RESULTS_NAME_INCEPTION
train_illumination = path_probas + origin + '/' + attack + '/illumination/' + ARTIFACT_DIRT_RESULTS_NAME_INCEPTION
train_saliency = path_probas + origin + '/' + attack + '/saliency/' + ARTIFACT_DIRT_RESULTS_NAME_INCEPTION
test_depth = path_probas + origin + '/' + attack + '/depth/' + ARTIFACT_DIRT_RESULTS_NAME
test_illumination = path_probas + origin + '/' + attack + '/illumination/' + ARTIFACT_DIRT_RESULTS_NAME
test_saliency = path_probas + origin + '/' + attack + '/saliency/' + ARTIFACT_DIRT_RESULTS_NAME
X_train, y_train, names_train = get_item_data(train_depth, train_illumination, train_saliency)
X_test, y_test, names_test = get_item_data(test_depth, test_illumination, test_saliency)
#
# from matplotlib import pyplot
#
# pyplot.plot(X_train, y_train)
# pyplot.plot(X_test, y_test)
# pyplot.show()
# print('Running with SVC')
top_model = OneVsRestClassifier(BaggingClassifier(SVC(verbose=False), n_jobs=-1))
top_model.fit(X_train, y_train)
y_pred = top_model.predict(X_test)
acc = accuracy_score(y_test, y_pred)
print('acc: ', acc)
# print('Running with RBF kernel')
param_grid = [{'kernel': ['rbf'], 'gamma': [1e-4, 1e-3], 'C': [1, 10, 100, 1000, 10000]}]
grid_model = GridSearchCV(SVC(), param_grid, verbose=False, n_jobs=3)
grid_model.fit(X_train, y_train)
y_pred = grid_model.predict(X_test)
acc = accuracy_score(y_test, y_pred)
print('acc grid search: ', acc)
hter, apcer, bpcer = evaluate_with_values(y_pred, y_test, names_test)
print('%.4f %.4f %.4f' % (hter, apcer, bpcer))
return hter, apcer, bpcer
#
# model = XGBClassifier()
# model.fit(X_train, y_train)
# # make predictions for test data
# y_pred = model.predict(X_test)
# predictions = [round(value) for value in y_pred]
# # evaluate predictions
# accuracy = accuracy_score(y_test, predictions)
# print("Accuracy XGBoost: %.2f%%" % (accuracy * 100.0))
# output_path = '/Users/rodrigobresan/Documents/dev/github/anti_spoofing/spoopy/spoopy/tools/classifier_probas'
#
# y_test_path = os.path.join(output_path, 'y_test.npy')
# y_pred_path = os.path.join(output_path, 'y_pred.npy')
# names_path = os.path.join(output_path, 'names_test.txt')
#
# np.save(y_test_path, y_test)
# np.save(y_pred_path, y_pred)
# file = open(names_path, "w")
# file.write(str(json.dumps(test_names)) + "\n")
# file.close()
def dict_to_nparray(dict):
for i, result in enumerate(dict):
print(result)
def get_item_data(path_dept, path_illumination, path_saliency):
with open(path_dept) as f:
depth_results = f.readlines()
with open(path_illumination) as f:
illumination_results = f.readlines()
with open(path_saliency) as f:
saliency_results = f.readlines()
dict_results = {}
item_names = []
list_sizes = [len(depth_results), len(illumination_results), len(saliency_results)]
min_size = min(list_sizes)
middle = min_size // 2
margin = 200
step = 10
print()
for i, result in enumerate(depth_results[0:min_size]):
current_item = depth_results[i].split(',')[0]
item_names.append(current_item)
if current_item not in dict_results:
dict_results[current_item] = []
dict_results[current_item].append([]) # depth
dict_results[current_item].append([]) # illumination
dict_results[current_item].append([]) # saliency
dict_results[current_item].append([]) # ground truth
try:
dict_results[current_item][0].append(clean(depth_results[i]))
dict_results[current_item][1].append(clean(illumination_results[i]))
dict_results[current_item][2].append(clean(saliency_results[i]))
dict_results[current_item][3].append(name_to_int(current_item))
except Exception as e:
print(e)
np_results = None
for key, value in dict_results.items():
if np_results is None:
np_results = np.array(value)
else:
np_single = np.array(value)
np_results = np.hstack([np_results, np_single])
np_results = np.transpose(np_results)
x = np_results[:, :3]
y = np_results[:, -1]
return x, y, item_names
def name_to_int(item_name):
item = item_name.split('/')[0]
if item == 'fake':
return 0
else:
return 1
def clean(content):
return float(content.split(',')[1].replace('\n', ''))
if __name__ == '__main__':
features_path = '/codes/bresan/remote/spoopy/spoopy/data/6_features'
output_path = '/codes/bresan/remote/spoopy/spoopy/data/7_probas'
generate_results_intra(features_path, output_path)
# evaluate_combinated_all(output_path)
|
422702
|
import dataclasses
from typing import Any, Dict, Optional, Sequence, Union
from . import MemoryDataSource, RangeOutput
@dataclasses.dataclass
class DictMemoryDataSource(MemoryDataSource):
db: Dict[str, Any] = dataclasses.field(default_factory=dict)
async def set(self, key: str, data: str) -> None:
self.db[key] = data.encode()
async def delete(self, key: str) -> None:
self.db.pop(key, None)
async def expire(self, key: str, time: int) -> None:
...
async def exists(self, key: str) -> bool:
return key in self.db
async def zrange(
self,
key: str,
start: int = 0,
stop: int = -1,
withscores: bool = False,
) -> Optional[RangeOutput]:
data: Optional[RangeOutput] = self.db.get(key)
if data is None:
return None
return [i[0] for i in self.db[key]]
async def zadd(
self, key: str, score: float, member: str, *pairs: Union[float, str]
) -> None:
data = [score, member] + list(pairs)
self.db[key] = sorted(
[
(
data[i].encode() if isinstance(data[i], str) else data[i], # type: ignore
data[i - 1],
)
for i in range(1, len(data), 2)
],
key=lambda d: d[1],
)
async def hmset(
self,
key: str,
field: Union[str, bytes],
value: Union[str, bytes],
*pairs: Union[str, bytes],
) -> None:
data = [field, value] + list(pairs)
self.db[key] = {
f.encode()
if isinstance(f := data[i - 1], str) # noqa
else (f if isinstance(f, bytes) else str(f).encode()): v.encode()
if isinstance(v := data[i], str) # noqa
else (v if isinstance(v, bytes) else str(v).encode())
for i in range(1, len(data), 2)
}
async def hmget(
self, key: str, field: Union[str, bytes], *fields: Union[str, bytes]
) -> Sequence[Optional[bytes]]:
data: Dict[bytes, Any] = self.db.get(key, {})
return [
None
if (d := data.get(f.encode() if isinstance(f, str) else f)) # noqa
is None
else (
d
if isinstance(d, bytes)
else (d.encode() if isinstance(d, str) else str(d).encode())
)
for f in (field,) + fields
]
async def hgetall(self, key: str) -> Dict[bytes, bytes]:
return {
f: d.encode()
if isinstance(d, str)
else (
d
if isinstance(d, bytes)
else (d.encode() if isinstance(d, str) else str(d).encode())
)
for f, d in self.db.get(key, {}).items()
}
|
422740
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
# Helper function for returning deprocessing of decorrelated tensors
def get_decorrelation_layers(image_size=(224,224), input_mean=[1,1,1], device='cpu', decay_power=0.75, decorrelate=[True,None]):
mod_list = []
if decorrelate[0] == True:
spatial_mod = SpatialDecorrelationLayer(image_size, decay_power=decay_power, device=device)
mod_list.append(spatial_mod)
if decorrelate[1] != None:
if torch.is_tensor(decorrelate[1]):
matrix = decorrelate[1]
else:
matrix = 'imagenet' if decorrelate[1] == 'none' or decorrelate[1].lower() == 'imagenet' else decorrelate[1]
color_mod = ColorDecorrelationLayer(correlation_matrix=matrix, device=device)
mod_list.append(color_mod)
transform_mod = TransformLayer(input_mean=input_mean, device=device)
mod_list.append(transform_mod)
if decorrelate[0] == True and decorrelate[1] == None:
deprocess_img = lambda x: transform_mod.forward(spatial_mod.forward(x))
elif decorrelate[0] == False and decorrelate[1] != None:
deprocess_img = lambda x: transform_mod.forward(color_mod.forward(x))
    elif decorrelate[0] == True and decorrelate[1] != None:
        deprocess_img = lambda x: transform_mod.forward(color_mod.forward(spatial_mod.forward(x)))
    else:
        deprocess_img = lambda x: transform_mod.forward(x)
return mod_list, deprocess_img
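# Illustrative usage sketch (assumes an older PyTorch where torch.rfft/irfft are
# available, matching the layers above): build the decorrelation modules and map
# a decorrelated parameter tensor back to an image-space tensor.
def _demo_decorrelation_layers():
    mod_list, deprocess_img = get_decorrelation_layers(image_size=(224, 224),
                                                       decorrelate=[True, 'imagenet'])
    spatial_mod = mod_list[0]
    params = torch.randn((1, 3) + tuple(spatial_mod.freqs_shape))
    image = deprocess_img(params)
    return image.shape  # expected: torch.Size([1, 3, 224, 224])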
# Helper function to decorrelate content image
def decorrelate_content(content_image, mod_list):
s, c, t = None, None, None
for i, mod in enumerate(mod_list):
if isinstance(mod, SpatialDecorrelationLayer):
s = i
if isinstance(mod, ColorDecorrelationLayer):
c = i
if isinstance(mod, TransformLayer):
t = i
if t != None:
content_image = mod_list[t].untransform(content_image)
mod_list[t].activ = lambda x: x.clamp(0,1)
if c != None:
content_image = mod_list[c].decorrelate_color(content_image)
if s != None:
content_image = mod_list[s].fft_image(content_image)
return content_image
# Spatial Decorrelation layer based on tensorflow/lucid & greentfrapp/lucent
class SpatialDecorrelationLayer(torch.nn.Module):
def __init__(self, image_size=(224,224), decay_power=1.0, device='cpu'):
super(SpatialDecorrelationLayer, self).__init__()
self.setup_scale(image_size, decay_power, device)
def setup_scale(self, image_size, decay_power=1.0, device='cpu'):
self.h, self.w = image_size[0], image_size[1]
self.scale = self.create_scale(image_size, decay_power).to(device)
def create_scale(self, size, decay_power=1.0):
freqs = SpatialDecorrelationLayer.rfft2d_freqs(*size)
self.freqs_shape = freqs.size() + (2,)
scale = 1.0 / torch.max(freqs, torch.full_like(freqs, 1.0 / max(size))) ** decay_power
return scale[None, None, ..., None]
@staticmethod
def rfft2d_freqs(h, w):
fy = SpatialDecorrelationLayer.pytorch_fftfreq(h)[:, None]
wadd = 2 if w % 2 == 1 else 1
fx = SpatialDecorrelationLayer.pytorch_fftfreq(w)[: w // 2 + wadd]
return torch.sqrt((fx * fx) + (fy * fy))
@staticmethod
def pytorch_fftfreq(v, d=1.0):
results = torch.empty(v)
s = (v - 1) // 2 + 1
results[:s] = torch.arange(0, s)
results[s:] = torch.arange(-(v // 2), 0)
return results * (1.0 / (v * d))
def fft_image(self, input):
input = input * 4
input = torch.rfft(input, 2, normalized=True)
return input / self.scale
def ifft_image(self, input):
input = input * self.scale
input = torch.irfft(input, 2, normalized=True, signal_sizes=(self.h, self.w))
return input / 4
def forward(self, input):
return self.ifft_image(input)
# Color Decorrelation layer based on tensorflow/lucid & greentfrapp/lucent
class ColorDecorrelationLayer(nn.Module):
def __init__(self, correlation_matrix='imagenet', device='cpu'):
super(ColorDecorrelationLayer, self).__init__()
self.color_correlation_n = self.color_correlation_normalized(correlation_matrix).to(device)
def get_matrix(self, matrix='imagenet'):
if torch.is_tensor(matrix):
color_correlation_svd_sqrt = matrix
elif ',' in matrix:
m = [float(mx) for mx in matrix.replace('n','-').split(',')]
color_correlation_svd_sqrt = torch.Tensor([[m[0], m[1], m[2]],
[m[3], m[4], m[5]],
[m[6], m[7], m[8]]])
elif matrix.lower() == 'imagenet':
color_correlation_svd_sqrt = torch.Tensor([[0.26, 0.09, 0.02],
[0.27, 0.00, -0.05],
[0.27, -0.09, 0.03]])
elif matrix.lower() == 'places365':
raise NotImplementedError
return color_correlation_svd_sqrt
def color_correlation_normalized(self, matrix):
color_correlation_svd_sqrt = self.get_matrix(matrix)
max_norm_svd_sqrt = torch.max(color_correlation_svd_sqrt.norm(0))
color_correlation_normalized = color_correlation_svd_sqrt / max_norm_svd_sqrt
return color_correlation_normalized.T
def decorrelate_color(self, input):
inverse = torch.inverse(self.color_correlation_n)
return torch.matmul(input.permute(0,2,3,1), inverse).permute(0,3,1,2)
def forward(self, input):
return torch.matmul(input.permute(0,2,3,1), self.color_correlation_n).permute(0,3,1,2)
# Preprocess input after decorrelation
class TransformLayer(torch.nn.Module):
def __init__(self, input_mean=[1,1,1], input_sd=[1,1,1], r=255, device='cpu'):
super(TransformLayer, self).__init__()
self.input_mean = torch.as_tensor(input_mean).view(3, 1, 1).to(device)
self.input_sd = torch.as_tensor(input_sd).view(3, 1, 1).to(device)
self.r = r
self.activ = lambda x: torch.sigmoid(x)
def untransform(self, input):
input = (input + self.input_mean) * self.input_sd
return input / self.r
def forward(self, input):
input = self.activ(input) * self.r
return (input - self.input_mean) / self.input_sd
# Randomly scale an input
class RandomScaleLayer(torch.nn.Module):
def __init__(self, scale_list=(1, 0.975, 1.025, 0.95, 1.05)):
super(RandomScaleLayer, self).__init__()
scale_list = (1, 0.975, 1.025, 0.95, 1.05) if scale_list == 'none' else scale_list
scale_list = [float(s) for s in scale_list.split(',')] if ',' in scale_list else scale_list
self.scale_list = scale_list
def get_scale_mat(self, m, device, dtype):
return torch.tensor([[m, 0.0, 0.0], [0.0, m, 0.0]], device=device, dtype=dtype)
def rescale_tensor(self, x, scale):
scale_matrix = self.get_scale_mat(scale, x.device, x.dtype)[None, ...].repeat(x.shape[0], 1, 1)
grid = F.affine_grid(scale_matrix, x.size())
return F.grid_sample(x, grid)
def forward(self, input):
n = random.randint(0, len(self.scale_list)-1)
return self.rescale_tensor(input, scale=self.scale_list[n])
# Randomly rotate a tensor from a list of degrees
class RandomRotationLayer(torch.nn.Module):
def __init__(self, range_degrees=5):
super(RandomRotationLayer, self).__init__()
range_degrees = '5' if range_degrees == 'none' else range_degrees
        if not isinstance(range_degrees, int) and ',' in range_degrees:
self.angle_range = [int(r) for r in range_degrees.replace('n','-').split(',')]
else:
self.angle_range = list(range(-int(range_degrees), int(range_degrees) + 1))
def get_random_angle(self):
n = random.randint(0, len(self.angle_range) -1)
return self.angle_range[n] * 3.141592653589793 / 180
def get_rot_mat(self, theta, device, dtype):
theta = torch.tensor(theta, device=device, dtype=dtype)
return torch.tensor([[torch.cos(theta), -torch.sin(theta), 0],
[torch.sin(theta), torch.cos(theta), 0]], device=device, dtype=dtype)
def rotate_tensor(self, x, theta):
rotation_matrix = self.get_rot_mat(theta, x.device, x.dtype)[None, ...].repeat(x.shape[0],1,1)
grid = F.affine_grid(rotation_matrix, x.size())
return F.grid_sample(x, grid)
def forward(self, input):
rnd_angle = self.get_random_angle()
return self.rotate_tensor(input, rnd_angle)
# Crop the padding off a tensor
class CenterCropLayer(torch.nn.Module):
def __init__(self, crop_val=0):
super(CenterCropLayer, self).__init__()
self.crop_val = crop_val
def forward(self, input):
h, w = input.size(2), input.size(3)
h_crop = input.size(2) - self.crop_val
w_crop = input.size(3) - self.crop_val
sw, sh = w // 2 - (w_crop // 2), h // 2 - (h_crop // 2)
return input[:, :, sh:sh + h_crop, sw:sw + w_crop]
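# Illustrative usage sketch: chain the augmentation layers defined above on a
# dummy batch (assumes the older F.affine_grid/grid_sample defaults used here).
def _demo_transform_layers():
    x = torch.randn(1, 3, 64, 64)
    x = RandomScaleLayer()(x)
    x = RandomRotationLayer(range_degrees=5)(x)
    x = CenterCropLayer(crop_val=8)(x)
    return x.shape  # expected: torch.Size([1, 3, 56, 56])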
|
422750
|
from selenium import webdriver
class FindByIdName():
def test(self):
baseUrl = "https://letskodeit.teachable.com/pages/practice"
driver = webdriver.Firefox()
driver.get(baseUrl)
elementById = driver.find_element_by_id("name")
if elementById is not None:
print("We found an element by Id")
elementByName = driver.find_element_by_name("show-hide")
if elementByName is not None:
print("We found an element by Name")
driver.get("https://www.yahoo.com/")
# This one should fail because the Id is not static
# Exception thrown: NoSuchElementException
driver.find_element_by_id("yui_3_18_0_4_1463100170626_1148")
ff = FindByIdName()
ff.test()
|
422761
|
from .base import Engine
from .finder import engine_finder
from .postgres import PostgresEngine
from .sqlite import SQLiteEngine
__all__ = ["Engine", "PostgresEngine", "SQLiteEngine", "engine_finder"]
|
422795
|
import ast
import dataclasses
import inspect
import typing
from abc import ABC, abstractmethod
from collections import defaultdict
from contextlib import contextmanager
from dataclasses import dataclass
from functools import lru_cache, singledispatch
from inspect import getmembers
from types import FunctionType
from typing import Callable, Dict, List, Set, Tuple
from securify.grammar import Grammar, Production
from securify.grammar.attributes import AttributeGrammarError, AttributeOccurrence, RuleArgument, ListElement
from securify.grammar.attributes import SynthesizeRule, PushdownRule
T = typing.TypeVar("T")
class RuleParser(ABC):
@abstractmethod
def __call__(self, grammar, classes):
...
"""
Definition of Attributes:
attr1 = synthesized()
attr2 = inherited()
Definition of Rules
@synthesized
def any_name(self) -> attribute:
...
@synthesized
def attribute(self):
...
@pushdown
def any_name(self) -> Attribute @ child:
....
Attributes and rules are inherited via Python's class hierarchy mechanism.
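For illustration, a hypothetical production class combining the pieces above
(the surrounding Grammar/Production machinery is omitted):
    class MyProduction(...):
        attr1 = synthesized()
        attr2 = inherited()
        @synthesized
        def attr1(self):
            ...   # rule computing attr1 for this node
        @pushdown
        def attr2_rule(self) -> attr2 @ All:
            ...   # rule pushing attr2 down to every child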
"""
class _AttributeBase:
name: str
def __init__(self, source_location=None):
self.source_location = source_location or SourceLocation.current(1)
def __get__(self, instance, owner):
if instance is None:
return self
raise AttributeGrammarError(
f"Attributes cannot be accessed directly without prior evaluation. "
f"Use an evaluator. ({self} in {self.owner.__name__}, {self.source_location})")
def __set_name__(self, owner, name):
self.__name__ = name
self.name = name
self.owner = owner
class SynthesizedAttribute(_AttributeBase):
def __init__(self, default, source_location=None):
super().__init__(source_location)
self.default = default
def __repr__(self):
return f"Syn({self.name})"
class InheritedAttribute(_AttributeBase):
def __init__(self, default, implicit_pushdown, source_location=None):
super().__init__(source_location)
self.default = default
self.implicit_pushdown = implicit_pushdown
def __repr__(self):
return f"Inh({self.name})"
class AttributeDefinitionError(AttributeGrammarError):
pass
class AllChildrenTarget:
pass
All = AllChildrenTarget()
class Parser(RuleParser):
class ParsingHelper:
def __init__(self, grammar: Grammar):
self.grammar = grammar
self.a_s: Dict[type, Set[_AttributeBase]] = defaultdict(lambda: set())
self.a_i: Dict[type, Set[_AttributeBase]] = defaultdict(lambda: set())
self.r_s: Dict[type, Dict[Tuple[str, str], List[SynthesizeRule]]] = defaultdict(lambda: defaultdict(list))
self.r_i: Dict[type, Dict[Tuple[str, str], List[PushdownRule]]] = defaultdict(lambda: defaultdict(list))
for production in grammar.productions:
for cls in production.mro():
self.sort_rules(cls)
# Set owner in rules
for production in grammar.productions:
for cls in production.mro():
self.prepare_attributes(cls)
self.prepare_rules(cls)
self.register_attributes()
self.register_rules()
def register_attributes(self):
for production in self.grammar.productions:
# if self.grammar.is_abstract_production(production):
# self.a_s[production] = set()
# self.a_i[production] = set()
# self.a_a[production] = set()
# continue
self.a_s[production] = self.synthesized_attributes(production)
self.a_i[production] = self.inherited_attributes(production)
def register_rules(self):
for production in self.grammar.productions:
if Production.is_abstract(production):
continue
for r in self.synthesized_rules(production):
assert isinstance(r.attribute, SynthesizedAttribute)
for r in self.pushdown_rules(production):
assert isinstance(r.attribute, InheritedAttribute)
for attribute in self.a_s[production]:
self.r_s[production][("self", attribute.name)] = [
self.synthesized_rule(production, attribute)
]
for name, child in self.grammar.productions[production].items():
for attribute in self.inherited_attributes(child.symbol):
self.r_i[production][(name, attribute.name)] = [
self.pushdown_rule(production, name, attribute)
]
if child.is_list:
production_list = ListElement[production, name]
for attribute in self.inherited_attributes(child.symbol):
self.r_i[production_list][("next", attribute.name)] = [
self.pushdown_rule(production_list, "next", attribute)
]
@property
def result(self):
return (
self.r_s, self.r_i,
{i: frozenset(a.name for a in t) for i, t in self.a_s.items()},
{i: frozenset(a.name for a in t) for i, t in self.a_i.items()},
)
def attributes_in_production_child(self, production, child):
if child == "self":
return self.all_attributes(production)
production_info = self.grammar.productions[production]
child_type = production_info[child].symbol
return self.all_attributes(child_type)
def synthesized_attributes(self, production):
return self.attribute_declarations(production)[0]
def inherited_attributes(self, production):
return self.attribute_declarations(production)[1]
def all_attributes(self, production):
return self.synthesized_attributes(production) | self.inherited_attributes(production)
@lru_cache(None)
def attribute_declarations(self, production):
if production is None:
return set(), set()
syn = getmembers(production, lambda x: isinstance(x, SynthesizedAttribute))
inh = getmembers(production, lambda x: isinstance(x, InheritedAttribute))
for super_production in self.grammar.super_productions[production]:
if super_production in self.grammar.starting_productions:
continue
for i_name, i_attr in inh:
if getattr(super_production, i_name, None) != i_attr:
raise AttributeDefinitionError(
f"Inherited attribute {i_attr} defined in production "
f"'{production.__name__}' must also be available in its "
f"super production '{super_production.__name__}'. "
f"Alternatively '{super_production.__name__}' must be a "
f"start production. {i_attr.source_location}")
syn = set(a for _, a in syn)
inh = set(a for _, a in inh)
parent_attributes = set()
for super_production in self.grammar.super_productions[production]:
parent_attributes_new = self.attribute_declarations(super_production)
                if parent_attributes_new is not None:
parent_attributes |= parent_attributes_new[0]
parent_attributes |= parent_attributes_new[1]
local_attributes = (syn | inh)
if not parent_attributes.issubset(local_attributes):
raise AttributeGrammarError(local_attributes, parent_attributes)
return syn, inh
@lru_cache(None)
def synthesized_rules(self, production):
rules = [p.__dict__.get("__semantic_rules", []) for p in production.mro()]
rules = [r for rs in rules for r in reversed(rs) if isinstance(r, SynthesizeRuleDescriptor)]
return rules
@lru_cache(None)
def pushdown_rules(self, production):
if "ListElement" in production.__name__:
ttt = production.get_contained_type(self.grammar)
rules = [p.__dict__.get("__semantic_rules", []) for p in (production.mro() + ttt.mro())]
else:
rules = [p.__dict__.get("__semantic_rules", []) for p in production.mro()]
rules = [r for rs in rules for r in reversed(rs) if isinstance(r, PushdownRuleDescriptor)]
return rules
def synthesized_rule(self, production, attribute):
rules: List[SynthesizeRuleDescriptor] = self.synthesized_rules(production)
for r in rules:
if r.attribute == attribute:
return r.to_rule(self, production)
if attribute.default is not ...:
return self.default_synthesized(attribute)
error = (f"Could not find rule for attribute {attribute} in "
f"production {production.__name__}. {attribute.source_location} ")
if attribute.default is ...:
error += "A default value has not been provided. "
raise AttributeDefinitionError(error + f"\n{attribute.source_location}")
def pushdown_rule(self, production, child, attribute):
rules = self.pushdown_rules(production)
for r in rules:
if r.attribute == attribute and r.is_in_targets(child):
return r.to_rule(self, production, child)
if "ListElement" in production.__name__:
production = production.get_contained_type(self.grammar)
# No rule found, try implicit rule inferrence
if attribute.implicit_pushdown:
available_attributes = {a.name: a for a in self.a_i[production]}
if attribute.name in available_attributes:
source = available_attributes[attribute.name]
return self.implicit_pushdown(child, source, attribute)
if attribute.default is not ...:
return self.default_pushdown(child, attribute)
error = (f"Could not find rule for attribute {attribute} for child "
                     f"'{child}' of production '{production.__name__}'. ")
if attribute.default is ...:
error += "A default value has not been provided. "
if attribute.implicit_pushdown:
error += "An implicit or default pushdown could not be inferred either. "
else:
error += "Implicit pushdown has been disabled explicitly. "
raise AttributeDefinitionError(error + f"\n{attribute.source_location}")
@staticmethod
def prepare_attributes(cls):
for name, attr in getmembers(cls, lambda x: isinstance(x, _AttributeBase)):
if getattr(attr, "owner", None) is None:
attr.__set_name__(cls, name)
elif attr.name != name:
raise AttributeDefinitionError(
f"Attribute {attr} was renamed in class '{cls.__name__}' "
f"from {attr.name} to {name}.")
@staticmethod
def prepare_rules(cls):
for r in getattr(cls, "__semantic_rules", []):
r._set_owner(cls)
@staticmethod
def sort_rules(cls):
reorder = []
semantic_rules: list = getattr(cls, "__semantic_rules", [])
for r in semantic_rules:
arguments = r.arguments[1]
arguments = [a for a in arguments if a.node == "self"]
if len(arguments) == 0:
continue
arguments = arguments[0]
if arguments.types is not None:
reorder.append((r, arguments.types))
for rule, types in reorder:
semantic_rules.remove(rule)
for cls_other in types:
_rules(cls_other).append(rule)
@staticmethod
def implicit_pushdown(child_name, attribute_source, attribute_target):
return PushdownRule(
Parser.ParsingHelper.implicit_pushdown_impl(attribute_target.name),
arguments=[RuleArgument("self")],
dependencies=[AttributeOccurrence("self", attribute_source.name)],
target=AttributeOccurrence(child_name, attribute_target.name),
name=f"{child_name}_{attribute_target.name}",
annotations="ImplicitPushdown"
)
@staticmethod
def default_pushdown(child_name, attribute_target):
return PushdownRule(
Parser.ParsingHelper.default_value_impl(attribute_target.default),
arguments=[RuleArgument("self")],
dependencies=[],
target=AttributeOccurrence(child_name, attribute_target.name),
name=f"{child_name}_{attribute_target.name}",
annotations="DefaultPushdown"
)
@staticmethod
def default_synthesized(attribute_target):
return SynthesizeRule(
Parser.ParsingHelper.default_value_impl(attribute_target.default),
arguments=[RuleArgument("self")],
dependencies=[],
target=AttributeOccurrence("self", attribute_target.name),
name=f"{attribute_target.name}",
annotations="DefaultSynthesized"
)
@staticmethod
def implicit_pushdown_impl(attribute):
def identity(self):
return getattr(self, attribute)
return identity
@staticmethod
def default_value_impl(value):
def default_value(self):
if isinstance(value, Callable):
return value()
return value
return default_value
def __call__(self, grammar, _):
return self.ParsingHelper(grammar).result
def _rules(d):
field = "__semantic_rules"
if isinstance(d, type):
if field not in d.__dict__:
setattr(d, field, [])
return d.__dict__[field]
if not isinstance(d, dict):
raise RuntimeError(type(d))
return d.setdefault(field, [])
def __set_rule(rule, cls=None):
# Registers a rule in the '__semantic_rules' field of cls.
# If None is passed via cls, it is assumed that the __set_rule
# function was called from within a class definition block. In
# order to prevent shadowing of already declared attributes or
# rules with identical names, the function traverses the stack
# of frames in order to find the namespace of the class it has
# been called from. It then looks for fields whose names match
# the rule's name and returns their value, if found. The value
# can be returned by a decorator so that the existing field is
# effectively not overridden by the new definition.
if cls is not None:
_rules(cls).append(rule)
else:
class_locals = __class_locals()
if class_locals is None:
raise AttributeGrammarError(
f"Rule definition of {rule} is not applicable outside of a class. \n"
f"If defining rules outside of a class, please pass the target class "
f"as first parameter (e.g. @synthesized(MyClass), @inherited(MyClass))."
)
_rules(class_locals).append(rule)
if rule.function_name in class_locals:
return class_locals[rule.function_name]
return rule
@contextmanager
def rules_for(production: T) -> T:
assert getattr(rules_for, "__current_rule__", None) is None
try:
productions = production if isinstance(production, list) else [production]
for production in productions:
if not Production.is_production(production):
raise AttributeDefinitionError(f"{production} is not a production rule.")
setattr(rules_for, "__current_rule__", productions)
yield production
finally:
delattr(rules_for, "__current_rule__")
def synthesized(*args, default=...):
if len(args) == 0:
return SynthesizedAttribute(source_location=SourceLocation.current(),
default=default)
if len(args) == 1:
arg = args[0]
if isinstance(arg, FunctionType):
rule = SynthesizeRuleDescriptor(arg)
if __is_in_class_declaration():
return __set_rule(rule, cls=None)
if __is_in_class_context():
for t in __context_classes():
__set_rule(rule, cls=t)
return rule
raise AttributeDefinitionError(f"Unsupported arguments {args} at {SourceLocation.current()}")
def inherited(*, default=..., implicit_pushdown=...):
return InheritedAttribute(default,
implicit_pushdown,
source_location=SourceLocation.current())
def pushdown(*args):
if len(args) == 1:
arg = args[0]
if isinstance(arg, FunctionType):
rule = PushdownRuleDescriptor(arg)
if __is_in_class_declaration():
return __set_rule(rule, cls=None)
if __is_in_class_context():
for t in __context_classes():
__set_rule(rule, cls=t)
return rule
raise AttributeDefinitionError(f"Unsupported arguments {args} at {SourceLocation.current()}")
class _SemanticRuleDescriptor(ABC):
def __init__(self, function):
self.function = function
self.function_name = function.__name__
self.name = self.function_name
self.source_location = SourceLocation.for_function(function)
def __set_name__(self, owner, name):
# Rules are not stored as attributes of a class, instead they are stored
# in a dedicated array called '__semantic_rules'. The reason for this is
# to make it possible to define rules with the names of their respective
# attributes without shadowing them if they are in a base class.
delattr(owner, name)
def _set_owner(self, owner):
self.owner = owner
def __get__(self, instance, owner):
if instance is None:
return self
raise RuntimeError(
            f"Semantic rule descriptors cannot be accessed directly. "
            f"The rule was probably defined via assignment to a class attribute, "
            f"which is not supported. "
f"({self.name} in {self.owner.__name__}, {self.source_location})")
def __repr__(self):
return f"SemanticRule '{self.name}' at {self.source_location}"
@property
@abstractmethod
def attribute(self):
...
@property
def arguments(self):
return _parse_dependencies(self)
@property
@lru_cache(None)
def _target(self):
func = self.function
if "return" not in func.__annotations__:
return None
try:
return ast.parse(func.__annotations__["return"], mode='eval').body
except Exception as e:
raise AttributeDefinitionError(
f"Attribute targets could not be parsed for {self}.") from e
def _parse_dependencies(self, parser_helper, production):
if "ListElement" in production.__name__:
production = production.get_contained_type(parser_helper.grammar)
dependencies, dependencies_on_nodes = _parse_dependencies(self)
dependencies_on_nodes = self._with_sub_productions(parser_helper.grammar, dependencies_on_nodes)
accesses, _ = _parse_function_ast(self.function)
dependencies = set(dependencies)
dependencies |= {AttributeOccurrence(d.node, a)
for d in dependencies_on_nodes
for a in accesses.get(d.node, set()) & {
t.name for t in parser_helper.attributes_in_production_child(production, d.node)
}}
return dependencies_on_nodes, list(dependencies)
@staticmethod
def _with_sub_productions(grammar, dependencies_on_nodes):
dependencies_on_nodes_new = []
for dependency in dependencies_on_nodes:
types = dependency.types
if types is not None:
types = list(types)
for i in range(len(types)):
if "ListElement" in types[i].__name__:
types[i] = types[i].get_contained_type(grammar)
types = set(types)
if types is not None:
types = grammar.get_sub_productions(types)
dependency = dataclasses.replace(dependency, types=types)
dependencies_on_nodes_new.append(dependency)
return dependencies_on_nodes_new
class SynthesizeRuleDescriptor(_SemanticRuleDescriptor):
def __init__(self, function):
super().__init__(function)
@property
def attribute(self):
if self._attribute_target is None:
attribute = getattr(self.owner, self.function_name, None)
if not isinstance(attribute, SynthesizedAttribute):
raise AttributeDefinitionError(
f"Attribute cannot be resolved from function name for rule {self}. \n"
f"Please check the function name ('{self.function_name}') against "
                    f"available attributes in '{self.owner.__name__}' or use the explicit "
f"definition syntax 'def rule() -> MyProduction.Attribute'"
)
return attribute
else:
return self._attribute_target
def to_rule(self, parser_helper, production):
dependencies_on_nodes, dependencies = self._parse_dependencies(parser_helper, production)
return SynthesizeRule(
self.function,
dependencies_on_nodes,
dependencies,
name=self.name,
target=self.attribute.name,
source_location=self.source_location)
@property
@lru_cache(None)
def _attribute_target(self):
if self._target is None:
return None
try:
if not isinstance(self._target, (ast.Name, ast.Attribute)):
raise AttributeDefinitionError("Unknown syntax.")
attribute = _eval_ast(self._target, context=self.function)
except Exception as e:
raise AttributeDefinitionError(
f"Rule target not specified correctly for {self}. "
f"Expected format is 'MyProduction.MyAttribute' or 'MyAttribute'"
f"for synthesize rules.") from e
if not isinstance(attribute, SynthesizedAttribute):
raise AttributeDefinitionError(
f"Target of {self} does not resolve to an instance of SynthesizedAttribute.")
return attribute
class PushdownRuleDescriptor(_SemanticRuleDescriptor):
@property
def attribute(self):
return self._attribute_and_targets[0]
def is_in_targets(self, child_name):
if self._attribute_and_targets[1] == All:
return True
return child_name in self._attribute_and_targets[1]
@property
@lru_cache(None)
def _attribute_and_targets(self):
if self._target is None:
raise AttributeDefinitionError(
f"Target child and attribute not specified for {self}.")
try:
if not isinstance(self._target, ast.BinOp):
raise AttributeDefinitionError("Unknown syntax.")
if not isinstance(self._target.op, ast.MatMult):
raise AttributeDefinitionError("Unknown syntax.")
attribute = self._target.left
children = self._target.right
attribute = _eval_ast(attribute, context=self.function)
# TODO: Assert that children are actual elements of the grammar
if isinstance(children, ast.Name):
if children.id == "All":
targets = All
else:
targets = {children.id}
elif isinstance(children, ast.Attribute):
targets = {children.attr}
elif isinstance(children, ast.Set):
targets = {
e.attr if isinstance(e, ast.Attribute) else e.id
for e in children.elts
}
else:
raise AttributeDefinitionError("Unknown syntax.")
except Exception as e:
raise AttributeDefinitionError(
f"Rule target not specified correctly for {self}. "
f"Expected format is 'MyProduction.MyAttribute @ {{my_child, ...}}' "
f"for pushdown rules.") from e
if not isinstance(attribute, InheritedAttribute):
raise AttributeDefinitionError(
f"Target of {self} does not resolve to an instance of InheritedAttribute.")
return [attribute, targets]
def to_rule(self, parser_helper, production, child):
dependencies_on_nodes, dependencies = self._parse_dependencies(parser_helper, production)
return PushdownRule(
self.function,
dependencies_on_nodes,
dependencies,
AttributeOccurrence(child, self.attribute.name),
self.name,
self.source_location)
@lru_cache(None)
def _parse_dependencies(rule):
func = rule.function
arguments = []
dependencies = []
for _, parameter in inspect.signature(func).parameters.items():
node = parameter.name
hint = parameter.annotation
attributes = set()
try:
if hint is inspect.Signature.empty:
dependency_type = None
else:
annotation_ast = ast.parse(hint, mode='eval').body
if isinstance(annotation_ast, ast.Compare):
if isinstance(annotation_ast.ops[0], ast.In):
attr_info = _eval_ast(annotation_ast.left, context=func)
type_info = _eval_ast(annotation_ast.comparators[0], context=func)
if not isinstance(attr_info, (list, set, tuple)):
attr_info = {attr_info}
if not isinstance(type_info, (list, set, tuple)):
type_info = {type_info}
assert all(isinstance(a, _AttributeBase) for a in attr_info)
assert all(isinstance(a, type) for a in type_info)
dependency_type = type_info
attributes |= attr_info
else:
raise SyntaxError()
elif isinstance(annotation_ast, (ast.Set, ast.List, ast.Tuple)):
attr_info = set(_eval_ast(annotation_ast, context=func))
assert all(isinstance(a, _AttributeBase) for a in attr_info)
attributes |= attr_info
dependency_type = None
elif isinstance(annotation_ast, (ast.Name, ast.Attribute, ast.NameConstant, ast.Subscript)):
attr_or_type = _eval_ast(annotation_ast, context=func)
if isinstance(attr_or_type, _AttributeBase):
attributes.add(attr_or_type)
dependency_type = None
elif isinstance(attr_or_type, type):
dependency_type = {attr_or_type}
elif isinstance(attr_or_type, type(None)):
dependency_type = {None}
else:
raise Exception()
else:
raise SyntaxError("Unexpected annotation AST " + str(annotation_ast))
except Exception as e:
raise AttributeDefinitionError(f"Attribute dependencies could not be parsed for {rule}") from e
for attribute in set(a.name for a in attributes):
dependency = AttributeOccurrence(node, attribute)
dependencies.append(dependency)
arguments.append(RuleArgument(node, dependency_type))
return dependencies, arguments
def _parse_function_ast(func):
source_ast = __get_function_ast(func)
accesses = {}
accesses_suspicious = set()
@singledispatch
def walk_ast(_):
...
@walk_ast.register(list)
def _(elements):
for e in elements:
walk_ast(e)
@walk_ast.register(ast.Name)
def _(name):
accesses_suspicious.add(name)
@walk_ast.register(ast.Attribute)
def _(attribute):
if isinstance(attribute.value, ast.Name):
accesses.setdefault(attribute.value.id, set()).add(attribute.attr)
else:
walk_ast(attribute.value)
@walk_ast.register(ast.AST)
def _(node):
for e in node._fields:
walk_ast(getattr(node, e))
walk_ast(source_ast.body)
return accesses, accesses_suspicious
@dataclass(frozen=True)
class SourceLocation:
file: str
line: int
def __str__(self):
return f"""File "{self.file}", line {self.line}"""
def __iter__(self):
return iter([self.file, self.line])
@staticmethod
def current(skip_frames=1):
frame = get_frame(skip_frames + 1)
return SourceLocation(
frame.f_code.co_filename,
frame.f_lineno)
@staticmethod
def for_function(func):
return SourceLocation(
inspect.getfile(func),
inspect.findsource(func)[1] + 1)
def get_frame(skip_frames):
frame = inspect.currentframe()
for i in range(skip_frames + 1):
frame = frame.f_back
return frame
def __is_in_class_declaration():
return __class_locals() is not None
def __is_in_class_context():
return __context_classes() is not None
def __context_classes():
return getattr(rules_for, "__current_rule__", None)
def __class_locals():
frame = get_frame(1)
while frame.f_back is not None:
f_locals = frame.f_locals
if all(f in f_locals for f in ["__qualname__", "__module__"]):
return f_locals
frame = frame.f_back
return None
def __get_function_ast(func):
source = inspect.getsourcelines(func)[0]
indent = len(source[0]) - len(source[0].lstrip())
source = [l[indent:] for l in source]
source = "".join(source)
return ast.parse(source).body[0]
def _eval_ast(ast_element, context):
if not isinstance(context, dict):
context = getattr(context, "__globals__")
compiled = compile(ast.Expression(ast_element), "<internal>", "eval")
return eval(compiled, context, context)
|
422813
|
expected_output = {
"interfaces": {
"GigabitEthernet1/0/1": {
"name": "foo bar",
"down_time": "00:00:00",
"up_time": "4d5h",
},
}
}
|
422873
|
from __future__ import absolute_import, division, print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
import matplotlib as mpl
mpl.use('Agg') # TO DISABLE GUI. USEFUL WHEN RUNNING ON CLUSTER WITHOUT X SERVER
import argparse
import numpy as np
from benchmarking import computing
from demystifying import visualization, utils
logger = logging.getLogger("benchmarking")
def _fix_extractor_type(extractor_types):
extractor_types = utils.make_list(extractor_types)
if len(extractor_types) == 1:
et = extractor_types[0]
if et == "supervised":
return ["KL", "RF", "MLP", "RAND"]
elif et == "unsupervised":
return ["PCA", "RBM", "AE", "RAND"]
elif et == "all":
return ["PCA", "RBM", "AE", "KL", "RF", "MLP", "RAND"]
return extractor_types
def create_argparser():
_bool_lambda = lambda x: (str(x).lower() == 'true')
parser = argparse.ArgumentParser(
epilog='Benchmarking for demystifying')
parser.add_argument('--extractor_type', nargs='+', help='list of extractor types (MLP, KL, PCA, ..)', type=str,
required=True)
parser.add_argument('--output_dir', type=str, help='Root directory for output files',
default="output/benchmarking/")
parser.add_argument('--test_model', nargs='+', type=str, help='Toy model displacement: linear or non-linear',
default="linear")
parser.add_argument('--feature_type', nargs='+',
type=str, help='Toy model feature type: cartesian_rot, inv-dist, etc.',
default="cartesian_rot")
parser.add_argument('--noise_level', nargs='+', type=float,
help='Strength of noise added to atomic coordinates at each frame',
default=1e-2)
parser.add_argument('--displacement', type=float, help='Strength of displacement for important atoms', default=1e-1)
parser.add_argument('--overwrite', type=_bool_lambda,
help='Overwrite existing results with new (if set to False no new computations will be performed)',
default=False)
parser.add_argument('--visualize', type=_bool_lambda, help='Generate output figures', default=True)
parser.add_argument('--iterations_per_model', type=int, help='', default=10)
parser.add_argument('--accuracy_method', nargs='+', type=str, help='', default='mse')
parser.add_argument('--n_atoms', nargs='+', type=int, help='Number of atoms in toy model', default=100)
return parser
def do_run(args, extractor_types, noise_level, test_model, feature_type, accuracy_method, natoms):
visualize = args.visualize
output_dir = args.output_dir
fig_filename = "{feature_type}_{test_model}_{noise_level}noise_{natoms}atoms_{accuracy_method}.svg".format(
feature_type=feature_type,
test_model=test_model,
noise_level=noise_level,
accuracy_method=accuracy_method,
natoms=natoms
)
best_processors = []
for et in extractor_types:
try:
postprocessors = computing.compute(extractor_type=et,
output_dir=output_dir,
feature_type=feature_type,
overwrite=args.overwrite,
accuracy_method=accuracy_method,
iterations_per_model=args.iterations_per_model,
noise_level=noise_level,
                                               # Disable visualization here since all these individual
                                               # performance plots take up disk space and don't give us much.
visualize=False,
natoms=natoms,
test_model=test_model)
if visualize:
visualization.show_single_extractor_performance(postprocessors=postprocessors,
extractor_type=et,
filename=fig_filename,
output_dir=output_dir,
accuracy_method=accuracy_method)
best_processors.append(utils.find_best(postprocessors))
except Exception as ex:
logger.exception(ex)
logger.warn("Failed for extractor %s ", et)
raise ex
best_processors = np.array(best_processors)
if visualize:
fig_filename = fig_filename.replace(".svg", "_{}.svg".format("-".join(extractor_types)))
visualization.show_all_extractors_performance(best_processors,
extractor_types,
feature_type=feature_type,
filename=fig_filename,
output_dir=output_dir,
accuracy_method=accuracy_method)
return best_processors
def run_all(args):
extractor_types = _fix_extractor_type(args.extractor_type)
n_atoms = utils.make_list(args.n_atoms)
for feature_type in utils.make_list(args.feature_type):
for noise_level in utils.make_list(args.noise_level):
for test_model in utils.make_list(args.test_model):
for accuracy_method in utils.make_list(args.accuracy_method):
best_processors = []
for natoms in n_atoms:
bp = do_run(args, extractor_types, noise_level,
test_model, feature_type, accuracy_method, natoms)
best_processors.append(bp)
                    if args.visualize and len(n_atoms) > 1:
visualization.show_system_size_dependence(n_atoms=n_atoms,
postprocessors=np.array(best_processors),
extractor_types=extractor_types,
noise_level=noise_level,
test_model=test_model,
feature_type=feature_type,
output_dir=args.output_dir,
accuracy_method=accuracy_method)
if __name__ == "__main__":
parser = create_argparser()
args = parser.parse_args()
logger.info("Starting script run_benchmarks with arguments %s", args)
run_all(args)
logger.info("Done!")
|
422919
|
from functools import partial
from typing import Optional
import numpy as np
import xarray as xr
from starfish.core.imagestack.imagestack import ImageStack
from starfish.core.types import Axes, Levels
from ._base import FilterAlgorithm
class LinearUnmixing(FilterAlgorithm):
"""
LinearUnmixing enables the user to correct fluorescent bleed by subtracting fractions of the
intensities of other channels from each channel in the ImageStack.
Examples
--------
The following example provides a coefficient matrix that corrects for spectral
mixing in a 3-channel experiment.
    Channel 0 contains a mixture of itself plus 50% of the intensity of
    channel 2. Channel 1 has no mixing with other channels. Channel 2
    consists of itself plus 10% of the intensity of both channels 0 and 1.
>>> import numpy as np
    >>> coeff_mat = np.array([
    ...     [1, 0, -0.1],
    ...     [0, 1, -0.1],
    ...     [-0.5, 0, 1],
    ... ])
The end result of this unmixing will be that 50% of channel 2 will be subtracted from
channel 0, channel 1 will not be changed, and 10% of channels 0 and 1 will be subtracted
from channel 2.
Parameters
----------
coeff_mat : np.ndarray
matrix of the linear unmixing coefficients. Should take the form: B = AX, where B are
the unmixed values, A is coeff_mat and X are the observed values. coeff_mat has shape
(n_ch, n_ch), and poses each channel (column) as a combination of other columns (rows).
level_method : :py:class:`~starfish.types.Levels`
Controls the way that data are scaled to retain skimage dtype requirements that float data
fall in [0, 1]. In all modes, data below 0 are set to 0.
- Levels.CLIP (default): data above 1 are set to 1.
- Levels.SCALE_SATURATED_BY_IMAGE: when any data in the entire ImageStack is greater
than 1, the entire ImageStack is scaled by the maximum value in the ImageStack.
- Levels.SCALE_SATURATED_BY_CHUNK: when any data in any slice is greater than 1, each
slice is scaled by the maximum value found in that slice. The slice shapes are
determined by the ``group_by`` parameters.
- Levels.SCALE_BY_IMAGE: scale the entire ImageStack by the maximum value in the
ImageStack.
- Levels.SCALE_BY_CHUNK: scale each slice by the maximum value found in that slice. The
slice shapes are determined by the ``group_by`` parameters.
"""
def __init__(
self,
coeff_mat: np.ndarray,
level_method: Levels = Levels.CLIP
) -> None:
self.coeff_mat = coeff_mat
self.level_method = level_method
_DEFAULT_TESTING_PARAMETERS = {"coeff_mat": np.array([[1, -0.25], [-0.25, 1]])}
@staticmethod
def _unmix(image: xr.DataArray, coeff_mat: np.ndarray) -> np.ndarray:
"""Perform linear unmixing of channels
Parameters
----------
        image : xr.DataArray
image to be scaled
coeff_mat : np.ndarray
matrix of the linear unmixing coefficients. Should take the form:
B = AX, where B are the unmixed values, A is coeff_mat and X are
the observed values. coeff_mat has shape (n_ch, n_ch), and poses
each channel (column) as a combination of other columns (rows).
Returns
-------
np.ndarray :
Numpy array of same shape as image
"""
x = image.sizes[Axes.X.value]
y = image.sizes[Axes.Y.value]
c = image.sizes[Axes.CH.value]
# broadcast each channel coefficient across x and y
broadcast_coeff = np.tile(coeff_mat, reps=x * y).reshape(c, y, x, c)
# multiply the image by each coefficient
broadcast_image = image.values[..., None] * broadcast_coeff
# collapse the unmixed result
unmixed_image = np.sum(broadcast_image, axis=0).transpose([2, 0, 1])
return unmixed_image
def run(
self,
stack: ImageStack,
in_place: bool=False,
verbose: bool=False,
n_processes: Optional[int]=None,
*args,
) -> Optional[ImageStack]:
"""Perform filtering of an image stack
Parameters
----------
stack : ImageStack
Stack to be filtered.
in_place : bool
if True, process ImageStack in-place, otherwise return a new stack
verbose : bool
if True, report on filtering progress (default = False)
n_processes : Optional[int]
Number of parallel processes to devote to applying the filter. If None, defaults to
the result of os.cpu_count(). (default None)
Returns
-------
ImageStack :
If in-place is False, return the results of filter as a new stack. Otherwise return the
original stack.
"""
group_by = {Axes.ROUND, Axes.ZPLANE}
unmix = partial(self._unmix, coeff_mat=self.coeff_mat)
result = stack.apply(
unmix,
group_by=group_by, verbose=verbose, in_place=in_place, n_processes=n_processes,
level_method=self.level_method
)
return result
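# Usage sketch (not part of the original module): apply LinearUnmixing to a small
# synthetic stack. ImageStack.from_numpy accepting a float32 array shaped
# (rounds, channels, z, y, x) is an assumption about the starfish API.
#
#   import numpy as np
#   from starfish import ImageStack
#   data = np.random.rand(1, 2, 1, 32, 32).astype(np.float32)
#   stack = ImageStack.from_numpy(data)
#   coeff_mat = np.array([[1, -0.25], [-0.25, 1]])
#   unmixed = LinearUnmixing(coeff_mat=coeff_mat).run(stack)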
|
422932
|
from typing import Any
import torch
from torch import fx
class NodeProfiler(fx.Interpreter):
"""
This is basically a variant of shape prop in
https://github.com/pytorch/pytorch/blob/74849d9188de30d93f7c523d4eeceeef044147a9/torch/fx/passes/shape_prop.py#L65.
Instead of propagating just the shape, we record all the intermediate node Tensor values.
    This is useful for debugging lowering-pass issues where we want to check a specific
    tensor value. Note that the output value can be a tuple of Tensors as well as a single Tensor.
"""
def __init__(self, module: fx.GraphModule):
super().__init__(module)
self.execution_time = {}
self.node_map = {}
self.iter = 100
def run_node(self, n: fx.Node) -> Any:
result = super().run_node(n)
if n.op not in {"call_function", "call_method", "call_module"}:
return result
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(self.iter):
result = super().run_node(n)
end_event.record()
torch.cuda.synchronize()
self.execution_time[f"{n.name}"] = (
start_event.elapsed_time(end_event) / self.iter
)
self.node_map[n.name] = n
return result
def propagate(self, *args):
"""
Run `module` via interpretation and return the result and
record the shape and type of each node.
Args:
*args (Tensor): the sample input.
Returns:
Any: The value returned from executing the Module
"""
return super().run(*args)
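# Usage sketch: profile the nodes of a tiny traced module. Assumes a CUDA-capable
# device is available, since run_node() relies on torch.cuda events for timing.
#
#   class TinyModel(torch.nn.Module):
#       def forward(self, x):
#           return torch.relu(x) + 1
#
#   gm = fx.symbolic_trace(TinyModel())
#   profiler = NodeProfiler(gm)
#   profiler.propagate(torch.randn(8, 8, device="cuda"))
#   print(profiler.execution_time)  # average per-node time in milliseconds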
|
422937
|
from ethereum import utils
def mk_multisend_code(payments): # expects a dictionary, {address: wei}
kode = b''
for address, wei in payments.items():
kode += b'\x60\x00\x60\x00\x60\x00\x60\x00' # 0 0 0 0
encoded_wei = utils.encode_int(wei) or b'\x00'
kode += utils.ascii_chr(0x5f + len(encoded_wei)) + encoded_wei # value
kode += b'\x73' + utils.normalize_address(address) # to
kode += b'\x60\x00\xf1\x50' # 0 CALL POP
kode += b'\x33\xff' # CALLER SELFDESTRUCT
return kode
def get_multisend_gas(payments):
o = 26002 # 21000 + 2 (CALLER) + 5000 (SELFDESTRUCT)
for address, wei in payments.items():
encoded_wei = utils.encode_int(wei) or b'\x00'
# 20 bytes in txdata for address = 1360
# bytes in txdata for wei = 68 * n
# gas for pushes and pops = 3 * 7 + 2 = 23
# CALL = 9700 + 25000 (possible if new account)
o += 1360 + 68 * len(encoded_wei) + 23 + 34700
return o
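# Example (sketch; addresses and amounts are placeholders): build the multisend
# init code for two payouts and estimate the gas needed to run it.
#
#   payments = {
#       "0x" + "11" * 20: 10 ** 18,       # 1 ether in wei
#       "0x" + "22" * 20: 5 * 10 ** 17,   # 0.5 ether in wei
#   }
#   code = mk_multisend_code(payments)
#   gas = get_multisend_gas(payments)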
|
422952
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import _iter
expected_verilog = """
module blinkled
(
input CLK,
input RST,
output reg [8-1:0] LED
);
reg [32-1:0] count;
always @(posedge CLK) begin
if(RST) begin
count <= 0;
end else begin
if(count == 1023) begin
count <= 0;
end else begin
count <= count + 1;
end
end
end
always @(posedge CLK) begin
if(RST) begin
LED <= 1;
end else begin
if(count == 1023) begin
LED[0] <= LED[7];
LED[1] <= LED[0];
LED[2] <= LED[1];
LED[3] <= LED[2];
LED[4] <= LED[3];
LED[5] <= LED[4];
LED[6] <= LED[5];
LED[7] <= LED[6];
end
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = _iter.mkLed()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
422977
|
import sys
import os
nbits = int(sys.argv[1])
for block_size in range(1, nbits + 1):
if (block_size * 8) % nbits == 0:
break
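# Usage sketch (the script name below is a placeholder): run with the bit width as
# the only argument and redirect stdout into the generated header, e.g.
#   python pack_int_array_gen.py 3 > pack_int3_array.h
# block_size found above is the smallest number of output bytes that holds a whole
# number of nbits-bit values (block_size * 8 is a multiple of nbits).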
def gen_pack_bytes(num_values):
code = []
nbytes = (num_values * nbits + 7) // 8
for i in range(num_values):
code.append('const unsigned char s{} = *src++;'.format(i))
for i in range(nbytes):
byte_shift = i * 8
byte_mask = 0xff << byte_shift
values = []
for j in range(num_values):
value_shift = j * nbits
value_mask = ((1 << nbits) - 1) << value_shift
mask = value_mask & byte_mask
if not mask:
continue
if value_shift == byte_shift:
values.append('((s{} & 0x{:02x}))'.format(j, mask >> value_shift))
elif value_shift > byte_shift:
values.append('((s{} & 0x{:02x}) << {})'.format(j, mask >> value_shift, value_shift - byte_shift))
else:
values.append('((s{} & 0x{:02x}) >> {})'.format(j, mask >> value_shift, byte_shift - value_shift))
code.append('*dest++ = (unsigned char) {};'.format(' | '.join(values)))
return i + 1, code
print '// Copyright (C) 2016 <NAME>'
print '// Distributed under the MIT license, see the LICENSE file for details.'
print
print '// This file was automatically generated using {}, do not edit.'.format(os.path.basename(__file__))
print
print '#ifndef CHROMAPRINT_UTILS_PACK_INT{}_ARRAY_H_'.format(nbits)
print '#define CHROMAPRINT_UTILS_PACK_INT{}_ARRAY_H_'.format(nbits)
print
print '#include <algorithm>'
print
print 'namespace chromaprint {'
print
print 'inline size_t GetPackedInt{}ArraySize(size_t size) {{'.format(nbits)
print '\treturn (size * {} + {}) / {};'.format(block_size, block_size * 8 // nbits - 1, block_size * 8 // nbits)
print '}'
print
print 'template <typename InputIt, typename OutputIt>'
print 'inline OutputIt PackInt{}Array(const InputIt first, const InputIt last, OutputIt dest) {{'.format(nbits)
print '\tauto size = std::distance(first, last);'
print '\tauto src = first;'
first_if = True
for nbytes in range(block_size * 8 // nbits, 0, -1):
if nbytes == block_size * 8 // nbits:
print '\twhile (size >= {}) {{'.format(nbytes)
else:
if not first_if:
print 'else',
else:
print '\t',
print 'if (size == {}) {{'.format(nbytes)
first_if = False
packed_bits, code = gen_pack_bytes(nbytes)
for line in code:
print '\t\t{}'.format(line)
if nbytes == block_size * 8 // nbits:
print '\t\tsize -= {};'.format(nbytes)
if nbytes == block_size * 8 // nbits or nbytes == 1:
print '\t}'
else:
print '\t}',
print '\t return dest;'
print '}'
print
print '}; // namespace chromaprint'
print
print '#endif'
|
423062
|
import pytest
from ithkuil.parser import parseWord
words_to_test = [
('poi', {
'type': 'personal adjunct',
'[tone]': None,
'[stress]': -2,
'C1': 'p',
'Vc': 'oi',
'Cz': None,
'Vz': None,
'VxC': None,
'Vc2': None,
'Vw': None,
'C2': None,
'Ck': None,
'Cb': None
}),
('¯tiwu', {
'type': 'personal adjunct',
'[tone]': '¯',
'[stress]': -2,
'C1': 't',
'Vc': 'i',
'Cz': 'w',
'Vz': 'u',
'VxC': None,
'Vc2': None,
'Vw': None,
'C2': None,
'Ck': None,
'Cb': None
}),
('foteuye’çç', {
'type': 'personal adjunct',
'[tone]': None,
'[stress]': -2,
'C1': 't',
'Vc': 'eu',
'Cz': 'y',
'Vz': 'e',
'VxC': [{ 'type': 'f', 'degree': 'o' }],
'Vc2': None,
'Vw': None,
'C2': None,
'Ck': None,
'Cb': 'ç',
'Cb+': True
}),
('epoi', {
'type': 'personal adjunct',
'[tone]': None,
'[stress]': -2,
'C1': 'p',
'Vc': 'oi',
'Cz': None,
'Vz': None,
'VxC': None,
'Vc2': 'e',
'Vw': None,
'C2': None,
'Ck': None,
'Cb': None
}),
('ükʰu', {
'type': 'personal adjunct',
'[tone]': None,
'[stress]': -2,
'C1': None,
'Vc': 'u',
'Cz': None,
'Vz': None,
'VxC': None,
'Vc2': 'ü',
'Vw': None,
'C2': None,
'Ck': 'kʰ',
'Cb': None
}),
('_uda', {
'type': 'personal adjunct',
'[tone]': '_',
'[stress]': -2,
'C1': None,
'Vc': 'a',
'Cz': None,
'Vz': None,
'VxC': None,
'Vc2': 'u',
'Vw': None,
'C2': None,
'Ck': 'd',
'Cb': None
}),
('awuçkʰoewi', {
'type': 'personal adjunct',
'[tone]': None,
'[stress]': -2,
'C1': None,
'Vc': 'oe',
'Cz': 'w',
'Vz': 'i',
'VxC': None,
'Vc2': 'u',
'Vw': 'a',
'C2': 'w',
'Ck': 'çkʰ',
'Cb': None
}),
('uhiaksai’wé’ks', {
'type': 'personal adjunct',
'[tone]': None,
'[stress]': -1,
'C1': None,
'Vc': 'ai',
'Cz': '’w',
'Vz': 'é',
'VxC': None,
'Vc2': 'ia',
'Vw': 'u',
'C2': 'h',
'Ck': 'ks',
'Cb': 'ks',
'Cb+': False
}),
('ˇxhoehwe', {
'type': 'personal adjunct',
'[tone]': 'ˇ',
'[stress]': -2,
'C1': 'xh',
'Vc': 'oe',
'Cz': 'hw',
'Vz': 'e',
'VxC': None,
'Vc2': None,
'Vw': None,
'C2': None,
'Ck': None,
'Cb': None
}),
('mrerîwa', {
'type': 'personal adjunct',
'[tone]': None,
'[stress]': -2,
'C1': 'r',
'Vc': 'î',
'Cz': 'w',
'Vz': 'a',
'VxC': [{ 'type': 'mr', 'degree': 'e' }],
'Vc2': None,
'Vw': None,
'C2': None,
'Ck': None,
'Cb': None
})
]
@pytest.mark.parametrize('word, expected', words_to_test)
def test_word(word, expected):
parsedWord = parseWord(word)
for key in expected:
if expected[key] is None:
assert key not in parsedWord
else:
assert parsedWord[key] == expected[key]
|
423064
|
import numpy as np
import math
from sardem.dem import main as load_dem
from numba import jit, prange
from typing import Tuple, NamedTuple, Dict
import gdal
class RPCCoeffs(NamedTuple):
height_off: float
height_scale: float
lat_off: float
lat_scale: float
line_den_coeff: np.array
line_num_coeff: np.array
line_off: float
line_scale: float
long_off: float
long_scale: float
max_lat: float
max_long: float
min_lat: float
min_long: float
samp_den_coeff: np.array
samp_num_coeff: np.array
samp_off: float
samp_scale: float
@jit(nopython=True)
def lon_lat_alt_to_xy(
lon: float,
lat: float,
alt: float,
rpcs: RPCCoeffs,
) -> Tuple[float, float]:
"""
Returns an image pixel coordinate (x, y) corresponding to a provided world coordinate (lon, lat, alt)
using provided RPC coefficients
:param lon: The world coordinate longitude
:param lat: The world coordinate latitude
:param alt: The world coordinate altitude
:param rpcs: NamedTuple with RPC coefficients
:return: Pixel coordinate (x,y) as floating point values -- interpolation will be needed to arrive at a
pixel intensity value!
"""
# First create the normalized values for x/y/alt
norm_lon = (lon - rpcs.long_off) / rpcs.long_scale
norm_lat = (lat - rpcs.lat_off) / rpcs.lat_scale
norm_alt = (alt - rpcs.height_off) / rpcs.height_scale
# Create the polynomial vector (gets re-used)
formula = np.array(
[
1,
norm_lon,
norm_lat,
norm_alt,
norm_lon * norm_lat,
norm_lon * norm_alt,
norm_lat * norm_alt,
norm_lon ** 2,
norm_lat ** 2,
norm_alt ** 2,
norm_lat * norm_lon * norm_alt,
norm_lon ** 3,
norm_lon * (norm_lat ** 2),
norm_lon * (norm_alt ** 2),
(norm_lon ** 2) * norm_lat,
norm_lat ** 3,
norm_lat * (norm_alt ** 2),
(norm_lon ** 2) * norm_alt,
(norm_lat ** 2) * norm_alt,
norm_alt ** 3,
],
dtype=np.float32
)
# PLUG AND CHUG
f1 = np.dot(rpcs.samp_num_coeff, formula)
f2 = np.dot(rpcs.samp_den_coeff, formula)
f3 = np.dot(rpcs.line_num_coeff, formula)
f4 = np.dot(rpcs.line_den_coeff, formula)
samp_number_normed = f1 / f2
line_number_normed = f3 / f4
# Then denormalize to get the approximate pixel coordinate
samp_number = samp_number_normed * rpcs.samp_scale + rpcs.samp_off
line_number = line_number_normed * rpcs.line_scale + rpcs.line_off
return samp_number, line_number
@jit(nopython=True, parallel=True, nogil=True)
def make_ortho(
x1: float,
x2: float,
y1: float,
y2: float,
width: int,
source: np.ndarray,
rpcs: RPCCoeffs,
dem: np.ndarray,
dem_geot: np.array,
missing_data_value: float = None
) -> Tuple[np.array, float, float, float]:
"""
    Produces an orthorectified image given a source image, its RPC coefficients, and a DEM covering the target area
:param x1: upper left x
:param x2: lower right x
:param y1: upper left y
:param y2: lower right y
:param width: num pixels for the width of the resulting orthorectified image (height will be set by aspect ratio)
:param source: the original raw image
:param rpcs: rational polynomial coefficients object
:param dem: dem tile corresponding to the image location
:param dem_geot: dem geo transform (affine transform)
    :param missing_data_value: value treated as missing/no-data in the source image (currently unused)
:return: a Tuple containing the ortho'd image, the ground sampling distance in degrees,
and the upper left coordinate
"""
cols = np.linspace(x1, x2, width)
gsd = abs(cols[0] - cols[1])
height = int(abs(y1 - y2) / gsd)
rows = np.linspace(y1, y2, height)
ortho = np.zeros(width * height)
for i in prange(len(cols)):
lon = cols[i]
for j in prange(len(rows)):
lat = rows[j]
dem_x = int((lon - dem_geot[0]) / dem_geot[1])
dem_y = int((lat - dem_geot[3]) / dem_geot[5])
if dem_x < 0 or dem_x > dem.shape[1] - 1 or dem_y < 0 or dem_y > dem.shape[0] - 1:
# Numba will segfault if I don't catch this... AND it will fail to compile
                # if I try to include a useful message with "dem_x" and "dem_y" :( :( :(
raise IndexError("DEM indices out of bounds")
altitude = linear_interp(dem_x, dem_y, dem.reshape(-1), dem.shape[1])
x, y = lon_lat_alt_to_xy(lon, lat, altitude, rpcs)
if 1 <= x < source.shape[1] - 1 and 1 <= y < source.shape[0] - 1:
idx = i * height + j
result = linear_interp(x, y, source.reshape(-1), source.shape[1])
ortho[idx] = result
else:
pass
return ortho.reshape(width, height).transpose(), gsd, x1, y1
@jit(nopython=True)
def linear_interp(x: float, y: float, source: np.ndarray, source_height: int) -> int:
x_floor = math.floor(x)
x_ceil = math.ceil(x)
y_floor = math.floor(y)
y_ceil = math.ceil(y)
x_frac = x - x_floor
y_frac = y - y_floor
ul_index = x_floor + source_height * y_floor
ur_index = x_ceil + source_height * y_floor
lr_index = x_ceil + source_height * y_ceil
ll_index = x_floor + source_height * y_ceil
ul = source[ul_index]
ur = source[ur_index]
lr = source[lr_index]
ll = source[ll_index]
upper_x = (1 - x_frac) * ul + x_frac * ur
lower_x = (1 - x_frac) * ll + x_frac * lr
upper_y = (1 - y_frac) * ul + y_frac * ll
lower_y = (1 - y_frac) * ur + y_frac * lr
final_value = (upper_x + lower_x + upper_y + lower_y) / 4
return int(final_value)
def unpack_rpc_parameters_dataset(dataset: gdal.Dataset) -> RPCCoeffs:
rpc_dict = dataset.GetMetadata_Dict("RPC")
return unpack_rpc_parameters(rpc_dict)
def unpack_rpc_parameters(rpc_dict: Dict[str, str]) -> RPCCoeffs:
"""
    Returns an RPC coefficients collection as a NamedTuple
    when provided with a dictionary of RPC metadata strings (e.g. GDAL's "RPC" metadata domain)
    :param rpc_dict: dictionary mapping RPC metadata keys to their string values
:return: A NamedTuple containing RPC coefficients and parameters
"""
height_off = float(rpc_dict["HEIGHT_OFF"])
height_scale = float(rpc_dict["HEIGHT_SCALE"])
lat_off = float(rpc_dict["LAT_OFF"])
lat_scale = float(rpc_dict["LAT_SCALE"])
line_den_coeff = np.array(
[
float(coeff.strip())
for coeff in rpc_dict["LINE_DEN_COEFF"].strip().split(" ")
],
dtype=np.float32
)
line_num_coeff = np.array(
[
float(coeff.strip())
for coeff in rpc_dict["LINE_NUM_COEFF"].strip().split(" ")
],
dtype=np.float32
)
line_off = float(rpc_dict["LINE_OFF"])
line_scale = float(rpc_dict["LINE_SCALE"])
long_off = float(rpc_dict["LONG_OFF"])
long_scale = float(rpc_dict["LONG_SCALE"])
max_lat = float(rpc_dict["MAX_LAT"])
max_long = float(rpc_dict["MAX_LONG"])
min_lat = float(rpc_dict["MIN_LAT"])
min_long = float(rpc_dict["MIN_LONG"])
samp_den_coeff = np.array(
[
float(coeff.strip())
for coeff in rpc_dict["SAMP_DEN_COEFF"].strip().split(" ")
],
dtype=np.float32
)
samp_num_coeff = np.array(
[
float(coeff.strip())
for coeff in rpc_dict["SAMP_NUM_COEFF"].strip().split(" ")
],
dtype=np.float32
)
samp_off = float(rpc_dict["SAMP_OFF"])
samp_scale = float(rpc_dict["SAMP_SCALE"])
return RPCCoeffs(
height_off,
height_scale,
lat_off,
lat_scale,
line_den_coeff,
line_num_coeff,
line_off,
line_scale,
long_off,
long_scale,
max_lat,
max_long,
min_lat,
min_long,
samp_den_coeff,
samp_num_coeff,
samp_off,
samp_scale,
)
def pixel_to_lon_lat(x: float, y: float, geot: np.ndarray) -> Tuple[float, float]:
"""
Returns a lon/lat coordinate as a tuple when provided with a pixel coordinate x,y
and a geo_transform (affine transform)
    :param x: pixel column (x) coordinate
    :param y: pixel row (y) coordinate
:param geot: affine transform parameters in a length 6 np.array
:return: A longitude and latitude tuple (lon,lat)
"""
lon = (x * geot[1]) + geot[0]
lat = (y * geot[5]) + geot[3]
return lon, lat
def lon_lat_to_pixel(lon: float, lat: float, geot: np.array) -> Tuple[float, float]:
"""
Returns a pixel coordinate (x,y) as a tuple when provided with a longitude, latitude coordinate
and a geo_transform (affine transform)
:param lon: longitude of the world coordinate
:param lat: latitude of the world coordinate
:param geot: affine transform parameters in a length 6 np.array
:return: A pixel coordinate as a Tuple (x, y)
"""
x = (lon - geot[0]) / geot[1]
y = (lat - geot[3]) / geot[5]
return x, y
def retrieve_dem(
min_lon: float,
min_lat: float,
degrees_lon: float,
degrees_lat: float,
sampling_rate: int = 1,
output_path: str = "/tmp/elevation.dem",
) -> Tuple[np.ndarray, np.array]:
"""
    Load SRTM tiles for a bounding rectangle defined by an upper left point (min_lon, min_lat) and a width and height
in degrees. Optionally, an integer sampling rate greater than 1 may be passed in to downsample the DEM.
:param min_lon: x component of the upper left corner of the bounding rectangle
:param min_lat: y component of the upper left corner of the bounding rectangle
:param degrees_lon: width of the DEM in degrees
:param degrees_lat: height of the DEM in degrees
:param sampling_rate: sampling rate
:param output_path: Path where the dem file is saved
:return: A numpy ndarray with elevation data, followed by the geotransform of the elevation data wrapped in a Tuple
"""
load_dem(
min_lon,
min_lat,
degrees_lon,
degrees_lat,
rate=sampling_rate,
data_source="AWS",
output_name=output_path,
)
gdal.AllRegister()
dem_dataset = gdal.Open(output_path)
elevation_data = dem_dataset.ReadAsArray()
geo_transform = np.array([*dem_dataset.GetGeoTransform()])
return elevation_data, geo_transform
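# End-to-end usage sketch (file path, output width and the DEM extent are placeholders):
# read RPCs from a GDAL dataset, fetch a matching SRTM DEM and orthorectify.
#
#   dataset = gdal.Open("scene_with_rpcs.tif")
#   rpcs = unpack_rpc_parameters_dataset(dataset)
#   dem, dem_geot = retrieve_dem(rpcs.min_long, rpcs.min_lat,
#                                rpcs.max_long - rpcs.min_long,
#                                rpcs.max_lat - rpcs.min_lat)
#   source = dataset.ReadAsArray()
#   ortho, gsd, ul_lon, ul_lat = make_ortho(rpcs.min_long, rpcs.max_long,
#                                           rpcs.max_lat, rpcs.min_lat,
#                                           1024, source, rpcs, dem, dem_geot)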
|
423075
|
import sessions
import wx
from core.gui import SquareDialog
class CountdownDialog (SquareDialog):
def __init__(self, *args, **kwargs):
super(CountdownDialog, self).__init__(*args, **kwargs)
wx.StaticText(parent=self.pane, label=_("Name:"))
self.name = wx.TextCtrl(parent=self.pane)
wx.StaticText(parent=self.pane, label=_("Hours:"))
self.hours = wx.SpinCtrl(parent=self.pane)
self.hours.SetValue(sessions.current_session.config['countdown']['hours'])
wx.StaticText(parent=self.pane, label=_("Minutes:"))
self.minutes = wx.SpinCtrl(parent=self.pane)
self.minutes.SetValue(sessions.current_session.config['countdown']['minutes'])
wx.StaticText(parent=self.pane, label=_("Seconds:"))
self.seconds = wx.SpinCtrl(parent=self.pane)
self.seconds.SetValue(sessions.current_session.config['countdown']['seconds'])
self.finish_setup(set_focus=False)
self.hours.SetFocus()
def get_time (self):
        # Returns the time set in the dialog as an integer number of total seconds.
seconds = self.seconds.GetValue()
seconds += (self.minutes.GetValue() * 60)
seconds += (self.hours.GetValue() * 3600)
return seconds
|
423107
|
import os
import glob
from fontTools.misc.loggingTools import LogMixin
from io import BytesIO
from fontTools.misc.textTools import tobytes  # on older fontTools this lives in fontTools.misc.py23
from fontTools.designspaceLib import DesignSpaceDocument, AxisDescriptor, SourceDescriptor, RuleDescriptor, InstanceDescriptor, DesignSpaceDocumentError
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
# Reader that parses Superpolator documents and builds designspace objects.
# Note: the Superpolator document format precedes the designspace document format.
# For now I just want to migrate data out of Superpolator into designspace.
# So not all data will migrate, just the stuff we can use.
"""
<lib>
<dict>
<key>com.letterror.skateboard.interactionSources</key>
<dict>
<key>horizontal</key>
<array/>
<key>ignore</key>
<array/>
<key>vertical</key>
<array/>
</dict>
<key>com.letterror.skateboard.mutedSources</key>
<array>
<array>
<string>IBM Plex Sans Condensed-Bold.ufo</string>
<string>foreground</string>
</array>
</array>
<key>com.letterror.skateboard.previewLocation</key>
<dict>
<key>weight</key>
<real>0.0</real>
</dict>
<key>com.letterror.skateboard.previewText</key>
<string>SKATE</string>
</dict>
</lib>
"""
superpolatorDataLibKey = "com.superpolator.data" # lib key for Sp data in .designspace
skateboardInteractionSourcesKey = "com.letterror.skateboard.interactionSources"
skateboardMutedSourcesKey = "com.letterror.skateboard.mutedSources"
skipExportKey = "public.skipExportGlyphs"
skateboardPreviewLocationsKey = "com.letterror.skateboard.previewLocation"
skateboardPreviewTextKey = "com.letterror.skateboard.previewText"
class SuperpolatorReader(LogMixin):
ruleDescriptorClass = RuleDescriptor
axisDescriptorClass = AxisDescriptor
sourceDescriptorClass = SourceDescriptor
instanceDescriptorClass = InstanceDescriptor
def __init__(self, documentPath, documentObject, convertRules=True, convertData=True, anisotropic=False):
self.path = documentPath
self.documentObject = documentObject
self.convertRules = convertRules
self.convertData = convertData
self.allowAnisotropic = anisotropic # maybe add conversion options later
tree = ET.parse(self.path)
self.root = tree.getroot()
self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
self.axisDefaults = {}
self._strictAxisNames = True
@classmethod
def fromstring(cls, string, documentObject):
f = BytesIO(tobytes(string, encoding="utf-8"))
self = cls(f, documentObject)
self.path = None
return self
def read(self):
self.readAxes()
if self.convertData:
self.readData()
if self.convertRules:
self.readOldRules()
self.readSimpleRules()
self.readSources()
self.readInstances()
def readData(self):
# read superpolator specific data, view prefs etc.
# if possible convert it to skateboard
interactionSources = {'horizontal': [], 'vertical': [], 'ignore': []}
ignoreElements = self.root.findall(".ignore")
ignoreGlyphs = []
for ignoreElement in ignoreElements:
names = ignoreElement.attrib.get('glyphs')
if names:
ignoreGlyphs = names.split(",")
if ignoreGlyphs:
self.documentObject.lib[skipExportKey] = ignoreGlyphs
dataElements = self.root.findall(".data")
if not dataElements:
return
newLib = {}
interactionSourcesAdded = False
for dataElement in dataElements:
name = dataElement.attrib.get('name')
value = dataElement.attrib.get('value')
if value in ['True', 'False']:
value = value == "True"
else:
try:
value = float(value)
except ValueError:
pass
if name == "previewtext":
self.documentObject.lib[skateboardPreviewTextKey] = value
elif name == "horizontalPreviewAxis":
interactionSources['horizontal'].append(value)
interactionSourcesAdded = True
elif name == "verticalPreviewAxis":
interactionSources['vertical'].append(value)
interactionSourcesAdded = True
newLib[name] = value
if interactionSourcesAdded:
self.documentObject.lib[skateboardInteractionSourcesKey] = interactionSources
if newLib:
self.documentObject.lib[superpolatorDataLibKey] = newLib
def readOldRules(self):
# read the old rules
# <rule enabled="1" logic="all" resultfalse="B" resulttrue="B.round">
# <condition axisname="AxisWidth" operator="==" xvalue="100.000000"/>
# </rule>
# superpolator old rule to simple rule
# if op in ['<', '<=']:
# # old style data
# axes[axisName]['maximum'] = conditionDict['values']
# newRule.name = "converted %s < and <= "%(axisName)
# elif op in ['>', '>=']:
# # old style data
# axes[axisName]['minimum'] = conditionDict['values']
# newRule.name = "converted %s > and >= "%(axisName)
# elif op == "==":
# axes[axisName]['maximum'] = conditionDict['values']
# axes[axisName]['minimum'] = conditionDict['values']
# newRule.name = "converted %s == "%(axisName)
# newRule.enabled = False
# elif op == "!=":
# axes[axisName]['maximum'] = conditionDict['values']
# axes[axisName]['minimum'] = conditionDict['values']
# newRule.name = "unsupported %s != "%(axisName)
# newRule.enabled = False
# else:
# axes[axisName]['maximum'] = conditionDict['minimum']
# axes[axisName]['minimum'] = conditionDict['maximum']
# newRule.name = "minmax legacy rule for %s"%axisName
# newRule.enabled = False
rules = []
for oldRuleElement in self.root.findall(".rule"):
ruleObject = self.ruleDescriptorClass()
# only one condition set in these old rules
cds = []
a = oldRuleElement.attrib['resultfalse']
b = oldRuleElement.attrib['resulttrue']
ruleObject.subs.append((a,b))
for oldConditionElement in oldRuleElement.findall(".condition"):
cd = {}
operator = oldConditionElement.attrib['operator']
axisValue = float(oldConditionElement.attrib['xvalue'])
axisName = oldConditionElement.attrib['axisname']
if operator in ['<', '<=']:
cd['maximum'] = axisValue
cd['minimum'] = None
cd['name'] = axisName
ruleObject.name = "converted %s < and <= "%(axisName)
elif operator in ['>', '>=']:
cd['maximum'] = None
cd['minimum'] = axisValue
cd['name'] = axisName
ruleObject.name = "converted %s > and >= "%(axisName)
elif operator in ["==", "!="]:
# can't convert this one
continue
cds.append(cd)
if cds:
ruleObject.conditionSets.append(cds)
self.documentObject.addRule(ruleObject)
def readSimpleRules(self):
# read the simple rule elements
# <simplerules>
# <simplerule enabled="1" name="width: < 500.0">
# <sub name="I" with="I.narrow"/>
# <condition axisname="width" maximum="500"/>
# <condition axisname="grade" minimum="0" maximum="500"/>
# </simplerule>
# </simplerules>
rulesContainerElements = self.root.findall(".simplerules")
rules = []
for rulesContainerElement in rulesContainerElements:
for ruleElement in rulesContainerElement:
ruleObject = self.ruleDescriptorClass()
ruleName = ruleObject.name = ruleElement.attrib['name']
# subs
for subElement in ruleElement.findall('.sub'):
a = subElement.attrib['name']
b = subElement.attrib['with']
ruleObject.subs.append((a, b))
# condition sets, .sp3 had none
externalConditions = self._readConditionElements(
ruleElement,
ruleName,
)
if externalConditions:
ruleObject.conditionSets.append(externalConditions)
self.log.info(
"Found stray rule conditions outside a conditionset. "
"Wrapped them in a new conditionset."
)
self.documentObject.addRule(ruleObject)
def _readConditionElements(self, parentElement, ruleName=None):
# modified from the method from fonttools.designspaceLib
# it's not the same!
cds = []
for conditionElement in parentElement.findall('.condition'):
cd = {}
cdMin = conditionElement.attrib.get("minimum")
if cdMin is not None:
cd['minimum'] = float(cdMin)
else:
# will allow these to be None, assume axis.minimum
cd['minimum'] = None
cdMax = conditionElement.attrib.get("maximum")
if cdMax is not None:
cd['maximum'] = float(cdMax)
else:
# will allow these to be None, assume axis.maximum
cd['maximum'] = None
cd['name'] = conditionElement.attrib.get("axisname")
# # test for things
if cd.get('minimum') is None and cd.get('maximum') is None:
raise DesignSpaceDocumentError(
"condition missing required minimum or maximum in rule" +
(" '%s'" % ruleName if ruleName is not None else ""))
cds.append(cd)
return cds
def readAxes(self):
# read the axes elements, including the warp map.
axisElements = self.root.findall(".axis")
if not axisElements:
# raise error, we need axes
return
for axisElement in axisElements:
axisObject = self.axisDescriptorClass()
axisObject.name = axisElement.attrib.get("name")
axisObject.tag = axisElement.attrib.get("shortname")
axisObject.minimum = float(axisElement.attrib.get("minimum"))
axisObject.maximum = float(axisElement.attrib.get("maximum"))
axisObject.default = float(axisElement.attrib.get("initialvalue", axisObject.minimum))
self.documentObject.axes.append(axisObject)
self.axisDefaults[axisObject.name] = axisObject.default
self.documentObject.defaultLoc = self.axisDefaults
def colorFromElement(self, element):
elementColor = None
for colorElement in element.findall('.color'):
elementColor = self.readColorElement(colorElement)
def readColorElement(self, colorElement):
pass
def locationFromElement(self, element):
elementLocation = None
for locationElement in element.findall('.location'):
elementLocation = self.readLocationElement(locationElement)
break
        if elementLocation is not None and not self.allowAnisotropic:
            # don't want any anisotropic values here
            split = {}
            for k, v in elementLocation.items():
                if isinstance(v, tuple):
                    split[k] = v[0]
                else:
                    split[k] = v
            elementLocation = split
return elementLocation
def readLocationElement(self, locationElement):
""" Format 0 location reader """
if self._strictAxisNames and not self.documentObject.axes:
raise DesignSpaceDocumentError("No axes defined")
loc = {}
for dimensionElement in locationElement.findall(".dimension"):
dimName = dimensionElement.attrib.get("name")
if self._strictAxisNames and dimName not in self.axisDefaults:
                # axis name is not defined in this document: warn and skip this dimension
self.log.warning("Location with undefined axis: \"%s\".", dimName)
continue
xValue = yValue = None
try:
xValue = dimensionElement.attrib.get('xvalue')
xValue = float(xValue)
except ValueError:
self.log.warning("KeyError in readLocation xValue %3.3f", xValue)
try:
yValue = dimensionElement.attrib.get('yvalue')
if yValue is not None:
yValue = float(yValue)
except ValueError:
pass
if yValue is not None:
loc[dimName] = (xValue, yValue)
else:
loc[dimName] = xValue
return loc
def readSources(self):
for sourceCount, sourceElement in enumerate(self.root.findall(".master")):
filename = sourceElement.attrib.get('filename')
if filename is not None and self.path is not None:
sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))
else:
sourcePath = None
sourceName = sourceElement.attrib.get('name')
if sourceName is None:
# add a temporary source name
sourceName = "temp_master.%d" % (sourceCount)
sourceObject = self.sourceDescriptorClass()
sourceObject.path = sourcePath # absolute path to the ufo source
sourceObject.filename = filename # path as it is stored in the document
sourceObject.name = sourceName
familyName = sourceElement.attrib.get("familyname")
if familyName is not None:
sourceObject.familyName = familyName
styleName = sourceElement.attrib.get("stylename")
if styleName is not None:
sourceObject.styleName = styleName
sourceObject.location = self.locationFromElement(sourceElement)
isMuted = False
for maskedElement in sourceElement.findall('.maskedfont'):
# mute isn't stored in the sourceDescriptor, but we can store it in the lib
if maskedElement.attrib.get('font') == "1":
isMuted = True
for libElement in sourceElement.findall('.provideLib'):
if libElement.attrib.get('state') == '1':
sourceObject.copyLib = True
for groupsElement in sourceElement.findall('.provideGroups'):
if groupsElement.attrib.get('state') == '1':
sourceObject.copyGroups = True
for infoElement in sourceElement.findall(".provideInfo"):
if infoElement.attrib.get('state') == '1':
sourceObject.copyInfo = True
for featuresElement in sourceElement.findall(".provideFeatures"):
if featuresElement.attrib.get('state') == '1':
sourceObject.copyFeatures = True
for glyphElement in sourceElement.findall(".glyph"):
glyphName = glyphElement.attrib.get('name')
if glyphName is None:
continue
if glyphElement.attrib.get('mute') == '1':
sourceObject.mutedGlyphNames.append(glyphName)
self.documentObject.sources.append(sourceObject)
if isMuted:
if not skateboardMutedSourcesKey in self.documentObject.lib:
self.documentObject.lib[skateboardMutedSourcesKey] = []
item = (sourceObject.filename, "foreground")
self.documentObject.lib[skateboardMutedSourcesKey].append(item)
def readInstances(self):
for instanceCount, instanceElement in enumerate(self.root.findall(".instance")):
instanceObject = self.instanceDescriptorClass()
if instanceElement.attrib.get("familyname"):
instanceObject.familyName = instanceElement.attrib.get("familyname")
if instanceElement.attrib.get("stylename"):
instanceObject.styleName = instanceElement.attrib.get("stylename")
if instanceElement.attrib.get("styleMapFamilyName"):
instanceObject.styleMapFamilyName = instanceElement.attrib.get("styleMapFamilyName")
if instanceElement.attrib.get("styleMapStyleName"):
instanceObject.styleMapStyleName = instanceElement.attrib.get("styleMapStyleName")
if instanceElement.attrib.get("styleMapFamilyName"):
instanceObject.styleMapFamilyName = instanceElement.attrib.get("styleMapFamilyName")
instanceObject.location = self.locationFromElement(instanceElement)
instanceObject.filename = instanceElement.attrib.get('filename')
for libElement in instanceElement.findall('.provideLib'):
if libElement.attrib.get('state') == '1':
instanceObject.lib = True
for libElement in instanceElement.findall('.provideInfo'):
if libElement.attrib.get('state') == '1':
instanceObject.info = True
self.documentObject.instances.append(instanceObject)
def sp3_to_designspace(sp3path, designspacePath=None):
if designspacePath is None:
designspacePath = sp3path.replace(".sp3", ".designspace")
doc = DesignSpaceDocument()
reader = SuperpolatorReader(sp3path, doc)
reader.read()
doc.write(designspacePath)
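# Usage sketch (file names are placeholders):
#   sp3_to_designspace("MyFamily.sp3")                              # writes MyFamily.designspace
#   sp3_to_designspace("MyFamily.sp3", "MyFamily_out.designspace")  # explicit output path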
if __name__ == "__main__":
def test_superpolator_testdoc1():
# read superpolator_testdoc1.sp3
# and test all the values
testDoc = DesignSpaceDocument()
testPath = "../../Tests/spReader_testdocs/superpolator_testdoc1.sp3"
reader = SuperpolatorReader(testPath, testDoc)
reader.read()
# check the axes
names = [a.name for a in reader.documentObject.axes]
names.sort()
assert names == ['grade', 'space', 'weight', 'width']
tags = [a.tag for a in reader.documentObject.axes]
tags.sort()
assert tags == ['SPCE', 'grad', 'wdth', 'wght']
# check the data items
assert superpolatorDataLibKey in reader.documentObject.lib
items = list(reader.documentObject.lib[superpolatorDataLibKey].items())
items.sort()
assert items == [('expandRules', False), ('horizontalPreviewAxis', 'width'), ('includeLegacyRules', False), ('instancefolder', 'instances'), ('keepWorkFiles', True), ('lineInverted', True), ('lineStacked', 'lined'), ('lineViewFilled', True), ('outputFormatUFO', 3.0), ('previewtext', 'VA'), ('roundGeometry', False), ('verticalPreviewAxis', 'weight')]
# check the sources
for sd in reader.documentObject.sources:
assert sd.familyName == "MutatorMathTest_SourceFamilyName"
if sd.styleName == "Default":
assert sd.location == {'width': 0.0, 'weight': 0.0, 'space': 0.0, 'grade': -0.5}
assert sd.copyLib == True
assert sd.copyGroups == True
assert sd.copyInfo == True
assert sd.copyFeatures == True
elif sd.styleName == "TheOther":
assert sd.location == {'width': 0.0, 'weight': 1000.0, 'space': 0.0, 'grade': -0.5}
assert sd.copyLib == False
assert sd.copyGroups == False
assert sd.copyInfo == False
assert sd.copyFeatures == False
# check the instances
for nd in reader.documentObject.instances:
assert nd.familyName == "MutatorMathTest_InstanceFamilyName"
if nd.styleName == "AWeightThatILike":
assert nd.location == {'width': 133.152174, 'weight': 723.981097, 'space': 0.0, 'grade': -0.5}
assert nd.filename == "instances/MutatorMathTest_InstanceFamilyName-AWeightThatILike.ufo"
assert nd.styleMapFamilyName == None
assert nd.styleMapStyleName == None
if nd.styleName == "wdth759.79_SPCE0.00_wght260.72":
# note the converted anisotropic location in the width axis.
assert nd.location == {'grade': -0.5, 'width': 500.0, 'weight': 260.7217, 'space': 0.0}
assert nd.filename == "instances/MutatorMathTest_InstanceFamilyName-wdth759.79_SPCE0.00_wght260.72.ufo"
assert nd.styleMapFamilyName == "StyleMappedFamily"
assert nd.styleMapStyleName == "bold"
# check the rules
for rd in reader.documentObject.rules:
assert rd.name == "width: < 500.0"
assert len(rd.conditionSets) == 1
assert rd.subs == [('I', 'I.narrow')]
for conditionSet in rd.conditionSets:
for cd in conditionSet:
if cd['name'] == "width":
assert cd == {'minimum': None, 'maximum': 500.0, 'name': 'width'}
if cd['name'] == "grade":
assert cd == {'minimum': 0.0, 'maximum': 500.0, 'name': 'grade'}
testDoc.write(testPath.replace(".sp3", "_output_roundtripped.designspace"))
def test_testDocs():
# read the test files and convert them
# no tests
root = "../../Tests/spReader_testdocs/test*.sp3"
for path in glob.glob(root):
sp3_to_designspace(path)
test_superpolator_testdoc1()
#test_testDocs()
|
423109
|
from bunq.sdk.exception.api_exception import ApiException
class MethodNotAllowedException(ApiException):
pass
|
423135
|
import numbers
def is_id(value):
"""
    Return True if the input value is a valid ID, False otherwise.
"""
if isinstance(value, numbers.Number):
try:
int(value)
return True
        except (ValueError, TypeError, OverflowError):
            # NaN, infinities and complex numbers are Numbers but cannot be converted to int
return False
return False
|
423144
|
import unittest
import pytest
from gitopscli.gitops_config import GitOpsConfig
from gitopscli.gitops_exception import GitOpsException
class GitOpsConfigV0Test(unittest.TestCase):
def setUp(self):
self.yaml = {
"deploymentConfig": {"applicationName": "my-app", "org": "my-org", "repository": "my-repo"},
"previewConfig": {
"route": {"host": {"template": "my-{SHA256_8CHAR_BRANCH_HASH}-host-template"}},
"replace": [{"path": "a.b", "variable": "ROUTE_HOST"}, {"path": "c.d", "variable": "GIT_COMMIT"}],
},
}
def load(self) -> GitOpsConfig:
return GitOpsConfig.from_yaml(self.yaml)
def assert_load_error(self, error_msg: str) -> None:
with pytest.raises(GitOpsException) as ex:
self.load()
self.assertEqual(error_msg, str(ex.value))
def test_application_name(self):
config = self.load()
self.assertEqual(config.application_name, "my-app")
def test_application_name_missing(self):
del self.yaml["deploymentConfig"]["applicationName"]
self.assert_load_error("Key 'deploymentConfig.applicationName' not found in GitOps config!")
def test_application_name_not_a_string(self):
self.yaml["deploymentConfig"]["applicationName"] = 1
self.assert_load_error("Item 'deploymentConfig.applicationName' should be a string in GitOps config!")
def test_team_config_org(self):
config = self.load()
self.assertEqual(config.preview_template_organisation, "my-org")
self.assertEqual(config.preview_target_organisation, "my-org")
self.assertTrue(config.is_preview_template_equal_target())
def test_deployment_config_org_missing(self):
del self.yaml["deploymentConfig"]["org"]
self.assert_load_error("Key 'deploymentConfig.org' not found in GitOps config!")
def test_deployment_config_org_not_a_string(self):
self.yaml["deploymentConfig"]["org"] = True
self.assert_load_error("Item 'deploymentConfig.org' should be a string in GitOps config!")
def test_deployment_config_repo(self):
config = self.load()
self.assertEqual(config.preview_template_repository, "my-repo")
self.assertEqual(config.preview_target_repository, "my-repo")
self.assertTrue(config.is_preview_template_equal_target())
def test_deployment_config_repo_missing(self):
del self.yaml["deploymentConfig"]["repository"]
self.assert_load_error("Key 'deploymentConfig.repository' not found in GitOps config!")
def test_deployment_config_repo_not_a_string(self):
self.yaml["deploymentConfig"]["repository"] = []
self.assert_load_error("Item 'deploymentConfig.repository' should be a string in GitOps config!")
def test_preview_template_branch_is_none(self):
config = self.load()
self.assertIsNone(config.preview_template_branch)
def test_preview_target_branch_is_none(self):
config = self.load()
self.assertIsNone(config.preview_target_branch)
def test_route_host_template(self):
config = self.load()
self.assertEqual(config.preview_host_template, "my-${PREVIEW_ID_HASH}-host-template")
def test_route_host(self):
config = self.load()
self.assertEqual(config.get_preview_host("preview-1"), "my-3e355b4a-host-template")
def test_route_missing(self):
del self.yaml["previewConfig"]["route"]
self.assert_load_error("Key 'previewConfig.route.host.template' not found in GitOps config!")
def test_route_host_missing(self):
del self.yaml["previewConfig"]["route"]["host"]
self.assert_load_error("Key 'previewConfig.route.host.template' not found in GitOps config!")
def test_route_host_template_missing(self):
del self.yaml["previewConfig"]["route"]["host"]["template"]
self.assert_load_error("Key 'previewConfig.route.host.template' not found in GitOps config!")
def test_route_host_template_not_a_string(self):
self.yaml["previewConfig"]["route"]["host"]["template"] = []
self.assert_load_error("Item 'previewConfig.route.host.template' should be a string in GitOps config!")
def test_namespace_template(self):
config = self.load()
self.assertEqual(config.preview_target_namespace_template, "${APPLICATION_NAME}-${PREVIEW_ID_HASH}-preview")
def test_namespace(self):
config = self.load()
self.assertEqual(config.get_preview_namespace("preview-1"), "my-app-3e355b4a-preview")
def test_replacements(self):
config = self.load()
self.assertEqual(config.replacements.keys(), {"Chart.yaml", "values.yaml"})
self.assertEqual(len(config.replacements["Chart.yaml"]), 1)
self.assertEqual(config.replacements["Chart.yaml"][0].path, "name")
self.assertEqual(config.replacements["Chart.yaml"][0].value_template, "${PREVIEW_NAMESPACE}")
self.assertEqual(len(config.replacements["values.yaml"]), 2)
self.assertEqual(config.replacements["values.yaml"][0].path, "a.b")
self.assertEqual(config.replacements["values.yaml"][0].value_template, "${PREVIEW_HOST}")
self.assertEqual(config.replacements["values.yaml"][1].path, "c.d")
self.assertEqual(config.replacements["values.yaml"][1].value_template, "${GIT_HASH}")
def test_replacements_missing(self):
del self.yaml["previewConfig"]["replace"]
self.assert_load_error("Key 'previewConfig.replace' not found in GitOps config!")
def test_replacements_not_a_list(self):
self.yaml["previewConfig"]["replace"] = "foo"
self.assert_load_error("Item 'previewConfig.replace' should be a list in GitOps config!")
def test_replacements_invalid_list(self):
self.yaml["previewConfig"]["replace"] = ["foo"]
self.assert_load_error("Item 'previewConfig.replace.[0]' should be an object in GitOps config!")
def test_replacements_invalid_list_items_missing_path(self):
del self.yaml["previewConfig"]["replace"][1]["path"]
self.assert_load_error("Key 'previewConfig.replace.[1].path' not found in GitOps config!")
def test_replacements_invalid_list_items_missing_variable(self):
del self.yaml["previewConfig"]["replace"][0]["variable"]
self.assert_load_error("Key 'previewConfig.replace.[0].variable' not found in GitOps config!")
def test_replacements_invalid_list_items_path_not_a_string(self):
self.yaml["previewConfig"]["replace"][0]["path"] = 42
self.assert_load_error("Item 'previewConfig.replace.[0].path' should be a string in GitOps config!")
def test_replacements_invalid_list_items_variable_not_a_string(self):
self.yaml["previewConfig"]["replace"][0]["variable"] = []
self.assert_load_error("Item 'previewConfig.replace.[0].variable' should be a string in GitOps config!")
def test_replacements_invalid_list_items_unknown_variable(self):
self.yaml["previewConfig"]["replace"][0]["variable"] = "FOO"
self.assert_load_error("Replacement value '${FOO}' for path 'a.b' contains invalid variable: FOO")
def test_replacements_invalid_list_items_invalid_variable(self):
self.yaml["previewConfig"]["replace"][0]["variable"] = "{FOO"
self.assert_load_error("Item 'previewConfig.replace.[0].variable' must not contain '{' or '}'!")
|
423181
|
import pytest
from pg13 import sqparse2,pgmock
def test_parse_arraylit():
v=sqparse2.ArrayLit((sqparse2.Literal(1),sqparse2.Literal(2),sqparse2.Literal("three")))
assert v==sqparse2.parse("array[1,2,'three']")
assert v==sqparse2.parse("{1,2,'three'}")
def test_parse_select():
# this is also testing nesting and various whatever
from pg13.sqparse2 import NameX,CommaX,SelectX,Literal,ArrayLit,BinX,UnX,OpX,CallX,AsterX
selx=sqparse2.parse('select *,coalesce(x+3,0),{4,5},array[1,2],(select i from tbl) from whatever where (z+-1=10) and (y<5.5)')
assert selx.cols==CommaX((
AsterX(),
CallX('coalesce',CommaX([
BinX(OpX('+'),NameX('x'),Literal(3)),
Literal(0),
])),
ArrayLit((Literal(4),Literal(5))),
ArrayLit((Literal(1),Literal(2))),
SelectX(CommaX([NameX('i')]),['tbl'],None,None,None,None,None),
))
# nested select in comma
assert selx.tables==['whatever']
assert selx.where==BinX(
OpX('and'),
BinX(
OpX('='),
BinX(OpX('+'),NameX('z'),UnX(OpX('-'),Literal(1))),
Literal(10)
),
BinX(OpX('<'),NameX('y'),Literal(5.5))
)
assert selx.limit is None and selx.order is None and selx.offset is None
def test_parse_create():
# todo: real tests here instead of repr comparison
from pg13.sqparse2 import Literal,NameX,CreateX,ColX,PKeyX,NullX,CheckX,BinX,Literal,OpX,TypeX
assert sqparse2.parse('create table tbl (a int, b int, c text[])')==CreateX(
False, 'tbl', [
ColX('a',TypeX('int',None),False,False,None,False),
ColX('b',TypeX('int',None),False,False,None,False),
ColX('c',TypeX('text',None),True,False,None,False),
], None, [], None
)
assert sqparse2.parse('create table tbl (a int, b int, primary key (a,b))')==CreateX(
False, 'tbl', [
ColX('a',TypeX('int',None),False,False,None,False),
ColX('b',TypeX('int',None),False,False,None,False),
], PKeyX(['a','b']), [], None
)
ex=sqparse2.parse('create table t1 (a int default 7, b int default null, d int primary key)')
assert ex.cols[0].default==Literal(7) and ex.cols[1].default==NullX() and ex.cols[2].pkey
assert sqparse2.parse('create table t1 (a int not null)').cols[0].not_null
assert sqparse2.parse('create table if not exists t1 (a int not null)').nexists
assert sqparse2.parse('create table t2 (check (a=5)) inherits (t1)') == CreateX(
False,'t2',[],None,[CheckX(BinX(OpX('='),NameX('a'),Literal(5)))],['t1']
)
# test duplicate primary key
with pytest.raises(sqparse2.SQLSyntaxError): sqparse2.parse('create table t (primary key (a,b),primary key (c,d))')
# test varchar
assert sqparse2.parse('create table t1 (a varchar(10))').cols[0].coltp == TypeX('varchar',10)
def test_parse_insert():
from pg13.sqparse2 import InsertX,NameX,CommaX,Literal,ReturnX,AsterX
assert sqparse2.parse('insert into t1 (a,b) values (1,2)')==InsertX(
't1', ['a','b'], [Literal(1),Literal(2)], None
)
assert sqparse2.parse('insert into t1 values (1,2)')==InsertX(
't1', None, [Literal(1),Literal(2)], None
)
assert sqparse2.parse('insert into t1 values (1,2) returning *')==InsertX(
't1', None, [Literal(1),Literal(2)], ReturnX(AsterX())
)
assert sqparse2.parse('insert into t1 values (1,2) returning (a,b)')==InsertX(
't1', None, [Literal(1),Literal(2)], ReturnX(CommaX((NameX('a'),NameX('b'))))
)
def test_parse_update():
from pg13.sqparse2 import NameX,AssignX,BinX,OpX,Literal,ReturnX,CommaX
x=sqparse2.parse('update t1 set a=5,d=x+9 where 35 > 50 returning (a,b+1)')
assert x.tables==['t1']
assert x.assigns==[
AssignX('a',Literal(5)),
AssignX('d',BinX(OpX('+'),NameX('x'),Literal(9))),
]
assert x.where==BinX(OpX('>'),Literal(35),Literal(50))
assert x.ret==ReturnX(CommaX((
NameX('a'),
BinX(OpX('+'),NameX('b'),Literal(1)),
)))
def test_strlit():
from pg13.sqparse2 import Literal
x=sqparse2.parse("select 'literal1','literal two','literal \\'three\\'' from t1")
assert x.cols==sqparse2.CommaX((Literal('literal1'),Literal('literal two'),Literal("literal 'three'")))
def test_boolx():
"small-scale test of boolx parsing"
from pg13.sqparse2 import Literal,NameX,OpX,BinX,UnX
assert BinX(OpX('and'),BinX(OpX('<'),NameX('a'),Literal(5)),BinX(OpX('='),NameX('z'),Literal(3)))==sqparse2.parse('a<5 and z=3')
assert BinX(OpX('<'),NameX('a'),UnX(OpX('-'),Literal(5)))==sqparse2.parse('a<-5')
def is_balanced(binx):
"helper for test_precedence"
def unbalanced(outer, inner): return not is_balanced(inner) or inner.op < outer.op
if isinstance(binx.left, sqparse2.BinX) and unbalanced(binx, binx.left): return False
if isinstance(binx.right, sqparse2.BinX) and unbalanced(binx, binx.right): return False
return True
def test_precedence():
"check order of operations in boolx"
# this isn't an awesome test; the parser might accidentally get it right. better than nothing.
assert is_balanced(sqparse2.parse('a+1<5 and z=3 or z=6'))
def test_unary_precedence():
assert isinstance(sqparse2.parse('select * from t1 where not a=0').where,sqparse2.UnX)
assert isinstance(sqparse2.parse('select -a+1 from t1').cols.children[0],sqparse2.BinX) # warning: I'm not ensuring this outcome, it just happens to work.
def test_parse_sub():
assert sqparse2.parse('select * from t1 where x=%s').where.right is sqparse2.SubLit
def test_select_emptywhere():
with pytest.raises(sqparse2.SQLSyntaxError): sqparse2.parse('select * from t1 where')
def test_multi_stmt():
"make sure that multi statement strings fail loudly (rather than silently skipping the extras)"
with pytest.raises(sqparse2.SQLSyntaxError): sqparse2.parse('select * from t1; update t2 set a=3')
def test_case():
"parse case stmt"
x=sqparse2.parse('select case when x=3 then 10 when x=4 then 20 else 30 end from t1')
assert len(x.cols.children)==1 # i.e. make sure the case isn't getting distributed across columns somehow
casex,=x.cols.children
assert len(casex.cases)==2
assert casex.elsex==sqparse2.Literal(30)
def test_parse_tuple_in():
x=sqparse2.parse('select * from t1 where (a,b) in %s')
assert isinstance(x.where.left,sqparse2.CommaX)
def test_parse_is_not(): assert sqparse2.parse('select * from t1 where a is not null').where.op.op=='is not'
def test_parse_index():
stmts=[
sqparse2.parse('create index on t1 (a,b)'),
sqparse2.parse('create index on t1 (a,b) where a<30'),
sqparse2.parse('create index on t1 using gist (a,b) where x=5'),
]
assert all(isinstance(x,sqparse2.IndexX) for x in stmts)
def test_parse_delete():
from pg13.sqparse2 import NameX,OpX,BinX,Literal
assert sqparse2.parse('delete from t1 where a=3')==sqparse2.DeleteX(
't1',
BinX(OpX('='), NameX('a'), Literal(3)),
None
)
def test_attr():
from pg13.sqparse2 import AttrX,NameX
assert sqparse2.parse('a.b')==AttrX(NameX('a'),NameX('b'))
assert sqparse2.parse('a.*')==AttrX(NameX('a'),sqparse2.AsterX())
with pytest.raises(sqparse2.SQLSyntaxError): sqparse2.parse('a.b.c')
def test_join_syntax():
from pg13.sqparse2 import JoinX,BinX,OpX,NameX,AttrX,CommaX,AsterX,JoinTypeX
inner_join = JoinTypeX(None,False,None)
ex=sqparse2.parse('select * from t1,t2 where t1.x=t2.y')
assert all(isinstance(x,sqparse2.AttrX) for x in (ex.where.left,ex.where.right))
assert sqparse2.parse('select * from t1 join t2').tables==[
JoinX('t1','t2',None,inner_join)
]
assert sqparse2.parse('select * from t1 join t2 on x=y').tables==[JoinX(
't1','t2',
BinX(OpX('='), NameX('x'), NameX('y')),
inner_join
)]
x = sqparse2.parse('select t1.* from t1 join t2 on x=y and z=a')
assert x.cols==CommaX([AttrX(NameX('t1'),AsterX())])
assert x.tables==[JoinX(
't1','t2',
BinX(OpX('and'),BinX(OpX('='),NameX('x'),NameX('y')),BinX(OpX('='),NameX('z'),NameX('a'))),
inner_join
)]
assert sqparse2.parse('select t1.*,t2.* from t1 join t2 on x=y').cols==CommaX([
AttrX(NameX('t1'),AsterX()),
AttrX(NameX('t2'),AsterX())
])
assert sqparse2.parse('select * from t1 join t2 on t1.x=t2.y').tables==[JoinX(
't1','t2',
BinX(OpX('='),AttrX(NameX('t1'),NameX('x')),AttrX(NameX('t2'),NameX('y'))),
inner_join
)]
def test_jointype():
from pg13.sqparse2 import JoinTypeX
assert JoinTypeX('left',True,None) == sqparse2.parse('select * from t1 left outer join t2').tables[0].jointype
assert JoinTypeX(None,False,None) == sqparse2.parse('select * from t1 inner join t2').tables[0].jointype
assert JoinTypeX('full',True,None) == sqparse2.parse('select * from t1 full join t2').tables[0].jointype
def test_xgetset():
"tree-aware getitem/setitem for expressions"
from pg13.sqparse2 import NameX,AttrX
x = sqparse2.parse('select * from t1,t2 where t1.x=t2.y')
# named get
assert x['where','left','attr']==NameX('x')
# named set
x['where','left','attr'] = 'hello'
assert x['where','left']==AttrX(NameX('t1'),'hello')
# index get
assert x[('tables',0),]=='t1'
# index set
x[('tables',0),] = 'hello'
assert x.tables[0] == 'hello'
assert x[('tables',0),] == 'hello'
def test_mult_vs_aster():
"make sure that asterisk is handled differently from the multiplication operator"
from pg13.sqparse2 import AsterX,BinX,OpX,NameX
assert sqparse2.parse('select *,a*b from t1').cols.children==[
AsterX(),
BinX(OpX('*'),NameX('a'),NameX('b'))
]
|
423185
|
symbology = 'code93'
cases = [
('001.png', 'CODE93', dict(includetext=True)),
('002.png', 'CODE93^SFT/A', dict(parsefnc=True, includecheck=True)),
]
|
423265
|
import os
import contextlib
import hou
import sys
from collections import deque
import pyblish.api
import openpype.api
import openpype.hosts.houdini.api.usd as hou_usdlib
from openpype.hosts.houdini.api.lib import render_rop
class ExitStack(object):
"""Context manager for dynamic management of a stack of exit callbacks.
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception
"""
def __init__(self):
self._exit_callbacks = deque()
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance"""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods"""
def _exit_wrapper(*exc_details):
return cm_exit(cm, *exc_details)
_exit_wrapper.__self__ = cm
self.push(_exit_wrapper)
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature.
Can suppress exceptions the same way __exit__ methods can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself)
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
            # Not a context manager, so assume it's a callable
self._exit_callbacks.append(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection
_exit_wrapper.__wrapped__ = callback
self.push(_exit_wrapper)
return callback # Allow use as a decorator
def enter_context(self, cm):
"""Enters the supplied context manager
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with
# statement
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def close(self):
"""Immediately unwind the context stack"""
self.__exit__(None, None, None)
def __enter__(self):
return self
def __exit__(self, *exc_details):
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
while 1:
exc_context = new_exc.__context__
if exc_context in (None, frame_exc):
break
new_exc = exc_context
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
while self._exit_callbacks:
cb = self._exit_callbacks.pop()
try:
if cb(*exc_details):
suppressed_exc = True
exc_details = (None, None, None)
except Exception:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
if not self._exit_callbacks:
raise
exc_details = new_exc_details
return suppressed_exc
@contextlib.contextmanager
def parm_values(overrides):
"""Override Parameter values during the context."""
originals = []
try:
for parm, value in overrides:
originals.append((parm, parm.eval()))
parm.set(value)
yield
finally:
for parm, value in originals:
# Parameter might not exist anymore so first
# check whether it's still valid
if hou.parm(parm.path()):
parm.set(value)
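# Usage sketch for parm_values (node path and parm name are illustrative):
#
#     rop = hou.node("/out/usd_rop1")
#     with parm_values([(rop.parm("trange"), 1)]):
#         rop.render()  # renders with the temporary override, then restores it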
class ExtractUSDLayered(openpype.api.Extractor):
order = pyblish.api.ExtractorOrder
label = "Extract Layered USD"
hosts = ["houdini"]
families = ["usdLayered", "usdShade"]
# Force Output Processors so it will always save any file
# into our unique staging directory with processed Avalon paths
output_processors = ["avalon_uri_processor", "stagingdir_processor"]
def process(self, instance):
self.log.info("Extracting: %s" % instance)
staging_dir = self.staging_dir(instance)
fname = instance.data.get("usdFilename")
# The individual rop nodes are collected as "publishDependencies"
dependencies = instance.data["publishDependencies"]
ropnodes = [dependency[0] for dependency in dependencies]
assert all(
node.type().name() in {"usd", "usd_rop"} for node in ropnodes
)
# Main ROP node, either a USD Rop or ROP network with
# multiple USD ROPs
node = instance[0]
# Collect any output dependencies that have not been processed yet
# during extraction of other instances
outputs = [fname]
active_dependencies = [
dep
for dep in dependencies
if dep.data.get("publish", True)
and not dep.data.get("_isExtracted", False)
]
for dependency in active_dependencies:
outputs.append(dependency.data["usdFilename"])
pattern = r"*[/\]{0} {0}"
save_pattern = " ".join(pattern.format(fname) for fname in outputs)
# Run a stack of context managers before we start the render to
# temporarily adjust USD ROP settings for our publish output.
rop_overrides = {
# This sets staging directory on the processor to force our
# output files to end up in the Staging Directory.
"stagingdiroutputprocessor_stagingDir": staging_dir,
# Force the Avalon URI Output Processor to refactor paths for
# references, payloads and layers to published paths.
"avalonurioutputprocessor_use_publish_paths": True,
# Only write out specific USD files based on our outputs
"savepattern": save_pattern,
}
overrides = list()
with ExitStack() as stack:
for ropnode in ropnodes:
manager = hou_usdlib.outputprocessors(
ropnode,
processors=self.output_processors,
disable_all_others=True,
)
stack.enter_context(manager)
# Some of these must be added after we enter the output
# processor context manager because those parameters only
# exist when the Output Processor is added to the ROP node.
for name, value in rop_overrides.items():
parm = ropnode.parm(name)
assert parm, "Parm not found: %s.%s" % (
ropnode.path(),
name,
)
overrides.append((parm, value))
stack.enter_context(parm_values(overrides))
# Render the single ROP node or the full ROP network
render_rop(node)
# Assert all output files in the Staging Directory
for output_fname in outputs:
path = os.path.join(staging_dir, output_fname)
assert os.path.exists(path), "Output file must exist: %s" % path
# Set up the dependency for publish if they have new content
# compared to previous publishes
for dependency in active_dependencies:
dependency_fname = dependency.data["usdFilename"]
filepath = os.path.join(staging_dir, dependency_fname)
similar = self._compare_with_latest_publish(dependency, filepath)
if similar:
# Deactivate this dependency
self.log.debug(
"Dependency matches previous publish version,"
" deactivating %s for publish" % dependency
)
dependency.data["publish"] = False
else:
self.log.debug("Extracted dependency: %s" % dependency)
# This dependency should be published
dependency.data["files"] = [dependency_fname]
dependency.data["stagingDir"] = staging_dir
dependency.data["_isExtracted"] = True
# Store the created files on the instance
if "files" not in instance.data:
instance.data["files"] = []
instance.data["files"].append(fname)
def _compare_with_latest_publish(self, dependency, new_file):
from avalon import api, io
import filecmp
_, ext = os.path.splitext(new_file)
# Compare this dependency with the latest published version
# to detect whether we should make this into a new publish
# version. If not, skip it.
asset = io.find_one(
{"name": dependency.data["asset"], "type": "asset"}
)
subset = io.find_one(
{
"name": dependency.data["subset"],
"type": "subset",
"parent": asset["_id"],
}
)
if not subset:
# Subset doesn't exist yet. Definitely new file
self.log.debug("No existing subset..")
return False
version = io.find_one(
{"type": "version", "parent": subset["_id"], },
sort=[("name", -1)]
)
if not version:
self.log.debug("No existing version..")
return False
representation = io.find_one(
{
"name": ext.lstrip("."),
"type": "representation",
"parent": version["_id"],
}
)
if not representation:
self.log.debug("No existing representation..")
return False
old_file = api.get_representation_path(representation)
if not os.path.exists(old_file):
return False
return filecmp.cmp(old_file, new_file)
|
423277
|
from spacy.tokens import Doc
from spacy.language import Language
import operator
@Language.factory("healthsea.aggregation.v1")
def create_clause_aggregation(nlp: Language, name: str):
return ClauseAggregation(nlp, name)
class ClauseAggregation:
"""Aggregate the predicted effects from the clausecat and apply the patient information logic"""
def __init__(self, nlp: Language, name: str):
self.nlp = nlp
self.name = name
def __call__(self, doc: Doc):
patient_information = []
health_effects = {}
for clause in doc._.clauses:
classification = max(clause["cats"].items(), key=operator.itemgetter(1))[0]
if not clause["has_ent"]:
if len(patient_information) > 0:
patient_information[-1][1].append(classification)
continue
entity = str(clause["ent_name"]).replace(" ", "_").strip().lower()
# Collect patient information
if classification == "ANAMNESIS" and entity is not None:
patient_information.append((entity, []))
# Collect health effects
if entity is not None:
if entity not in health_effects:
health_effects[entity] = {
"effects": [],
"effect": "NEUTRAL",
"label": str(clause["blinder"])
.replace("_", "")
.replace("_", ""),
"text": clause["ent_name"],
}
health_effects[entity]["effects"].append(classification)
# Add patient information to list of health effects
for patient_health in patient_information:
entity = patient_health[0]
health_effects[entity]["effects"] += patient_health[1]
# Aggregate health effects
for entity in health_effects:
score = 0
for classification in health_effects[entity]["effects"]:
if classification == "POSITIVE":
score += 1
elif classification == "NEGATIVE":
score -= 1
if score > 0:
aggregated_classification = "POSITIVE"
elif score < 0:
aggregated_classification = "NEGATIVE"
else:
aggregated_classification = "NEUTRAL"
health_effects[entity]["effect"] = aggregated_classification
doc.set_extension("health_effects", default={}, force=True)
doc._.health_effects = health_effects
return doc
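# Usage sketch (assumes an upstream component populates doc._.clauses):
#
#     nlp.add_pipe("healthsea.aggregation.v1", last=True)
#     doc = nlp("This supplement helped my joint pain")
#     print(doc._.health_effects)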
|
423326
|
from __future__ import absolute_import, division, print_function
from builtins import bytes, range, int
import math
from itertools import combinations
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from random import randint
from copy import deepcopy
from Crypto.Cipher import AES
from CryptoAttacks.Utils import random_bytes, xor, i2b, b2i, b2h, log
from CryptoAttacks.Math import egcd, invmod
def deg(n):
"""Find degree of polynomial
Args:
n(Polynomial_128/list/int)
Returns:
int
"""
if isinstance(n, Polynomial_128):
n = n.coefficients
if type(n) == list:
for d in reversed(range(len(n))):
if n[d].to_int() != 0:
return d
return -1
else:
if n == 0:
return -1
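        # Note: for plain ints this returns the bit length (degree + 1); the
        # callers in Polynomial_2 and GF_2k rely on that offset consistently.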
return int(math.floor(math.log(n, 2) + 1))
class Polynomial_2():
"""Polynomial with coefficients in GF(2)"""
def __init__(self, coefficients):
"""x^3 + x + 1 == 0b1101 == [3, 1, 0]"""
self.coefficients = Polynomial_2.convert_coefficients(coefficients)
@staticmethod
def convert_coefficients(coefficients):
if type(coefficients) == list:
coefficients = Polynomial_2.list_to_int(coefficients)
elif type(coefficients) == bytes:
# reverse bit order
coefficients = int(''.join(map(lambda x: '{:08b}'.format(x), coefficients))[::-1], 2)
elif isinstance(coefficients, int):
pass
else:
raise ValueError("Bad coefficients: {} ({})".format(coefficients, type(coefficients)))
return coefficients
@staticmethod
def egcd(a, b):
"""Extended Euclidean algorithm"""
a, b = map(Polynomial_2, [a, b])
s0, t0, s1, t1 = map(Polynomial_2, [1, 0, 0, 1])
while b.coefficients:
q, a, b = a/b, b, a%b
s0, s1 = s1, s0 - q*s1
t0, t1 = t1, t0 - q*t1
return a, s0, t0
@staticmethod
def list_to_int(coefficients):
result = 0
for coef in coefficients:
result |= 1<<coef
return result
def __str__(self):
return self.to_poly()
def __getitem__(self, no):
if not isinstance(no, int):
return 'No must be a number'
        if no < 0 or no >= len(self.to_bits()):
return 'Bad no'
return int(self.to_bits()[no])
def to_bits(self):
return '{:b}'.format(self.coefficients)[::-1]
def to_int(self):
return self.coefficients
def to_poly(self):
if self.coefficients == 0:
return '0'
result = ''
for i, coef in enumerate(self.to_bits()):
if coef == '1':
result = 'x^{} + '.format(i) + result
return result[:-3]
def to_list(self):
return list(map(int, list(self.to_bits())))
def __add__(self, other):
return Polynomial_2(self.coefficients ^ other.coefficients)
def __sub__(self, other):
return self + other
def __mul__(self, other):
if isinstance(other, int):
other = Polynomial_2(other)
p = 0
a = self.coefficients
b = other.coefficients
while a > 0:
if a & 1:
p = p ^ b
a = a >> 1
b = b << 1
return Polynomial_2(p)
def __rmul__(self, other):
return self.__mul__(other)
def __divmod__(self, other):
a = self.coefficients
b = other.coefficients
q, r = 0, a
while deg(r) >= deg(b):
d = deg(r) - deg(b)
q = q ^ (1 << d)
r = r ^ (b << d)
return Polynomial_2(q), Polynomial_2(r)
def __mod__(self, other):
return self.__divmod__(other)[1]
def __div__(self, other):
return self.__divmod__(other)[0]
def __floordiv__(self, other):
return self.__div__(other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, y):
p = Polynomial_2(1)
b = Polynomial_2(self.coefficients)
while y > 0:
if y & 1:
p *= b
y >>= 1
b *= b
return p
def __eq__(self, other):
return self.coefficients == other.coefficients
def __hash__(self):
return hash(self.to_int())
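# Example: addition in GF(2)[x] is XOR of the coefficient masks, so
# (x^3 + x + 1) + (x + 1) == x^3:
#
#     assert Polynomial_2([3, 1, 0]) + Polynomial_2([1, 0]) == Polynomial_2([3])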
class GF_2k():
"""GF(2^k) with elements represented as polynomials with coefficients in GF(2)"""
def __init__(self, coefficients, k, modulus):
"""x^3 + x + 1 == 0b1101"""
self.coefficients = Polynomial_2.convert_coefficients(coefficients)
self.k = k
if isinstance(modulus, Polynomial_2):
self.modulus = modulus
else:
self.modulus = Polynomial_2(modulus)
tmp = Polynomial_2(self.coefficients) % self.modulus
self.coefficients = tmp.coefficients
def __str__(self):
return self.to_poly()
def __repr__(self):
return self.__str__()
def __getitem__(self, no):
if not isinstance(no, int):
return 'No must be a number'
        if no < 0 or no >= len(self.to_bits()):
return 'Bad no'
return int(self.to_bits()[no])
def to_bytes(self):
return bytes(i2b(int(self.to_bits(), 2)).rjust(self.k//8, bytes(b'\x00')))
def to_bits(self):
return '{:b}'.format(self.coefficients).zfill(self.k)[::-1]
def to_int(self):
return self.coefficients
def to_poly(self):
if self.coefficients == 0:
return '0'
result = ''
for i, coef in enumerate(self.to_bits()):
if coef == '1':
result = 'x^{} + '.format(i) + result
return result[:-3]
def to_list(self):
return list(map(int, list(self.to_bits())))
def __add__(self, other):
return GF_2k(self.coefficients ^ other.coefficients, self.k, self.modulus)
def __sub__(self, other):
return self + other
def __mul__(self, other):
if isinstance(other, int):
other = GF_2k(other, self.k, self.modulus)
p = 0
a = self.coefficients
b = other.coefficients
m = self.modulus.coefficients
while a > 0:
if a & 1:
p = p ^ b
a = a >> 1
b = b << 1
if deg(b) == deg(m):
b = b ^ m
return GF_2k(p, self.k, self.modulus)
def invmod(self):
"""Modular inverse. a*invmod(a) == 1 (mod n)"""
d, s, t = Polynomial_2.egcd(self.coefficients, self.modulus.coefficients)
if d.coefficients != 1:
raise ValueError("Modular inverse doesn't exists ({}**(-1) % {})".format(self, self.modulus))
return GF_2k(s.coefficients, self.k, self.modulus)
def __mod__(self, other):
log.error('Modulo not allowed')
return None
# result = Polynomial_2(self.coefficients) % Polynomial_2(other.coefficients)
# return GF_2k(result.coefficients, self.k, self.modulus)
def __div__(self, other):
return self * other.invmod()
def __floordiv__(self, other):
return self.__div__(other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, y):
p = GF_2k(1, self.k, self.modulus)
b = GF_2k(self.coefficients, self.k, self.modulus)
while y > 0:
if y & 1:
p *= b
y >>= 1
b *= b
return p
def __eq__(self, other):
return self.k == other.k and self.coefficients == other.coefficients
def __hash__(self):
return hash(self.to_bytes() + bytes(b'-') + bytes(self.k))
class GF_2k_generator():
"""Helper for generating GF(2^k) with given k and modulus"""
def __init__(self, k, modulus):
self.k = k
self.modulus = modulus
def __call__(self, coefficients):
return GF_2k(coefficients, self.k, self.modulus)
class Polynomial_128():
"""Polynomial with coefficients in GF(2^128)"""
def __init__(self, coefficients):
"""12*x^2 + x + 43 == [GF_2k(43,128,m), GF_2k(1,128,m), GF_2k(12,128,m)]"""
self.coefficients = coefficients
self.k = self.coefficients[0].k
self.modulus = self.coefficients[0].modulus
for no in range(len(self.coefficients)):
if self.coefficients[no].k != self.k:
raise ValueError("Coefficients not consistient: k=={}, coef[{}].k=={}".format(self.k, no, self.coefficients[no].k))
if self.coefficients[no].modulus.coefficients != self.modulus.coefficients:
raise ValueError("Coefficients not consistient: modulus=={}, coef[{}].modulus=={}".format(self.modulus, no, self.coefficients[no].modulus))
def __add__(self, other):
return Polynomial_128(
[a+b for a,b in zip_longest(self.coefficients, other.coefficients,
fillvalue=GF_2k(0,self.k,self.modulus))])
def __sub__(self, other):
return self + other
def __str__(self):
if len(self.coefficients) == 0 or deg(self) < 0:
return '0'
result = ''
for i, coef in enumerate(self.coefficients):
if coef.to_int() != 0:
if coef.to_int() == 1:
result = 'x^{} + '.format(i) + result
else:
result = '{}*x^{} + '.format(coef.to_int(), i) + result
return result[:-3]
def __repr__(self):
return self.__str__()
def __mul__(self, other):
if isinstance(other, GF_2k):
other = Polynomial_128([other])
if self.is_zero() or other.is_zero():
return self.zero_element()
k = deg(self) + 1
l = deg(other) + 1
c = [GF_2k(0,self.k,self.modulus)]*(k+l-1)
for i in range(k):
for j in range(l):
c[i+j] += self.coefficients[i]*other.coefficients[j]
return Polynomial_128(c)
def __divmod__(self, other):
k = deg(self) + 1
l = deg(other) + 1
if k < l:
return Polynomial_128([GF_2k(0,self.k,self.modulus)]), self
t = other.coefficients[l-1].invmod()
r = [a for a in self.coefficients]
q = [GF_2k(0,self.k,self.modulus)]*(k-l+1)
for i in reversed(range(k-l+1)):
q[i] = t*r[i+l-1]
for j in range(l):
r[i+j] -= q[i]*other.coefficients[j]
return Polynomial_128(q), Polynomial_128(r)
def __mod__(self, other):
return self.__divmod__(other)[1]
def __div__(self, other):
if isinstance(other, GF_2k):
return self.__divmod__(Polynomial_128([other]))[0]
else:
return self.__divmod__(other)[0]
def __floordiv__(self, other):
return self.__div__(other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, y):
p = self.one_element()
b = Polynomial_128(self.coefficients)
while y > 0:
if y & 1:
p *= b
y >>= 1
b *= b
return p
def powmod(self, y, m):
p = self.one_element()
b = Polynomial_128(self.coefficients) % m
while y > 0:
if y & 1:
p = (p*b) % m
y >>= 1
b = (b*b) % m
return p
def __eq__(self, other):
return self.k == other.k and self.modulus == other.modulus and\
all([a == b for a,b in zip(self.coefficients, other.coefficients)])
def __hash__(self):
return hash(''.join(map(str,map(hash, self.coefficients))))
def is_zero(self):
return len(self.coefficients) == 0 or deg(self) < 0
def is_one(self):
return deg(self) == 0 and self.coefficients[0].to_int() == 1
def zero_element(self):
"""0*x^0"""
return Polynomial_128([GF_2k(0,self.k,self.modulus)])
def one_element(self):
"""1*x^0"""
return Polynomial_128([GF_2k(1,self.k,self.modulus)])
def element(self):
"""x^1"""
return Polynomial_128([GF_2k(0,self.k,self.modulus), GF_2k(1,self.k,self.modulus)])
def monic(self):
f = deepcopy(self)
if self.coefficients[deg(f)].to_int() != 1:
f /= self.coefficients[deg(f)]
return f
aes_polynomial = GF_2k_generator(128, [128, 7, 2, 1, 0])
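# Sanity sketch for the GCM field: multiplication is commutative and the
# element "1" is the multiplicative identity (random_bytes is imported above):
#
#     a = aes_polynomial(random_bytes(16))
#     b = aes_polynomial(random_bytes(16))
#     assert a * b == b * a and a * aes_polynomial(1) == a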
def encrypt_ctr(plaintext, key, nonce, block_size=16, initial_value=0):
aes = AES.new(key, AES.MODE_ECB)
key_stream = bytes(b'')
for counter in range(int(math.ceil(len(plaintext)/16.))):
key_stream += bytes(aes.encrypt(nonce + i2b(counter+initial_value, size=8*(block_size-len(nonce)), endian='big')))
return xor(plaintext, key_stream)
def aes_bytes_to_poly_blocks(ciphertext, additional, block_size=16):
"""Convert ciphertext to list of GF(2^128)"""
size_additional = len(additional)*8
size_ciphertext = len(ciphertext)*8
if len(ciphertext) % block_size != 0:
ciphertext += bytes(b'\x00' * (block_size - len(ciphertext)%block_size))
if len(additional) % block_size != 0:
additional += bytes(b'\x00' * (block_size - len(additional)%block_size))
blocks = []
blocks.extend([additional[block_size*i:(block_size*i)+block_size] for i in range(len(additional)//block_size)])
blocks.extend([ciphertext[block_size*i:(block_size*i)+block_size] for i in range(len(ciphertext)//block_size)])
blocks.append(i2b(size_additional, size=(block_size//2)*8, endian='big') + i2b(size_ciphertext, size=(block_size//2)*8, endian='big'))
blocks = list(map(aes_polynomial, blocks))
return blocks
def poly_blocks_to_aes_bytes(blocks, block_size=16):
"""Convert list of GF(2^128) to ciphertext"""
blocks = list(map(lambda x: x.to_bytes(), blocks))
sizes = blocks[-1]
size_additional = b2i(sizes[:block_size//2], endian='big')
size_ciphertext = b2i(sizes[block_size//2:], endian='big')
size_additional_padded = size_additional//8
if size_additional_padded % block_size != 0:
size_additional_padded += 16 - size_additional_padded % block_size
blocks = bytes(b''.join(blocks[:-1]))
additional = blocks[:size_additional//8]
ciphertext = blocks[size_additional_padded:size_additional_padded + size_ciphertext//8]
return ciphertext, additional
def gcm_compute_parts(additional='', key=None, nonce=None, auth_key=None, s=None, plaintext='', ciphertext='', block_size=16):
if nonce is not None and len(nonce) != 12:
log.error('nonce length must be 12')
return None, None, None
if nonce is None or key is None:
if None in (ciphertext, s):
log.error('nonce can\'t be None if ciphertext, auth_key or s is None')
return None, None, None
blocks = []
if auth_key is None:
auth_key = bytes(AES.new(key, AES.MODE_ECB).encrypt(bytes(b'\x00'*block_size)))
h = aes_polynomial(auth_key)
if ciphertext == '':
ciphertext = encrypt_ctr(plaintext, key, nonce, block_size, 2)
size_additional = len(additional)*8
size_ciphertext = len(ciphertext)*8
if len(additional) % block_size != 0:
additional += bytes(b'\x00'*(block_size - len(additional)%block_size))
if len(ciphertext) % block_size != 0:
ciphertext += bytes(b'\x00'*(block_size - len(ciphertext)%block_size))
blocks.extend([additional[block_size*i:(block_size*i)+block_size] for i in range(len(additional)//block_size)])
blocks.extend([ciphertext[block_size*i:(block_size*i)+block_size] for i in range(len(ciphertext)//block_size)])
blocks.append(i2b(size_additional, size=(block_size//2)*8, endian='big') + i2b(size_ciphertext, size=(block_size//2)*8, endian='big'))
    blocks = list(map(aes_polynomial, blocks))  # materialize: the loop below would otherwise exhaust the iterator
g = aes_polynomial(0)
for b in blocks:
g = g + b
g = g * h
if s is None:
s = bytes(AES.new(key, AES.MODE_ECB).encrypt(nonce+i2b(1, size=4*8, endian='big')))
s = aes_polynomial(s)
t = g + s
    return blocks, t, s
def gcm_encrypt(plaintext, additional, key, nonce, tag_size=128, block_size=16):
if len(nonce) != 12:
log.error('nonce length must be 12')
return None, None
ciphertext = encrypt_ctr(plaintext, key, nonce, block_size, 2)
_, t, _ = gcm_compute_parts(ciphertext=ciphertext, additional=additional, key=key, nonce=nonce, block_size=16)
return ciphertext, t.to_bytes()[:tag_size//8]
def gcm_verify(tag, ciphertext, additional, key, nonce, tag_size=128, block_size=16):  # tag_size in bits, as in gcm_encrypt
_, t, _ = gcm_compute_parts(ciphertext=ciphertext, additional=additional, key=key, nonce=nonce, block_size=16)
return t.to_bytes()[:tag_size//8], t.to_bytes()[:tag_size//8] == tag
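# Round-trip sketch (tag sizes are in bits, matching gcm_encrypt):
#
#     key, nonce = random_bytes(16), random_bytes(12)
#     ct, tag = gcm_encrypt(b'attack at dawn', b'header', key, nonce)
#     _, ok = gcm_verify(tag, ct, b'header', key, nonce, tag_size=128)
#     assert ok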
def compute_s(tag, ciphertext, additional, auth_key):
blocks = aes_bytes_to_poly_blocks(ciphertext, additional)
t = aes_polynomial(tag)
h = aes_polynomial(auth_key)
g = aes_polynomial(0)
for b in blocks:
g = g + b
g = g * h
s = t - g
return s.to_bytes()
def gcm_forge_tag(ciphertext, additional, auth_key, valid_ciphertext, valid_additional, valid_tag, tag_size=128):
s = compute_s(valid_tag, valid_ciphertext, valid_additional, auth_key)
blocks, t, s = gcm_compute_parts(ciphertext=ciphertext, additional=additional, auth_key=auth_key, s=s)
return t.to_bytes()[:tag_size//8]
def derivate(f):
    if deg(f) == 0:
        return f.zero_element()
    # Formal derivative in characteristic 2: the coefficient of x^i is
    # (i+1)*c_{i+1}, i.e. c_{i+1} when i+1 is odd and 0 when it is even.
    zero = GF_2k(0, f.k, f.modulus)
    return Polynomial_128([c if (i + 1) % 2 == 1 else zero
                           for i, c in enumerate(f.coefficients[1:])])
def pth_root(f, p, w):
return Polynomial_128([pow(a, p**(w-1)) for a in f.coefficients])
def polynomial_gcd(f, g):
if f.is_one():
return f
if g.is_one():
return g
while deg(g) >= 0:
t = g
g = f % g
f = t
return f
def factor_square_free(f):
# make it monic
f = f.monic()
# square free
L = []
s = 1
p = 2
w = f.k
q = p**w
while True:
j = 1
f_derivative = derivate(f)
d = polynomial_gcd(f, f_derivative)
g = f / d
while not g.is_one():
f /= g
h = polynomial_gcd(f, g)
m = g / h
if not m.is_one():
L.append((m, j*s))
g = h
j += 1
if not f.is_one():
f = pth_root(f, p, w)
s = p*s
if f.is_one():
break
return L
def factor_distinct_degree(f):
L_dd = []
h = f.element() % f
k = 0
q = 2**f.k
while not f.is_one():
h = h.powmod(q, f)
k += 1
g = polynomial_gcd(h-h.element(), f)
if not g.is_one():
L_dd.append((g, k))
f /= g
h %= f
return L_dd
def random_polynomial(f):
    # Coefficients should be random elements of GF(2^k), not just in [0, k].
    return Polynomial_128([GF_2k(randint(0, 2**f.k - 1), f.k, f.modulus) for _ in range(deg(f))]) % f
def factor_equal_degree(f, f_degree):
n = deg(f)
    r = n // f_degree
S = set([f])
q = 2**f.k
while len(S) < r:
h = random_polynomial(f)
g = polynomial_gcd(h, f)
if g.is_one():
            g = h.powmod((q**f_degree - 1) // 3 - 1, f)
S_tmp = set()
for u in S:
if deg(u) == f_degree:
continue
d = polynomial_gcd(g, u)
if d.is_one() or d == u:
S_tmp.add(u)
else:
S_tmp.update(set([d, u / d]))
S = S_tmp
return S
def factor_polynomial(f):
log.debug('factoring {}'.format(f))
factorization = []
L_sf = factor_square_free(f)
for f_sf, power_sf in L_sf:
L_dd = factor_distinct_degree(f_sf)
for f_dd, f_dd_degree in L_dd:
L_ed = factor_equal_degree(f_dd, f_dd_degree)
for f_ed in L_ed:
factorization.append((f_ed, power_sf))
return factorization
def recover_key_repated_nonce(ciphertexts_additionals_tags):
"""Recover authentication key for GCM given ciphertext encrypted with repeated nonce
    Sometimes fails (maybe a bug in the factorization).
Args:
ciphertexts_additionals_tags(list(tuple(bytes))): [(ciphertext, additional_data, auth_tag), ...]
Returns:
set(bytes): candidates for GCM auth key
"""
auth_key_candidates = set()
pair_count = 0
for (ciphertext1, additional1, tag1), (ciphertext2, additional2, tag2) in combinations(ciphertexts_additionals_tags, 2):
pair_count += 1
log.debug('Trying pair no {}'.format(pair_count))
p1 = aes_bytes_to_poly_blocks(ciphertext1, additional1)
t1 = aes_polynomial(tag1)
p1 = Polynomial_128([t1]+p1[::-1]) # first element is x0
p2 = aes_bytes_to_poly_blocks(ciphertext2, additional2)
t2 = aes_polynomial(tag2)
p2 = Polynomial_128([t2]+p2[::-1])
auth_key_candidates_tmp = set()
factorization = factor_polynomial(p1+p2)
for f, f_degree in factorization:
if deg(f) == 1:
log.debug('auth key candidate: {}'.format(f.monic()))
key_candidate = f.monic().coefficients[0].to_bytes()
auth_key_candidates_tmp.add(key_candidate)
if len(auth_key_candidates) > 0:
auth_key_candidates.intersection_update(auth_key_candidates_tmp)
else:
auth_key_candidates = auth_key_candidates_tmp
if len(auth_key_candidates) == 1:
break
log.success('Found {} auth key candidates'.format(len(auth_key_candidates)))
return auth_key_candidates
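# Attack sketch: given two captures made under a repeated nonce, recover
# candidate auth keys and forge a tag for a new message (all variables
# are illustrative):
#
#     candidates = recover_key_repated_nonce([(c1, a1, t1), (c2, a2, t2)])
#     for auth_key in candidates:
#         forged_tag = gcm_forge_tag(c_new, a_new, auth_key, c1, a1, t1)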
|
423373
|
import random
import os
import json
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
def noun_compound_paraphraser(text, compounds):
L = [[text]]
for k in compounds.keys():
if k in L[0][0]:
new = []
for l in L:
for text in l:
n = [text.replace(k,paraphrase[0]) for paraphrase in compounds[k]]
new.extend(n)
L.append(new)
# Flatten the list and ignore the first (the original text)
paraphrases = [x for u in L[1:] for x in u]
return paraphrases
class NounCompoundParaphraser(SentenceOperation):
"""Replaces two-word noun compounds with a paraphrase.
Args:
        max_paraphrases: Maximum number of paraphrases per noun compound.
All combinations of paraphrases will be returned whenever more
than one noun compound from the dictionary is found in the sentence.
Default: 1.
        seed: initial seed. Default: 0.
"""
tasks = [
TaskType.TEXT_CLASSIFICATION,
TaskType.TEXT_TO_TEXT_GENERATION,
TaskType.TEXT_TAGGING,
]
languages = ["en"]
keywords = [
"lexical",
"rule-based",
"external-knowledge-based",
"unnatural-sounding",
"high-precision",
"low-coverage",
"high-generations"
]
def __init__(self, seed=0, max_paraphrases=1):
super().__init__(seed)
data_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data"
)
with open(os.path.join(data_dir, 'compound_paraphrases_semeval2013_task4.json'),'r') as fd:
compound_paraphrases = json.load(fd)
# Trim to only use up to `max_paraphrases` per compound. When sorting,
# by paraphrase score (frequency), randomly break ties.
random.seed(seed)
for k in compound_paraphrases.keys():
randomized_ties = sorted(compound_paraphrases[k], key=lambda i: (int(i[1]), random.random() ) , reverse=True)
compound_paraphrases[k] = randomized_ties[:max_paraphrases]
self.compound_paraphrases = compound_paraphrases
def generate(self, sentence):
perturbed_texts = noun_compound_paraphraser(
text=sentence,
compounds=self.compound_paraphrases,
)
return perturbed_texts
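# Usage sketch (relies on the bundled SemEval-2013 task 4 paraphrase file):
#
#     op = NounCompoundParaphraser(seed=0, max_paraphrases=2)
#     print(op.generate("I like olive oil"))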
|
423407
|
from test_helper import exchange_updates
import y_py as Y
def test_set():
d1 = Y.YDoc()
x = d1.get_map('test')
value = d1.transact(lambda txn : x.get(txn, 'key'))
assert value == None
d1.transact(lambda txn : x.set(txn, 'key', 'value1'))
value = d1.transact(lambda txn : x.get(txn, 'key'))
assert value == 'value1'
d1.transact(lambda txn : x.set(txn, 'key', 'value2'))
value = d1.transact(lambda txn : x.get(txn, 'key'))
assert value == "value2"
def test_set_nested():
d1 = Y.YDoc()
x = d1.get_map('test')
nested = Y.YMap({ "a": 'A' })
# check out to_json(), setting a nested map in set(), adding to an integrated value
d1.transact(lambda txn : x.set(txn, 'key', nested))
d1.transact(lambda txn : nested.set(txn, 'b', 'B'))
json = d1.transact(lambda txn : x.to_json(txn))
# TODO: Make this a deep diff
assert json == {
"key": {
"a": 'A',
"b": 'B'
}
}
def test_delete():
d1 = Y.YDoc()
x = d1.get_map('test')
d1.transact(lambda txn : x.set(txn, 'key', 'value1'))
len = d1.transact(lambda txn : x.length(txn))
value = d1.transact(lambda txn : x.get(txn, 'key'))
assert len == 1
assert value == "value1"
# TODO: Get length with __len__()
d1.transact(lambda txn : x.delete(txn, 'key'))
len = d1.transact(lambda txn : x.length(txn))
value = d1.transact(lambda txn : x.get(txn, 'key'))
assert len == 0
assert value == None
d1.transact(lambda txn : x.set(txn, 'key', 'value2'))
len = d1.transact(lambda txn : x.length(txn))
value = d1.transact(lambda txn : x.get(txn, 'key'))
assert len == 1
assert value == "value2"
def test_iterator():
d1 = Y.YDoc()
x = d1.get_map('test')
def test(txn):
x.set(txn, 'a', 1)
x.set(txn, 'b', 2)
x.set(txn, 'c', 3)
expected = {
'a': 1,
'b': 2,
'c': 3
}
for (key, val) in x.entries(txn):
v = expected[key]
assert val == v
del expected[key]
d1.transact(test)
|
423408
|
import requests
from unittest import TestCase
from test_arguments import test_print
from test_functions import compare_get_request, compare_post_request
class TestSubmit(TestCase):
def test_submit(self):
test_print("test_main_page starting")
headers = {'Accept':'text/plain'}
compare_get_request("/", test_name = "after_admin_login", headers = headers)
test_print("test_main_page completed")
test_print("test_get_submit_submissions_empty starting")
compare_get_request("submit")
compare_get_request("manage")
# working curl request
        #curl -X POST -H "Accept: text/plain" -H "X-authorization: <KEY>" -F id=green -F version="3" -F name="test" -F description="testd" -F citations="none" -F overwrite_merge="0" -F file=@"./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/BBa_I0462.xml" http://localhost:7777/submit
test_print("test_get_submit_submissions_empty completed")
test_print("test_create_id_missing starting")
data = {'version' : (None, '1'),
'name' : (None, 'testcollection'),
'description':(None, 'testdescription'),
'citations':(None, ''),
'overwrite_merge':(None, '0')}
files = {'file':("./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/BBa_I0462.xml",
open('./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/BBa_I0462.xml', 'rb'))}
with self.assertRaises(requests.exceptions.HTTPError):
compare_post_request("submit", data, headers = {"Accept": "text/plain"},
files = files, test_name = "missing_id")
test_print("test_create_id_missing completed")
test_print("test_create_and_delete_collections starting")
# create the collection
data = {'id':(None, 'testid'),
'version' : (None, '1'),
'name' : (None, 'testcollection'),
'description':(None, 'testdescription'),
'citations':(None, ''),
'overwrite_merge':(None, '0')}
files = {'file':("./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/BBa_I0462.xml",
open('./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/BBa_I0462.xml', 'rb'))}
compare_post_request("submit", data, headers = {"Accept": "text/plain"},
files = files, test_name = "submit_test_BBa")
with self.assertRaises(requests.exceptions.HTTPError):
compare_post_request("submit", data, headers = {"Accept": "text/plain"},
files = files, test_name = "submit_already_in_use")
# self.create_collection2()
compare_get_request("manage", test_name = "two_submissions")
compare_get_request("submit", test_name = "two_submissions")
# now remove the collections
compare_get_request('/user/:userId/:collectionId/:displayId/:version/removeCollection', route_parameters = ["testuser", "testid", "testid_collection", "1"])
compare_get_request('/user/:userId/:collectionId/:displayId/:version/removeCollection', route_parameters = ["testuser", "testid2", "testid2_collection", "1"], test_name = 'remove_second')
compare_get_request("manage", test_name = "no_submissions")
test_print("test_create_and_delete_collections completed")
test_print("create_collection2 starting")
data = {'id':(None, 'testid2'),
'version' : (None, '1'),
'name' : (None, 'testcollection2'),
'description':(None, 'testdescription'),
'citations':(None, ''),
'overwrite_merge':(None, '0')}
files = {'file':("./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/BBa_I0462.xml",
open('./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/BBa_I0462.xml', 'rb'))}
compare_post_request("submit", data, headers = {"Accept": "text/plain"},
files = files,
test_name = "create_2")
# delete collection
#compare_get_request("/user/testuser/testid2/testid_collection2/1/removeCollection")
test_print("create_collection2 completed")
test_print("make_new_collection starting")
# create the collection
data = {'id':(None, 'testid1'),
'version' : (None, '1'),
'name' : (None, 'testcollection1'),
'description':(None, 'testdescription1'),
'citations':(None, ''),
'overwrite_merge':(None, '0')}
files = {'file':("./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/toggle.xml",
open('./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/toggle.xml', 'rb'))}
compare_post_request("submit", data, headers = {"Accept": "text/plain"},
files = files, test_name = "generic_collection1")
test_print("make_new_collection completed")
# """ def test_bad_make_public(self):
# data = self.make_new_collection("1")
#
# data['tabState'] = 'new'
#
# # try to remove the collection but don't enter a id
# del data['id']
# with self.assertRaises(requests.exceptions.HTTPError):
# compare_post_request("/user/:userId/:collectionId/:displayId/:version/makePublic", route_parameters = ["testuser", "testid1", "testid_collection1", "1"], data = data)
# TODO: uncomment when this does raise an HTTPError in synbiohub
# """
test_print("test_make_public starting")
# get the view
compare_get_request("/user/:userId/:collectionId/:displayId/:version/makePublic", route_parameters = ["testuser", "testid0", "testid0_collection", "1"])
data['tabState'] = 'new'
# make the collection public
compare_post_request("/user/:userId/:collectionId/:displayId/:version/makePublic", route_parameters = ["testuser", "testid1", "testid1_collection", "1"], data = data)
# try to delete the collection
with self.assertRaises(requests.exceptions.HTTPError):
compare_get_request("/public/:collectionId/:displayId/:version/removeCollection", route_parameters = ["testid1", "testid1_collection", "1"], test_name = 'remove')
test_print("test_make_public completed")
test_print("creating new collection for test_attachment")
data = {'id':(None, 'test_attachment'),
'version' : (None, '1'),
'name' : (None, 'test_attachment'),
                'description':(None, 'used for testing the attachment endpoints'),
'citations':(None, ''),
'overwrite_merge':(None, '0')
}
files = {'file':("./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/toggle.xml", open('./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/toggle.xml', 'rb'))}
compare_post_request("submit", data, headers = {"Accept":"text/plain"}, files = files, test_name = "collection_for_test_attachment")
test_print("completed")
test_print("creating new collection for test_hash")
data = {'id':(None, 'test_hash'),
'version' : (None, '2'),
'name' : (None, 'test_hash'),
'description':(None, 'used for testing endpoints with hash built in.'),
'citations':(None, ''),
'overwrite_merge':(None, '0')
}
files = {'file':("./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/Measure.xml", open('./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/Measure.xml', 'rb'))}
compare_post_request("submit", data, headers = {"Accept":"text/plain"}, files = files, test_name = "collection_for_test_hash")
test_print("completed")
# def make_new_private_collection(self, uniqueid):
# create the collection
# data = {'id':(None, 'testid1'),
# 'version' : (None, '1'),
# 'name' : (None, 'testcollection1'),
# 'description':(None, 'testdescription'),
# 'citations':(None, ''),
# 'overwrite_merge':(None, '0')
# }
#
# files = {'file':("./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/toggle.xml", open('./SBOLTestRunner/src/main/resources/SBOLTestSuite/SBOL2/toggle.xml', 'rb'))}
#
# compare_post_request("submit", data, headers = {"Accept":
# "text/plain"}, files = files, test_name = "second_generic_collection")
#
|
423416
|
from a_stanford_data_processor import AStanfordDataProcessor
from stanford_car_dataset_augmented import StanfordCarAugmentedDataset
from stanford_car_dataset_augmented import preprocess_data
from stanford_cars_data_augmentation import StanfordCarsDataAugmentation
class StanfordAugmentedDataProcessor(AStanfordDataProcessor):
def __init__(self, path_images, transforms, path_human_readable_labels):
super().__init__(path_images, transforms, path_human_readable_labels)
def augment_data(self,
path_to_data_mat,
augmentation_multiple,
s3_bucket_name,
store_to_disk,
store_to_s3,
probability,
brightness,
contrast,
saturation,
hue
):
stanford_cars_data_augmentation = StanfordCarsDataAugmentation(path_to_data_mat=path_to_data_mat,
path_images=self.path_images)
augmented_data_directory = stanford_cars_data_augmentation.augment_data(
s3_bucket_name=s3_bucket_name,
store_to_disk=store_to_disk,
store_to_s3=store_to_s3,
augmentation_multiple=augmentation_multiple,
probability=probability,
brightness=brightness,
contrast=contrast,
saturation=saturation,
hue=hue)
return stanford_cars_data_augmentation.get_all_data_matrix(), augmented_data_directory
def preprocess_data(self, augmented_mat_data, validation_percentage, data_subset):
training_struct, validation_struct, unique_labels = preprocess_data(augmented_mat_data,
validation_percentage,
data_subset)
return training_struct, validation_struct, unique_labels
def get_data_generator(self, data_matrix):
return StanfordCarAugmentedDataset(data_matrix=data_matrix,
path_images=self.path_images,
transforms=self.transforms,
path_human_readable_labels=self.path_human_readable_labels)
|
423447
|
from graphilp.imports.ilpgraph import ILPGraph
def read(G):
""" Wrap a NetworkX graph class by an ILPGraph class
    The wrapper class is used to store the graph and the related variables of an optimisation problem
in a single entity.
:param G: a `NetworkX graph <https://networkx.org/documentation/stable/reference/introduction.html#graphs>`__
:return: an :py:class:`~graphilp.imports.ilpgraph.ILPGraph`
"""
result = ILPGraph()
result.set_nx_graph(G)
return result
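# Usage sketch (assumes networkx is installed):
#
#     import networkx as nx
#     ilp_graph = read(nx.petersen_graph())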
|
423503
|
from pathlib import Path
from shutil import copyfile
import click
from autoalbument.cli.lib.migrations import migrate_v1_to_v2
from autoalbument.cli.lib.yaml import yaml
from autoalbument.utils.click import should_write_file
@click.command()
@click.option(
"--config-dir",
type=click.Path(),
required=True,
help="Path to a directory with search.yaml",
)
def main(config_dir):
config_dir = Path(config_dir)
config_path = config_dir / "search.yaml"
config_backup_path = config_dir / "search.yaml.backup"
config = yaml.load(config_path)
version = config.get("_version", 1)
if version == 2:
click.echo("search.yaml is already uses the latest config format")
return
click.echo("Backing up the original search.yaml file to search.yaml.backup")
if should_write_file(config_backup_path):
copyfile(config_path, config_backup_path)
config = migrate_v1_to_v2(config)
yaml.dump(config, config_path)
click.echo("search.yaml is successfully updated to the latest config format")
if __name__ == "__main__":
main()
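# Usage sketch (script name is illustrative):
#
#     python migrate.py --config-dir ./my_autoalbument_config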
|
423509
|
import os
from glob import glob
from setuptools import find_packages
from setuptools import setup
package_name = 'crs_utils_py'
setup(
name=package_name,
version='0.0.0',
# Packages to export
packages=find_packages(),
# Files we want to install, specifically launch files
data_files=[
('share/ament_index/resource_index/packages', ['resource/' + package_name]),
# Include our package.xml file
(os.path.join('share', package_name), ['package.xml']),
# Include all launch files.
(os.path.join('share', package_name, 'crs_utils_py'), glob('*.py'))
],
# This is important as well
install_requires=['setuptools'],
zip_safe=True,
author='ROS 2 Developer',
author_email='<EMAIL>',
maintainer='ROS 2 Developer',
maintainer_email='<EMAIL>',
keywords=['ROS', 'ROS2'],
classifiers=[
'Intended Audience :: Developers',
'License :: TODO',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='My awesome package.',
license='TODO',
# Like the CMakeLists add_executable macro, you can add your python
# scripts here.
entry_points={
'console_scripts': [
'joint_state_publisher = crs_utils_py.joint_state_publisher:main'
],
},
)
|
423512
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
from core.config import cfgs
DATA_FORMAT = "NHWC"
debug_dict = {}
BottleNeck_NUM_DICT = {
'resnet50_v1d': [3, 4, 6, 3],
'resnet101_v1d': [3, 4, 23, 3]
}
BASE_CHANNELS_DICT = {
'resnet50_v1d': [64, 128, 256, 512],
'resnet101_v1d': [64, 128, 256, 512]
}
def resnet_arg_scope(
freeze_norm,
is_training=True,
weight_decay=0.0001,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'is_training': False, 'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon, 'scale': batch_norm_scale,
'trainable': False,
'updates_collections': tf.GraphKeys.UPDATE_OPS,
'data_format': DATA_FORMAT
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
trainable=is_training,
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
def stem_7x7(net, scope="C1"):
with tf.variable_scope(scope):
        net = tf.pad(net, paddings=[[0, 0], [3, 3], [3, 3], [0, 0]])  # pad for data
net = slim.conv2d(
net,
num_outputs=64,
kernel_size=[
7,
7],
stride=2,
padding="VALID",
biases_initializer=None,
data_format=DATA_FORMAT,
scope="conv0")
net = tf.pad(net, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.max_pool2d(
net,
kernel_size=[
3,
3],
stride=2,
padding="VALID",
data_format=DATA_FORMAT)
return net
def stem_stack_3x3(net, input_channel=32, scope="C1"):
with tf.variable_scope(scope):
net = tf.pad(net, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.conv2d(
net,
num_outputs=input_channel,
kernel_size=[
3,
3],
stride=2,
padding="VALID",
biases_initializer=None,
data_format=DATA_FORMAT,
scope='conv0')
net = tf.pad(net, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.conv2d(
net,
num_outputs=input_channel,
kernel_size=[
3,
3],
stride=1,
padding="VALID",
biases_initializer=None,
data_format=DATA_FORMAT,
scope='conv1')
net = tf.pad(net, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.conv2d(
net,
num_outputs=input_channel * 2,
kernel_size=[
3,
3],
stride=1,
padding="VALID",
biases_initializer=None,
data_format=DATA_FORMAT,
scope='conv2')
net = tf.pad(net, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.max_pool2d(
net,
kernel_size=[
3,
3],
stride=2,
padding="VALID",
data_format=DATA_FORMAT)
return net
def bottleneck_v1b(
input_x,
base_channel,
scope,
stride=1,
projection=False,
avg_down=True):
'''
for bottleneck_v1b: reduce spatial dim in conv_3x3 with stride 2.
'''
with tf.variable_scope(scope):
net = slim.conv2d(
input_x,
num_outputs=base_channel,
kernel_size=[
1,
1],
stride=1,
padding="VALID",
biases_initializer=None,
data_format=DATA_FORMAT,
scope='conv0')
net = tf.pad(net, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.conv2d(
net,
num_outputs=base_channel,
kernel_size=[
3,
3],
stride=stride,
padding="VALID",
biases_initializer=None,
data_format=DATA_FORMAT,
scope='conv1')
net = slim.conv2d(
net,
num_outputs=base_channel * 4,
kernel_size=[
1,
1],
stride=1,
padding="VALID",
biases_initializer=None,
data_format=DATA_FORMAT,
activation_fn=None,
scope='conv2')
if projection:
if avg_down: # design for resnet_v1d
'''
                In GluonCV, padding is "ceil mode". Here we use "SAME" to replace
                it, which may cause errors, and the error will grow with the depth
                of the resnet, e.g. res101 error > res50 error.
'''
shortcut = slim.avg_pool2d(
input_x,
kernel_size=[
stride,
stride],
stride=stride,
padding="SAME",
data_format=DATA_FORMAT)
shortcut = slim.conv2d(
shortcut,
num_outputs=base_channel * 4,
kernel_size=[
1,
1],
stride=1,
padding="VALID",
biases_initializer=None,
data_format=DATA_FORMAT,
activation_fn=None,
scope='shortcut')
# shortcut should have batch norm.
else:
shortcut = slim.conv2d(
input_x,
num_outputs=base_channel * 4,
kernel_size=[
1,
1],
stride=stride,
padding="VALID",
biases_initializer=None,
activation_fn=None,
data_format=DATA_FORMAT,
scope='shortcut')
else:
shortcut = tf.identity(input_x, name='shortcut/Identity')
net = net + shortcut
net = tf.nn.relu(net)
return net
def make_block(
net,
base_channel,
bottleneck_nums,
scope,
avg_down=True,
spatial_downsample=False):
with tf.variable_scope(scope):
first_stride = 2 if spatial_downsample else 1
net = bottleneck_v1b(
input_x=net,
base_channel=base_channel,
scope='bottleneck_0',
stride=first_stride,
avg_down=avg_down,
projection=True)
for i in range(1, bottleneck_nums):
net = bottleneck_v1b(
input_x=net,
base_channel=base_channel,
scope="bottleneck_%d" %
i,
stride=1,
avg_down=avg_down,
projection=False)
return net
def get_resnet_v1_d_base(
input_x, freeze_norm, scope="resnet50_v1d", bottleneck_nums=[
3, 4, 6, 3], base_channels=[
64, 128, 256, 512], freeze=[
True, False, False, False, False], is_training=True):
    assert len(bottleneck_nums) == len(base_channels), \
        "bottleneck_nums should have the same size as base_channels"
    assert len(freeze) == len(bottleneck_nums) + 1, \
        "should satisfy: len(freeze) == len(bottleneck_nums) + 1"
feature_dict = {}
with tf.variable_scope(scope):
with slim.arg_scope(resnet_arg_scope(is_training=((not freeze[0]) and is_training),
freeze_norm=freeze_norm)):
net = stem_stack_3x3(net=input_x, input_channel=32, scope="C1")
feature_dict["C1"] = net
# print (net)
for i in range(2, len(bottleneck_nums) + 2):
spatial_downsample = False if i == 2 else True # do not downsample in C2
with slim.arg_scope(resnet_arg_scope(is_training=((not freeze[i - 1]) and is_training),
freeze_norm=freeze_norm)):
net = make_block(net=net,
base_channel=base_channels[i - 2],
bottleneck_nums=bottleneck_nums[i - 2],
scope="C%d" % i,
avg_down=True,
spatial_downsample=spatial_downsample)
feature_dict["C%d" % i] = net
return net, feature_dict
def resnet_vd(img_batch, scope_name, is_training=True):
_, feature_dict = get_resnet_v1_d_base(input_x=img_batch, scope=scope_name,
bottleneck_nums=BottleNeck_NUM_DICT[scope_name],
base_channels=BASE_CHANNELS_DICT[scope_name],
is_training=is_training, freeze_norm=True,
freeze=cfgs.FREEZE_BLOCKS)
return feature_dict
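# Usage sketch (TF1-style graph mode; the input shape is illustrative):
#
#     img_batch = tf.placeholder(tf.float32, shape=[1, 224, 224, 3])
#     feats = resnet_vd(img_batch, scope_name='resnet50_v1d', is_training=False)
#     c4 = feats['C4']  # stride-16 feature map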
|
423522
|
from collections import Counter
def soma_quadrados(n):
    """Decompose n into the smallest possible list of perfect squares (see tests below)."""
    if n == 0:
        return [0]
    # Dynamic programming: best[i] holds the shortest list of squares summing to i.
    best = [[]] + [None] * n
    for i in range(1, n + 1):
        for root in range(1, int(i ** 0.5) + 1):
            candidate = best[i - root * root] + [root * root]
            if best[i] is None or len(candidate) < len(best[i]):
                best[i] = candidate
    return best[n]
import unittest
class SomaQuadradosPerfeitosTestes(unittest.TestCase):
def teste_0(self):
self.assert_possui_mesmo_elementos([0], soma_quadrados(0))
def teste_1(self):
self.assert_possui_mesmo_elementos([1], soma_quadrados(1))
def teste_2(self):
self.assert_possui_mesmo_elementos([1, 1], soma_quadrados(2))
def teste_3(self):
self.assert_possui_mesmo_elementos([1, 1, 1], soma_quadrados(3))
def teste_4(self):
self.assert_possui_mesmo_elementos([4], soma_quadrados(4))
def teste_5(self):
self.assert_possui_mesmo_elementos([4, 1], soma_quadrados(5))
def teste_11(self):
self.assert_possui_mesmo_elementos([9, 1, 1], soma_quadrados(11))
def teste_12(self):
self.assert_possui_mesmo_elementos([4, 4, 4], soma_quadrados(12))
def assert_possui_mesmo_elementos(self, esperado, resultado):
self.assertEqual(Counter(esperado), Counter(resultado))
|
423530
|
from .abstract_portfolio import AbstractPortfolio
from .components import PortfolioHistory
from .. import metrics
class SimulatedPortfolio(AbstractPortfolio):
"""Simulated Portfolio Class
    The simulated portfolio is an implementation of the abstract portfolio class
specifically for historical backtesting purposes. Specifically, whenever
historical bar data is streamed, the value of our positions in the market is
updated using the value of the underlying asset at the end of the time
period.
The simulated portfolio also records the historical equity states of the
portfolio for the purposes of computing performance metrics of a backtest.
Parameters
----------
data_handler: Object inheriting from the abstract data handler class.
This object supplies the data to update the prices of held positions and
provide new bar data with which to construct features.
position_handler: Object inheriting from the abstract position handler
class.
The position handler determines how much of an asset to acquire or to
relinquish when trading signals are processed into orders.
portfolio_handler: Portfolio handler object.
The portfolio handler keeps track of positions that are currently held
by the portfolio as well as the current amount of equity and capital in
the portfolio.
"""
def __init__(self, data_handler, position_handler, portfolio_handler):
"""Initialize parameters of the simulated portfolio object."""
super(SimulatedPortfolio, self).__init__(
data_handler, position_handler, portfolio_handler
)
self.history = PortfolioHistory(self.portfolio_handler.portfolio_id)
def process_post_events(self):
"""Implementation of abstract base class method."""
# Update the portfolio history of positions, capital, and equity.
date = self.data_handler.current_date
self.history.add_state(date, self.portfolio_handler.state)
def performance_summary(self):
"""Compute the performance summary for the portfolio over the period of
performance. This reports common performance metrics based on the
equity, returns, and holdings of the portfolio at each instance in the
time-series.
Returns
-------
summary: Pandas data frame object.
A summary of the key performance parameters of the portfolio in the
designated time period.
"""
self.history.compute_attributes()
return metrics.performance_summary(
self.history, self.portfolio_handler.portfolio_id
)
|
423556
|
import os
import shutil
import subprocess
import sparsechem as sc
import numpy as np
import pandas as pd  # used by create_weights below
import string
import glob
import scipy.sparse
from urllib.request import urlretrieve
def download_chembl23(data_dir="test_chembl23", remove_previous=False):
if remove_previous and os.path.isdir(data_dir):
        shutil.rmtree(data_dir)  # os.rmdir fails on non-empty directories
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
files = ["chembl_23mini_x.npy",
"chembl_23mini_y.npy",
"chembl_23mini_folds.npy",
"chembl_23mini_class_weights.csv",
"chembl_23mini_regr_weights.csv",
"chembl_23mini_y_censored.npy"]
url = "https://www.esat.kuleuven.be/~jsimm/"
for f in files:
if not os.path.isfile(os.path.join(data_dir, f)):
print(f"Downloading '{f}' into '{data_dir}'.")
urlretrieve(f"{url}{f}", os.path.join(data_dir, f))
def create_weights(data_dir="test_chembl23"):
df = pd.DataFrame({
"task_id": np.arange(100),
"training_weight": np.clip(np.random.randn(100), 0, 1),
"task_type": np.random.choice(["adme", "panel", "other"], size=100),
})
df["aggregation_weight"] = np.sqrt(df.training_weight)
df.to_csv(f"{data_dir}/chembl_23mini_class_weights.csv", index=False)
## censored weights for regression
df["censored_weight"] = np.clip(np.random.randn(100), 0, 1)
df.to_csv(f"{data_dir}/chembl_23mini_regr_weights.csv", index=False)
def random_str(size):
    return "".join([string.ascii_lowercase[i] for i in np.random.randint(0, 26, size=size)])
def test_classification(dev, data_dir="test_chembl23", rm_output=True):
rstr = random_str(12)
output_dir = f"./{data_dir}/models-{rstr}/"
cmd = (
f"python train.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --y_class ./{data_dir}/chembl_23mini_y.npy" +
f" --folding ./{data_dir}/chembl_23mini_folds.npy" +
f" --batch_ratio 0.1" +
f" --output_dir {output_dir}" +
f" --hidden_sizes 20" +
f" --epochs 2" +
f" --lr 1e-3" +
f" --lr_steps 1" +
f" --dev {dev}" +
f" --verbose 1"
)
download_chembl23(data_dir)
res = subprocess.run(cmd.split())
assert res.returncode == 0
conf_file = glob.glob(f"{output_dir}/*.json")[0]
model_file = glob.glob(f"{output_dir}/*.pt")[0]
results = sc.load_results(conf_file)
assert os.path.isdir(os.path.join(output_dir, "boards"))
assert "conf" in results
assert "validation" in results
assert results["validation"]["classification"].shape[0] > 0
cmd_pred = (
f"python predict.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --outprefix {output_dir}/yhat" +
f" --conf {conf_file}" +
f" --model {model_file}" +
f" --dev {dev}"
)
res_pred = subprocess.run(cmd_pred.split())
assert res_pred.returncode == 0
yhat = np.load(f"{output_dir}/yhat-class.npy")
assert results["conf"].class_output_size == yhat.shape[1]
assert (yhat >= 0).all()
assert (yhat <= 1).all()
## checking --last_hidden 1
cmd_hidden = (
f"python predict.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --outprefix {output_dir}/yhat" +
f" --conf {conf_file}" +
f" --model {model_file}" +
f" --last_hidden 1" +
f" --dev {dev}"
)
res_hidden = subprocess.run(cmd_hidden.split())
assert res_hidden.returncode == 0
hidden = np.load(f"{output_dir}/yhat-hidden.npy")
assert results["conf"].hidden_sizes[-1] == hidden.shape[1]
## sparse prediction
cmd_sparse = (
f"python predict.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --y_class ./{data_dir}/chembl_23mini_y.npy" +
f" --outprefix {output_dir}/yhat" +
f" --conf {conf_file}" +
f" --model {model_file}" +
f" --dev {dev}"
)
res_sparse = subprocess.run(cmd_sparse.split())
assert res_sparse.returncode == 0
ysparse = sc.load_sparse(f"{output_dir}/yhat-class.npy")
ytrue = sc.load_sparse(f"./{data_dir}/chembl_23mini_y.npy")
assert ytrue.shape == ysparse.shape
assert type(ysparse) == scipy.sparse.csr.csr_matrix
assert (ysparse.data >= 0).all()
assert (ysparse.data <= 1).all()
ytrue_nz = ytrue.nonzero()
ysparse_nz = ysparse.nonzero()
assert (ytrue_nz[0] == ysparse_nz[0]).all(), "incorrect sparsity pattern"
assert (ytrue_nz[1] == ysparse_nz[1]).all(), "incorrect sparsity pattern"
## fold filtering
cmd_folding = (
f"python predict.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --y_class ./{data_dir}/chembl_23mini_y.npy" +
f" --folding ./{data_dir}/chembl_23mini_folds.npy" +
f" --predict_fold 1 2"
f" --outprefix {output_dir}/yhat" +
f" --conf {conf_file}" +
f" --model {model_file}" +
f" --dev {dev}"
)
res_folding = subprocess.run(cmd_folding.split())
assert res_folding.returncode == 0
yfolding = sc.load_sparse(f"{output_dir}/yhat-class.npy")
ytrue = sc.load_sparse(f"./{data_dir}/chembl_23mini_y.npy")
assert ytrue.shape == yfolding.shape
assert type(yfolding) == scipy.sparse.csr.csr_matrix
assert (yfolding.data >= 0).all()
assert (yfolding.data <= 1).all()
assert yfolding.nnz < ytrue.nnz
if rm_output:
shutil.rmtree(output_dir)
def test_noboard(dev, data_dir="test_chembl23", rm_output=True):
rstr = random_str(12)
output_dir = f"./{data_dir}/models-{rstr}/"
cmd = (
f"python train.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --y_class ./{data_dir}/chembl_23mini_y.npy" +
f" --folding ./{data_dir}/chembl_23mini_folds.npy" +
f" --batch_ratio 0.1" +
f" --output_dir {output_dir}" +
f" --hidden_sizes 20" +
f" --epochs 1" +
f" --save_board 0" +
f" --dev {dev}" +
f" --verbose 0"
)
download_chembl23(data_dir)
res = subprocess.run(cmd.split())
assert res.returncode == 0
    assert not os.path.isdir(os.path.join(output_dir, "boards"))
if rm_output:
shutil.rmtree(output_dir)
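
# Regression test: trains with --y_regr and checks that predict.py writes a
# dense yhat-regr.npy with the expected number of regression outputs.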
def test_regression(dev, data_dir="test_chembl23", rm_output=True):
rstr = random_str(12)
output_dir = f"./{data_dir}/models-{rstr}/"
cmd = (
f"python train.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --y_regr ./{data_dir}/chembl_23mini_y.npy" +
f" --folding ./{data_dir}/chembl_23mini_folds.npy" +
f" --batch_ratio 0.1" +
f" --output_dir {output_dir}" +
f" --hidden_sizes 20" +
f" --epochs 2" +
f" --lr 1e-3" +
f" --lr_steps 1" +
f" --dev {dev}" +
f" --verbose 1"
)
download_chembl23(data_dir)
res = subprocess.run(cmd.split())
assert res.returncode == 0
assert os.path.isdir(os.path.join(output_dir, "boards"))
conf_file = glob.glob(f"{output_dir}/*.json")[0]
model_file = glob.glob(f"{output_dir}/*.pt")[0]
results = sc.load_results(conf_file)
assert "conf" in results
assert "validation" in results
assert results["validation"]["regression"].shape[0] > 0
cmd_pred = (
f"python predict.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --outprefix {output_dir}/yhat" +
f" --conf {conf_file}" +
f" --model {model_file}" +
f" --dev {dev}"
)
res_pred = subprocess.run(cmd_pred.split())
assert res_pred.returncode == 0
yhat = np.load(f"{output_dir}/yhat-regr.npy")
assert results["conf"].regr_output_size == yhat.shape[1]
if rm_output:
shutil.rmtree(output_dir)
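
# Joint test: trains with both --y_class and --y_regr and checks that
# regression validation results are produced.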
def test_classification_regression(dev, data_dir="test_chembl23", rm_output=True):
rstr = random_str(12)
output_dir = f"./{data_dir}/models-{rstr}/"
cmd = (
f"python train.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --y_class ./{data_dir}/chembl_23mini_y.npy" +
f" --y_regr ./{data_dir}/chembl_23mini_y.npy" +
f" --folding ./{data_dir}/chembl_23mini_folds.npy" +
f" --batch_ratio 0.1" +
f" --output_dir {output_dir}" +
f" --hidden_sizes 20" +
f" --epochs 2" +
f" --lr 1e-3" +
f" --lr_steps 1" +
f" --dev {dev}" +
f" --verbose 1"
)
download_chembl23(data_dir)
res = subprocess.run(cmd.split())
assert res.returncode == 0
assert os.path.isdir(os.path.join(output_dir, "boards"))
conf_file = glob.glob(f"{output_dir}/*.json")[0]
model_file = glob.glob(f"{output_dir}/*.pt")[0]
results = sc.load_results(conf_file)
assert "conf" in results
assert "validation" in results
assert results["validation"]["regression"].shape[0] > 0
if rm_output:
shutil.rmtree(output_dir)
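
# Censored regression test: adds --y_censor on top of the regression setup.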
def test_regression_censor(dev, data_dir="test_chembl23", rm_output=True):
rstr = random_str(12)
output_dir = f"./{data_dir}/models-{rstr}/"
cmd = (
f"python train.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --y_regr ./{data_dir}/chembl_23mini_y.npy" +
f" --y_censor ./{data_dir}/chembl_23mini_y_censored.npy" +
f" --folding ./{data_dir}/chembl_23mini_folds.npy" +
f" --batch_ratio 0.1" +
f" --output_dir {output_dir}" +
f" --hidden_sizes 20" +
f" --epochs 2" +
f" --lr 1e-3" +
f" --lr_steps 3" +
f" --dev {dev}" +
f" --verbose 1"
)
download_chembl23(data_dir)
res = subprocess.run(cmd.split())
assert res.returncode == 0
conf_file = glob.glob(f"{output_dir}/*.json")[0]
model_file = glob.glob(f"{output_dir}/*.pt")[0]
results = sc.load_results(conf_file)
assert "conf" in results
assert "validation" in results
assert results["validation"]["regression"].shape[0] > 0
if rm_output:
shutil.rmtree(output_dir)
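
# Censored regression with per-task weights: adds --weights_regr as well.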
def test_regression_censor_weights(dev, data_dir="test_chembl23", rm_output=True):
rstr = random_str(12)
output_dir = f"./{data_dir}/models-{rstr}/"
cmd = (
f"python train.py --x ./{data_dir}/chembl_23mini_x.npy" +
f" --y_regr ./{data_dir}/chembl_23mini_y.npy" +
f" --y_censor ./{data_dir}/chembl_23mini_y_censored.npy" +
f" --weights_regr ./{data_dir}/chembl_23mini_regr_weights.csv" +
f" --folding ./{data_dir}/chembl_23mini_folds.npy" +
f" --batch_ratio 0.1" +
f" --output_dir {output_dir}" +
f" --hidden_sizes 20" +
f" --epochs 2" +
f" --lr 1e-3" +
f" --lr_steps 3" +
f" --dev {dev}" +
f" --verbose 1"
)
download_chembl23(data_dir)
res = subprocess.run(cmd.split())
assert res.returncode == 0
conf_file = glob.glob(f"{output_dir}/*.json")[0]
model_file = glob.glob(f"{output_dir}/*.pt")[0]
results = sc.load_results(conf_file)
assert "conf" in results
assert "validation" in results
assert results["validation"]["regression"].shape[0] > 0
if rm_output:
shutil.rmtree(output_dir)
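
# When run directly, only the classification test executes on the first CUDA
# device; the remaining tests are left commented out below.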
if __name__ == "__main__":
test_classification(dev="cuda:0")
#test_noboard(dev="cuda:0")
#test_regression(dev="cuda:0")
#test_regression_censor(dev="cuda:0")
#test_classification_regression(dev="cuda:0")
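    #test_regression_censor_weights(dev="cuda:0")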