| code (string, 2k–1.04M chars) | repo_path (string, 5–517 chars) | parsed_code (string, 0–1.04M chars) | quality_prob (float64, 0.02–0.95) | learning_prob (float64, 0.02–0.93) |
|---|---|---|---|---|
import numpy as np
import matplotlib.pyplot as plt
from six.moves import cPickle
# Y' = 0.2989 R + 0.5870 G + 0.1140 B
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
def readData():
image_data = np.array([])
image_labels = np.array([])
fileindex = 1
while(fileindex<6):
filename = "cifar-10-batches-py/data_batch_{}".format(fileindex)
print(filename)
f = open(filename, 'rb')
datadict = cPickle.load(f,encoding='latin1')
f.close()
X = datadict["data"]
Y = datadict['labels']
if(fileindex==1):
image_data = np.array(X)
image_labels = np.array(Y)
else:
image_data = np.vstack((image_data, X))
image_labels = np.append(image_labels,Y)
fileindex += 1
print(image_data.shape)
image_data = image_data.reshape(50000, 3, 32, 32).transpose(0,2,3,1).astype("uint8")
return image_data,image_labels
def convertAllIntoGrayScale(image_data):
print(len(image_data))
length = len(image_data)
grayscaleImageData = image_data
grayscaleImageData = []
for i in range(length):
grayscaleImageData.append(rgb2gray(image_data[i]))
grayscaleImageData = np.array(grayscaleImageData)
return grayscaleImageData
def calculateMean(data,labels, isGrayScale = False):
imagecount = 0
if(isGrayScale):
mean = np.zeros([10,32,32])
else:
mean = np.zeros([10,32,32,3])
while(imagecount<10):
indexes = np.where(labels==imagecount)[0]
for i in indexes:
mean[imagecount] += data[i]
length = len(indexes)
mean[imagecount] = (mean[imagecount] / length)
imagecount += 1
return mean
image_data,image_labels = readData()
print('Done reading')
gray_scale_image_data = convertAllIntoGrayScale(image_data)
print('Done Converting')
gray_scale_mean = calculateMean(gray_scale_image_data,image_labels,True)
gray_scale_mean = gray_scale_mean.reshape(10,1024)
meandifferencematrix = np.zeros([10,10])
print('Mean Differemce Matrix ')
print( )
print()
for i in range(10):
for j in range(10):
meandifferencematrix[i][j] = np.linalg.norm(gray_scale_mean[i]-gray_scale_mean[j])
print(meandifferencematrix) | Supervised Learning/SMAI HWS/10/3-2.py | import numpy as np
import matplotlib.pyplot as plt
from six.moves import cPickle
# Y' = 0.2989 R + 0.5870 G + 0.1140 B
def rgb2gray(rgb):
    """Collapse the RGB channels of *rgb* to luminance (ITU-R BT.601 weights).

    Works on a single image or a stack of images; the last axis must hold
    at least the three R, G, B channels. Returns an array with that axis
    reduced away.
    """
    luminance_weights = np.array([0.2989, 0.5870, 0.1140])
    return rgb[..., :3] @ luminance_weights
def readData():
    """Load the five CIFAR-10 training batches from disk.

    Returns
    -------
    image_data : np.ndarray
        Shape (50000, 32, 32, 3), dtype uint8, channels-last.
    image_labels : np.ndarray
        Shape (50000,), integer class ids in [0, 9].

    Raises
    ------
    OSError
        If a batch file under ``cifar-10-batches-py/`` is missing.
    """
    batches = []
    labels = []
    for fileindex in range(1, 6):
        filename = "cifar-10-batches-py/data_batch_{}".format(fileindex)
        print(filename)
        # Context manager guarantees the handle is closed even if
        # unpickling fails (the original leaked the handle on error).
        with open(filename, 'rb') as f:
            datadict = cPickle.load(f, encoding='latin1')
        batches.append(datadict["data"])
        labels.extend(datadict['labels'])
    image_data = np.vstack(batches)
    image_labels = np.array(labels)
    print(image_data.shape)
    # Batches store pixels as (channel, row, col); convert to HWC layout.
    image_data = image_data.reshape(50000, 3, 32, 32).transpose(0, 2, 3, 1).astype("uint8")
    return image_data, image_labels
def convertAllIntoGrayScale(image_data):
    """Convert a stack of RGB images to grayscale via rgb2gray.

    Parameters
    ----------
    image_data : array-like of shape (N, H, W, 3)

    Returns
    -------
    np.ndarray of shape (N, H, W)
    """
    print(len(image_data))
    # The original assigned grayscaleImageData = image_data and then
    # immediately overwrote it with [] — dead code removed. The append
    # loop is replaced by a comprehension with identical results.
    return np.array([rgb2gray(image) for image in image_data])
def calculateMean(data, labels, isGrayScale=False):
    """Compute the per-class mean image for the 10 CIFAR classes.

    Parameters
    ----------
    data : np.ndarray
        Shape (N, 32, 32) when grayscale, (N, 32, 32, 3) when RGB.
    labels : array-like of shape (N,)
        Integer class ids in [0, 9].
    isGrayScale : bool, optional
        Selects the output shape; default False (RGB).

    Returns
    -------
    np.ndarray of shape (10, 32, 32) or (10, 32, 32, 3)
    """
    shape = (10, 32, 32) if isGrayScale else (10, 32, 32, 3)
    mean = np.zeros(shape)
    labels = np.asarray(labels)
    for cls in range(10):
        # Boolean-mask selection + vectorized mean replaces the manual
        # accumulate-then-divide loop over np.where indices.
        members = data[labels == cls]
        # Guard: the original divided by zero when a class was absent.
        if len(members):
            mean[cls] = members.mean(axis=0)
    return mean
# Driver: load CIFAR-10, grayscale it, and build the 10x10 matrix of
# Euclidean distances between the per-class mean images.
image_data,image_labels = readData()
print('Done reading')
gray_scale_image_data = convertAllIntoGrayScale(image_data)
print('Done Converting')
# Per-class grayscale means, flattened to 1024-d vectors for norm computation.
gray_scale_mean = calculateMean(gray_scale_image_data,image_labels,True)
gray_scale_mean = gray_scale_mean.reshape(10,1024)
meandifferencematrix = np.zeros([10,10])
print('Mean Differemce Matrix ')  # NOTE(review): typo in runtime string kept as-is
print( )
print()
# Pairwise L2 distance between class means; symmetric with zero diagonal.
for i in range(10):
    for j in range(10):
        meandifferencematrix[i][j] = np.linalg.norm(gray_scale_mean[i]-gray_scale_mean[j])
print(meandifferencematrix) | 0.233269 | 0.481454 |
from ingenico.connect.sdk.data_object import DataObject
class OrderLineDetails(DataObject):
__discount_amount = None
__google_product_category_id = None
__line_amount_total = None
__product_category = None
__product_code = None
__product_name = None
__product_price = None
__product_type = None
__quantity = None
__tax_amount = None
__unit = None
@property
def discount_amount(self):
"""
| Discount on the line item, with the last two digits implied as decimal places
Type: int
"""
return self.__discount_amount
@discount_amount.setter
def discount_amount(self, value):
self.__discount_amount = value
@property
def google_product_category_id(self):
"""
| The Google product category ID for the item.
Type: int
"""
return self.__google_product_category_id
@google_product_category_id.setter
def google_product_category_id(self, value):
self.__google_product_category_id = value
@property
def line_amount_total(self):
"""
| Total amount for the line item
Type: int
"""
return self.__line_amount_total
@line_amount_total.setter
def line_amount_total(self, value):
self.__line_amount_total = value
@property
def product_category(self):
"""
| The category of the product (i.e. home appliance). This property can be used for fraud screening on the Ogone Platform.
Type: str
"""
return self.__product_category
@product_category.setter
def product_category(self, value):
self.__product_category = value
@property
def product_code(self):
"""
| Product or UPC Code, left justified
| Note: Must not be all spaces or all zeros
Type: str
"""
return self.__product_code
@product_code.setter
def product_code(self, value):
self.__product_code = value
@property
def product_name(self):
"""
| The name of the product.
Type: str
"""
return self.__product_name
@product_name.setter
def product_name(self, value):
self.__product_name = value
@property
def product_price(self):
"""
| The price of one unit of the product, the value should be zero or greater
Type: int
"""
return self.__product_price
@product_price.setter
def product_price(self, value):
self.__product_price = value
@property
def product_type(self):
"""
| Code used to classify items that are purchased
| Note: Must not be all spaces or all zeros
Type: str
"""
return self.__product_type
@product_type.setter
def product_type(self, value):
self.__product_type = value
@property
def quantity(self):
"""
| Quantity of the units being purchased, should be greater than zero
| Note: Must not be all spaces or all zeros
Type: int
"""
return self.__quantity
@quantity.setter
def quantity(self, value):
self.__quantity = value
@property
def tax_amount(self):
"""
| Tax on the line item, with the last two digits implied as decimal places
Type: int
"""
return self.__tax_amount
@tax_amount.setter
def tax_amount(self, value):
self.__tax_amount = value
@property
def unit(self):
"""
| Indicates the line item unit of measure; for example: each, kit, pair, gallon, month, etc.
Type: str
"""
return self.__unit
@unit.setter
def unit(self, value):
self.__unit = value
def to_dictionary(self):
dictionary = super(OrderLineDetails, self).to_dictionary()
if self.discount_amount is not None:
dictionary['discountAmount'] = self.discount_amount
if self.google_product_category_id is not None:
dictionary['googleProductCategoryId'] = self.google_product_category_id
if self.line_amount_total is not None:
dictionary['lineAmountTotal'] = self.line_amount_total
if self.product_category is not None:
dictionary['productCategory'] = self.product_category
if self.product_code is not None:
dictionary['productCode'] = self.product_code
if self.product_name is not None:
dictionary['productName'] = self.product_name
if self.product_price is not None:
dictionary['productPrice'] = self.product_price
if self.product_type is not None:
dictionary['productType'] = self.product_type
if self.quantity is not None:
dictionary['quantity'] = self.quantity
if self.tax_amount is not None:
dictionary['taxAmount'] = self.tax_amount
if self.unit is not None:
dictionary['unit'] = self.unit
return dictionary
def from_dictionary(self, dictionary):
super(OrderLineDetails, self).from_dictionary(dictionary)
if 'discountAmount' in dictionary:
self.discount_amount = dictionary['discountAmount']
if 'googleProductCategoryId' in dictionary:
self.google_product_category_id = dictionary['googleProductCategoryId']
if 'lineAmountTotal' in dictionary:
self.line_amount_total = dictionary['lineAmountTotal']
if 'productCategory' in dictionary:
self.product_category = dictionary['productCategory']
if 'productCode' in dictionary:
self.product_code = dictionary['productCode']
if 'productName' in dictionary:
self.product_name = dictionary['productName']
if 'productPrice' in dictionary:
self.product_price = dictionary['productPrice']
if 'productType' in dictionary:
self.product_type = dictionary['productType']
if 'quantity' in dictionary:
self.quantity = dictionary['quantity']
if 'taxAmount' in dictionary:
self.tax_amount = dictionary['taxAmount']
if 'unit' in dictionary:
self.unit = dictionary['unit']
return self | ingenico/connect/sdk/domain/payment/definitions/order_line_details.py | from ingenico.connect.sdk.data_object import DataObject
class OrderLineDetails(DataObject):
__discount_amount = None
__google_product_category_id = None
__line_amount_total = None
__product_category = None
__product_code = None
__product_name = None
__product_price = None
__product_type = None
__quantity = None
__tax_amount = None
__unit = None
@property
def discount_amount(self):
"""
| Discount on the line item, with the last two digits implied as decimal places
Type: int
"""
return self.__discount_amount
@discount_amount.setter
def discount_amount(self, value):
self.__discount_amount = value
@property
def google_product_category_id(self):
"""
| The Google product category ID for the item.
Type: int
"""
return self.__google_product_category_id
@google_product_category_id.setter
def google_product_category_id(self, value):
self.__google_product_category_id = value
@property
def line_amount_total(self):
"""
| Total amount for the line item
Type: int
"""
return self.__line_amount_total
@line_amount_total.setter
def line_amount_total(self, value):
self.__line_amount_total = value
@property
def product_category(self):
"""
| The category of the product (i.e. home appliance). This property can be used for fraud screening on the Ogone Platform.
Type: str
"""
return self.__product_category
@product_category.setter
def product_category(self, value):
self.__product_category = value
@property
def product_code(self):
"""
| Product or UPC Code, left justified
| Note: Must not be all spaces or all zeros
Type: str
"""
return self.__product_code
@product_code.setter
def product_code(self, value):
self.__product_code = value
@property
def product_name(self):
"""
| The name of the product.
Type: str
"""
return self.__product_name
@product_name.setter
def product_name(self, value):
self.__product_name = value
@property
def product_price(self):
"""
| The price of one unit of the product, the value should be zero or greater
Type: int
"""
return self.__product_price
@product_price.setter
def product_price(self, value):
self.__product_price = value
@property
def product_type(self):
"""
| Code used to classify items that are purchased
| Note: Must not be all spaces or all zeros
Type: str
"""
return self.__product_type
@product_type.setter
def product_type(self, value):
self.__product_type = value
@property
def quantity(self):
"""
| Quantity of the units being purchased, should be greater than zero
| Note: Must not be all spaces or all zeros
Type: int
"""
return self.__quantity
@quantity.setter
def quantity(self, value):
self.__quantity = value
@property
def tax_amount(self):
"""
| Tax on the line item, with the last two digits implied as decimal places
Type: int
"""
return self.__tax_amount
@tax_amount.setter
def tax_amount(self, value):
self.__tax_amount = value
@property
def unit(self):
"""
| Indicates the line item unit of measure; for example: each, kit, pair, gallon, month, etc.
Type: str
"""
return self.__unit
@unit.setter
def unit(self, value):
self.__unit = value
def to_dictionary(self):
dictionary = super(OrderLineDetails, self).to_dictionary()
if self.discount_amount is not None:
dictionary['discountAmount'] = self.discount_amount
if self.google_product_category_id is not None:
dictionary['googleProductCategoryId'] = self.google_product_category_id
if self.line_amount_total is not None:
dictionary['lineAmountTotal'] = self.line_amount_total
if self.product_category is not None:
dictionary['productCategory'] = self.product_category
if self.product_code is not None:
dictionary['productCode'] = self.product_code
if self.product_name is not None:
dictionary['productName'] = self.product_name
if self.product_price is not None:
dictionary['productPrice'] = self.product_price
if self.product_type is not None:
dictionary['productType'] = self.product_type
if self.quantity is not None:
dictionary['quantity'] = self.quantity
if self.tax_amount is not None:
dictionary['taxAmount'] = self.tax_amount
if self.unit is not None:
dictionary['unit'] = self.unit
return dictionary
def from_dictionary(self, dictionary):
super(OrderLineDetails, self).from_dictionary(dictionary)
if 'discountAmount' in dictionary:
self.discount_amount = dictionary['discountAmount']
if 'googleProductCategoryId' in dictionary:
self.google_product_category_id = dictionary['googleProductCategoryId']
if 'lineAmountTotal' in dictionary:
self.line_amount_total = dictionary['lineAmountTotal']
if 'productCategory' in dictionary:
self.product_category = dictionary['productCategory']
if 'productCode' in dictionary:
self.product_code = dictionary['productCode']
if 'productName' in dictionary:
self.product_name = dictionary['productName']
if 'productPrice' in dictionary:
self.product_price = dictionary['productPrice']
if 'productType' in dictionary:
self.product_type = dictionary['productType']
if 'quantity' in dictionary:
self.quantity = dictionary['quantity']
if 'taxAmount' in dictionary:
self.tax_amount = dictionary['taxAmount']
if 'unit' in dictionary:
self.unit = dictionary['unit']
return self | 0.76856 | 0.277528 |
try:
from botocore.config import Config
except ImportError:
from c7n.config import Bag as Config # pragma: no cover
from .core import EventAction
from c7n import utils
from c7n.manager import resources
from c7n.version import version as VERSION
class LambdaInvoke(EventAction):
    """Invoke an arbitrary lambda

    serialized invocation parameters

    - resources / collection of resources
    - policy / policy that is invoke the lambda
    - action / action that is invoking the lambda
    - event / cloud trail event if any
    - version / version of custodian invoking the lambda

    We automatically batch into sets of 250 for invocation,
    We try to utilize async invocation by default, this imposes
    some greater size limits of 128kb which means we batch
    invoke.

    Example::

      - type: invoke-lambda
        function: my-function

    Note if your synchronously invoking the lambda, you may also need
    to configure the timeout, to avoid multiple invokes. The default
    is 90s, if the lambda doesn't respond within that time the boto
    sdk will invoke the lambda again with the same
    arguments. Alternatively use async: true
    """

    schema_alias = True
    schema = {
        'type': 'object',
        'required': ['type', 'function'],
        'additionalProperties': False,
        'properties': {
            'type': {'enum': ['invoke-lambda']},
            'function': {'type': 'string'},
            'region': {'type': 'string'},
            'async': {'type': 'boolean'},
            'qualifier': {'type': 'string'},
            'batch_size': {'type': 'integer'},
            'timeout': {'type': 'integer'},
            'vars': {'type': 'object'},
        }
    }

    permissions = ('lambda:InvokeFunction',
                   'iam:ListAccountAliases',)

    def process(self, resources, event=None):
        """Invoke the configured function once per batch of resources.

        Returns the list of lambda invocation results, with each result's
        Payload eagerly read and decoded to text.
        """
        params = dict(FunctionName=self.data['function'])
        if self.data.get('qualifier'):
            # Bug fix: the policy schema declares the lower-case key
            # 'qualifier'; the old code read self.data['Qualifier'] and
            # raised KeyError whenever a qualifier was configured.
            params['Qualifier'] = self.data['qualifier']

        if self.data.get('async', True):
            # Fire-and-forget invocation; avoids the boto sdk re-invoking
            # on read timeout for long-running functions.
            params['InvocationType'] = 'Event'

        config = Config(read_timeout=self.data.get(
            'timeout', 90), region_name=self.data.get('region', None))
        client = utils.local_session(
            self.manager.session_factory).client('lambda', config=config)

        alias = utils.get_account_alias_from_sts(
            utils.local_session(self.manager.session_factory))

        payload = {
            'version': VERSION,
            'event': event,
            'account_id': self.manager.config.account_id,
            'account': alias,
            'region': self.manager.config.region,
            'action': self.data,
            'policy': self.manager.data}

        results = []
        for resource_set in utils.chunks(resources, self.data.get('batch_size', 250)):
            payload['resources'] = resource_set
            params['Payload'] = utils.dumps(payload)
            result = client.invoke(**params)
            # Drain the streaming body so callers receive plain text.
            result['Payload'] = result['Payload'].read()
            if isinstance(result['Payload'], bytes):
                result['Payload'] = result['Payload'].decode('utf-8')
            results.append(result)
        return results

    @classmethod
    def register_resources(klass, registry, resource_class):
        """Register this action on every resource type, exactly once each."""
        if 'invoke-lambda' not in resource_class.action_registry:
            resource_class.action_registry.register('invoke-lambda', LambdaInvoke)
resources.subscribe(LambdaInvoke.register_resources) | c7n/actions/invoke.py |
try:
from botocore.config import Config
except ImportError:
from c7n.config import Bag as Config # pragma: no cover
from .core import EventAction
from c7n import utils
from c7n.manager import resources
from c7n.version import version as VERSION
class LambdaInvoke(EventAction):
"""Invoke an arbitrary lambda
serialized invocation parameters
- resources / collection of resources
- policy / policy that is invoke the lambda
- action / action that is invoking the lambda
- event / cloud trail event if any
- version / version of custodian invoking the lambda
We automatically batch into sets of 250 for invocation,
We try to utilize async invocation by default, this imposes
some greater size limits of 128kb which means we batch
invoke.
Example::
- type: invoke-lambda
function: my-function
Note if your synchronously invoking the lambda, you may also need
to configure the timeout, to avoid multiple invokes. The default
is 90s, if the lambda doesn't respond within that time the boto
sdk will invoke the lambda again with the same
arguments. Alternatively use async: true
"""
schema_alias = True
schema = {
'type': 'object',
'required': ['type', 'function'],
'additionalProperties': False,
'properties': {
'type': {'enum': ['invoke-lambda']},
'function': {'type': 'string'},
'region': {'type': 'string'},
'async': {'type': 'boolean'},
'qualifier': {'type': 'string'},
'batch_size': {'type': 'integer'},
'timeout': {'type': 'integer'},
'vars': {'type': 'object'},
}
}
permissions = ('lambda:InvokeFunction',
'iam:ListAccountAliases',)
def process(self, resources, event=None):
params = dict(FunctionName=self.data['function'])
if self.data.get('qualifier'):
params['Qualifier'] = self.data['Qualifier']
if self.data.get('async', True):
params['InvocationType'] = 'Event'
config = Config(read_timeout=self.data.get(
'timeout', 90), region_name=self.data.get('region', None))
client = utils.local_session(
self.manager.session_factory).client('lambda', config=config)
alias = utils.get_account_alias_from_sts(
utils.local_session(self.manager.session_factory))
payload = {
'version': VERSION,
'event': event,
'account_id': self.manager.config.account_id,
'account': alias,
'region': self.manager.config.region,
'action': self.data,
'policy': self.manager.data}
results = []
for resource_set in utils.chunks(resources, self.data.get('batch_size', 250)):
payload['resources'] = resource_set
params['Payload'] = utils.dumps(payload)
result = client.invoke(**params)
result['Payload'] = result['Payload'].read()
if isinstance(result['Payload'], bytes):
result['Payload'] = result['Payload'].decode('utf-8')
results.append(result)
return results
@classmethod
def register_resources(klass, registry, resource_class):
if 'invoke-lambda' not in resource_class.action_registry:
resource_class.action_registry.register('invoke-lambda', LambdaInvoke)
resources.subscribe(LambdaInvoke.register_resources) | 0.61173 | 0.16654 |
import os
from zstacklib.utils import jsonobject
from zstacklib.utils import log
from zstacklib.utils import shell
from zstacklib.utils.bash import bash_r
logger = log.get_logger(__name__)
class AgentResponse(object):
def __init__(self, success=True, error=None):
self.success = success
self.error = error if error else ''
self.totalCapacity = None
self.availableCapacity = None
class ImageStoreClient(object):
ZSTORE_CLI_BIN = "/usr/local/zstack/imagestore/bin/zstcli"
ZSTORE_CLI_PATH = ZSTORE_CLI_BIN + " -rootca /var/lib/zstack/imagestorebackupstorage/package/certs/ca.pem"
ZSTORE_PROTOSTR = "zstore://"
ZSTORE_DEF_PORT = 8000
def _check_zstore_cli(self):
if not os.path.exists(self.ZSTORE_CLI_BIN):
errmsg = '%s not found. Please reconnect all baremetal pxeservers, and try again.' % self.ZSTORE_CLI_BIN
raise Exception(errmsg)
def _parse_image_reference(self, bs_install_path):
if not bs_install_path.startswith(self.ZSTORE_PROTOSTR):
raise Exception('unexpected backup storage install path %s' % bs_install_path)
xs = bs_install_path[len(self.ZSTORE_PROTOSTR):].split('/')
if len(xs) != 2:
raise Exception('unexpected backup storage install path %s' % bs_install_path)
return xs[0], xs[1]
def download_image_from_imagestore(self, cmd):
self._check_zstore_cli()
rsp = AgentResponse()
name, imageid = self._parse_image_reference(cmd.imageInstallPath)
cmdstr = '%s -url %s:%s pull -installpath %s %s:%s' % (
self.ZSTORE_CLI_PATH, cmd.hostname, self.ZSTORE_DEF_PORT, cmd.cacheInstallPath, name, imageid)
logger.debug('pulling %s:%s from image store' % (name, imageid))
ret = bash_r(cmdstr)
if ret != 0:
rsp.success = False
rsp.error = "failed to download image from imagestore to baremetal image cache"
else:
logger.debug('%s:%s pulled to baremetal pxeserver' % (name, imageid))
return rsp | baremetalpxeserver/baremetalpxeserver/imagestore.py | import os
from zstacklib.utils import jsonobject
from zstacklib.utils import log
from zstacklib.utils import shell
from zstacklib.utils.bash import bash_r
logger = log.get_logger(__name__)
class AgentResponse(object):
    """Generic agent reply: success flag, error message, and capacity stats."""

    def __init__(self, success=True, error=None):
        self.success = success
        # Normalize a falsy error (None, empty) to the empty string so
        # callers can always treat it as text.
        self.error = error or ''
        # Capacity fields are populated by storage-related handlers.
        self.totalCapacity = None
        self.availableCapacity = None
class ImageStoreClient(object):
ZSTORE_CLI_BIN = "/usr/local/zstack/imagestore/bin/zstcli"
ZSTORE_CLI_PATH = ZSTORE_CLI_BIN + " -rootca /var/lib/zstack/imagestorebackupstorage/package/certs/ca.pem"
ZSTORE_PROTOSTR = "zstore://"
ZSTORE_DEF_PORT = 8000
def _check_zstore_cli(self):
if not os.path.exists(self.ZSTORE_CLI_BIN):
errmsg = '%s not found. Please reconnect all baremetal pxeservers, and try again.' % self.ZSTORE_CLI_BIN
raise Exception(errmsg)
def _parse_image_reference(self, bs_install_path):
if not bs_install_path.startswith(self.ZSTORE_PROTOSTR):
raise Exception('unexpected backup storage install path %s' % bs_install_path)
xs = bs_install_path[len(self.ZSTORE_PROTOSTR):].split('/')
if len(xs) != 2:
raise Exception('unexpected backup storage install path %s' % bs_install_path)
return xs[0], xs[1]
def download_image_from_imagestore(self, cmd):
self._check_zstore_cli()
rsp = AgentResponse()
name, imageid = self._parse_image_reference(cmd.imageInstallPath)
cmdstr = '%s -url %s:%s pull -installpath %s %s:%s' % (
self.ZSTORE_CLI_PATH, cmd.hostname, self.ZSTORE_DEF_PORT, cmd.cacheInstallPath, name, imageid)
logger.debug('pulling %s:%s from image store' % (name, imageid))
ret = bash_r(cmdstr)
if ret != 0:
rsp.success = False
rsp.error = "failed to download image from imagestore to baremetal image cache"
else:
logger.debug('%s:%s pulled to baremetal pxeserver' % (name, imageid))
return rsp | 0.277375 | 0.059976 |
import functools
from typing import Callable, Optional
import numpy as np
import scipy.integrate
import scipy.linalg
from probnum import randvars
from probnum.type import FloatArgType, IntArgType
from . import discrete_transition, transition
from .sde_utils import matrix_fraction_decomposition
class SDE(transition.Transition):
    """Stochastic differential equation.

    .. math:: d x(t) = g(t, x(t)) d t + L(t) d w(t),

    driven by a Wiener process with unit diffusion.

    Parameters
    ----------
    dimension :
        Dimension n of the state x(t).
    driftfun :
        Drift g = g(t, x); returns np.ndarray with shape=(n,).
    dispmatfun :
        Dispersion L = L(t, x); returns np.ndarray with shape=(n, s).
    jacobfun :
        Jacobian of the drift with respect to the state x.
    """

    def __init__(
        self,
        dimension: IntArgType,
        driftfun: Callable[[FloatArgType, np.ndarray], np.ndarray],
        dispmatfun: Callable[[FloatArgType, np.ndarray], np.ndarray],
        jacobfun: Callable[[FloatArgType, np.ndarray], np.ndarray],
    ):
        self.dimension = dimension
        self.driftfun = driftfun
        self.dispmatfun = dispmatfun
        self.jacobfun = jacobfun
        # Input and output dimensions coincide for an SDE state transition.
        super().__init__(input_dim=dimension, output_dim=dimension)

    def forward_realization(
        self,
        realization,
        t,
        dt=None,
        compute_gain=False,
        _diffusion=1.0,
        **kwargs,
    ):
        # Lift the point realization to a random variable and delegate to
        # forward_rv via the shared base-class helper.
        return self._forward_realization_via_forward_rv(
            realization,
            t=t,
            dt=dt,
            compute_gain=compute_gain,
            _diffusion=_diffusion,
            **kwargs,
        )

    def forward_rv(
        self,
        rv,
        t,
        dt=None,
        compute_gain=False,
        _diffusion=1.0,
        **kwargs,
    ):
        # Propagating a distribution through a general (nonlinear) SDE has
        # no closed form here; concrete subclasses (e.g. LinearSDE) provide it.
        raise NotImplementedError("Not available.")

    def backward_realization(
        self,
        realization_obtained,
        rv,
        rv_forwarded=None,
        gain=None,
        t=None,
        dt=None,
        _diffusion=1.0,
        **kwargs,
    ):
        # Symmetric to forward_realization: wrap the realization and defer
        # to backward_rv via the base-class helper.
        return self._backward_realization_via_backward_rv(
            realization_obtained,
            rv=rv,
            rv_forwarded=rv_forwarded,
            gain=gain,
            t=t,
            dt=dt,
            _diffusion=_diffusion,
            **kwargs,
        )

    def backward_rv(
        self,
        real_obtained,
        rv,
        rv_forwarded=None,
        gain=None,
        t=None,
        dt=None,
        _diffusion=1.0,
        **kwargs,
    ):
        # See forward_rv: only implemented by concrete subclasses.
        raise NotImplementedError("Not available.")
class LinearSDE(SDE):
"""Linear stochastic differential equation (SDE),
.. math:: d x(t) = [G(t) x(t) + v(t)] d t + L(t) x(t) d w(t).
For Gaussian initial conditions, this solution is a Gaussian process.
Parameters
----------
driftmatfun :
This is G = G(t). The evaluations of this function are called
the driftmatrix of the SDE.
Returns np.ndarray with shape=(n, n)
forcevecfun :
This is v = v(t). Evaluations of this function are called
the force(vector) of the SDE.
Returns np.ndarray with shape=(n,)
dispmatfun :
This is L = L(t). Evaluations of this function are called
the dispersion(matrix) of the SDE.
Returns np.ndarray with shape=(n, s)
mde_atol
Absolute tolerance passed to the solver of the moment differential equations (MDEs). Optional. Default is 1e-6.
mde_rtol
Relative tolerance passed to the solver of the moment differential equations (MDEs). Optional. Default is 1e-6.
mde_solver
Method that is chosen in `scipy.integrate.solve_ivp`. Any string that is compatible with ``solve_ivp(..., method=mde_solve,...)`` works here.
Usual candidates are ``[RK45, LSODA, Radau, BDF, RK23, DOP853]``. Optional. Default is LSODA.
"""
def __init__(
self,
dimension: IntArgType,
driftmatfun: Callable[[FloatArgType], np.ndarray],
forcevecfun: Callable[[FloatArgType], np.ndarray],
dispmatfun: Callable[[FloatArgType], np.ndarray],
mde_atol: Optional[FloatArgType] = 1e-6,
mde_rtol: Optional[FloatArgType] = 1e-6,
mde_solver: Optional[str] = "LSODA",
):
# Once different filtering and smoothing algorithms are available,
# replicate the scheme from DiscreteGaussian here, in which
# the initialisation decides between, e.g., classic and sqrt implementations.
self.driftmatfun = driftmatfun
self.forcevecfun = forcevecfun
super().__init__(
dimension=dimension,
driftfun=(lambda t, x: self.driftmatfun(t) @ x + self.forcevecfun(t)),
dispmatfun=dispmatfun,
jacobfun=(lambda t, x: self.driftmatfun(t)),
)
self.mde_atol = mde_atol
self.mde_rtol = mde_rtol
self.mde_solver = mde_solver
def forward_rv(
self,
rv,
t,
dt=None,
_compute_gain=False,
_diffusion=1.0,
**kwargs,
):
if dt is None:
raise ValueError(
"Continuous-time transitions require a time-increment ``dt``."
)
return self._solve_mde_forward(rv, t, dt, _diffusion=_diffusion)
def backward_rv(
self,
rv_obtained,
rv,
rv_forwarded=None,
gain=None,
t=None,
dt=None,
_diffusion=1.0,
**kwargs,
):
if dt is None:
raise ValueError(
"Continuous-time transitions require a time-increment ``dt``."
)
# Ignore rv_forwarded
return self._solve_mde_backward(
rv_obtained=rv_obtained,
rv=rv,
t=t,
dt=dt,
_diffusion=_diffusion,
)
# Forward and backward implementation(s)
def _solve_mde_forward(self, rv, t, dt, _diffusion=1.0):
"""Solve forward moment differential equations (MDEs)."""
mde, y0 = self._setup_vectorized_mde_forward(
rv,
_diffusion=_diffusion,
)
# Dense output for lambda-expression
sol = scipy.integrate.solve_ivp(
mde,
(t, t + dt),
y0,
method=self.mde_solver,
atol=self.mde_atol,
rtol=self.mde_rtol,
dense_output=True,
)
dim = rv.mean.shape[0]
y_end = sol.y[:, -1]
new_mean = y_end[:dim]
new_cov = y_end[dim:].reshape((dim, dim))
# Useful for backward transitions
# Aka continuous time smoothing.
sol_mean = lambda t: sol.sol(t)[:dim]
sol_cov = lambda t: sol.sol(t)[dim:].reshape((dim, dim))
return randvars.Normal(mean=new_mean, cov=new_cov), {
"sol": sol,
"sol_mean": sol_mean,
"sol_cov": sol_cov,
}
def _solve_mde_backward(self, rv_obtained, rv, t, dt, _diffusion=1.0):
"""Solve backward moment differential equations (MDEs)."""
_, mde_forward_info = self._solve_mde_forward(rv, t, dt, _diffusion=_diffusion)
mde_forward_sol_mean = mde_forward_info["sol_mean"]
mde_forward_sol_cov = mde_forward_info["sol_cov"]
mde, y0 = self._setup_vectorized_mde_backward(
rv_obtained,
_diffusion=_diffusion,
)
# Use forward solution for mean and covariance in scipy's ivp
# Dense output for lambda-expression
sol = scipy.integrate.solve_ivp(
mde,
(t + dt, t),
y0,
method=self.mde_solver,
atol=self.mde_atol,
rtol=self.mde_rtol,
args=(mde_forward_sol_mean, mde_forward_sol_cov),
dense_output=True,
)
dim = rv.mean.shape[0]
y_end = sol.y[:, -1]
new_mean = y_end[:dim]
new_cov = y_end[dim:].reshape((dim, dim))
sol_mean = lambda t: sol.sol(t)[:dim]
sol_cov = lambda t: sol.sol(t)[dim:].reshape((dim, dim))
return randvars.Normal(mean=new_mean, cov=new_cov), {
"sol": sol,
"sol_mean": sol_mean,
"sol_cov": sol_cov,
}
def _setup_vectorized_mde_forward(self, initrv, _diffusion=1.0):
"""Set up forward moment differential equations (MDEs).
Compute an ODE vector field that represents the MDEs and is
compatible with scipy.solve_ivp.
"""
dim = len(initrv.mean)
def f(t, y):
# Undo vectorization
mean, cov_flat = y[:dim], y[dim:]
cov = cov_flat.reshape((dim, dim))
# Apply iteration
F = self.driftmatfun(t)
u = self.forcevecfun(t)
L = self.dispmatfun(t)
new_mean = F @ mean + u
new_cov = F @ cov + cov @ F.T + _diffusion * L @ L.T
# Vectorize outcome
new_cov_flat = new_cov.flatten()
y_new = np.hstack((new_mean, new_cov_flat))
return y_new
initcov_flat = initrv.cov.flatten()
y0 = np.hstack((initrv.mean, initcov_flat))
return f, y0
def _setup_vectorized_mde_backward(self, finalrv_obtained, _diffusion=1.0):
"""Set up backward moment differential equations (MDEs).
Compute an ODE vector field that represents the MDEs and is
compatible with scipy.solve_ivp.
"""
dim = len(finalrv_obtained.mean)
def f(t, y, mde_forward_sol_mean, mde_forward_sol_cov):
# Undo vectorization
mean, cov_flat = y[:dim], y[dim:]
cov = cov_flat.reshape((dim, dim))
# Apply iteration
F = self.driftmatfun(t)
u = self.forcevecfun(t)
L = self.dispmatfun(t)
mde_forward_sol_cov_mat = mde_forward_sol_cov(t)
mde_forward_sol_mean_vec = mde_forward_sol_mean(t)
LL = _diffusion * L @ L.T
LL_inv_cov = np.linalg.solve(mde_forward_sol_cov_mat, LL.T).T
new_mean = F @ mean + LL_inv_cov @ (mean - mde_forward_sol_mean_vec) + u
new_cov = (F + LL_inv_cov) @ cov + cov @ (F + LL_inv_cov).T - LL
new_cov_flat = new_cov.flatten()
y_new = np.hstack((new_mean, new_cov_flat))
return y_new
finalcov_flat = finalrv_obtained.cov.flatten()
y0 = np.hstack((finalrv_obtained.mean, finalcov_flat))
return f, y0
class LTISDE(LinearSDE):
"""Linear time-invariant continuous Markov models of the form.
.. math:: d x(t) = [G x(t) + v] d t + L d w(t).
In the language of dynamic models,
x(t) : state process
G : drift matrix
v : force term/vector
L : dispersion matrix.
w(t) : Wiener process with unit diffusion.
Parameters
----------
driftmat :
This is F. It is the drift matrix of the SDE.
forcevec :
This is U. It is the force vector of the SDE.
dispmat :
This is L. It is the dispersion matrix of the SDE.
"""
def __init__(
self,
driftmat: np.ndarray,
forcevec: np.ndarray,
dispmat: np.ndarray,
forward_implementation="classic",
backward_implementation="classic",
):
_check_initial_state_dimensions(driftmat, forcevec, dispmat)
dimension = len(driftmat)
self.driftmat = driftmat
self.forcevec = forcevec
self.dispmat = dispmat
super().__init__(
dimension,
(lambda t: self.driftmat),
(lambda t: self.forcevec),
(lambda t: self.dispmat),
)
self.forward_implementation = forward_implementation
self.backward_implementation = backward_implementation
def forward_rv(
self,
rv,
t,
dt=None,
compute_gain=False,
_diffusion=1.0,
**kwargs,
):
if dt is None:
raise ValueError(
"Continuous-time transitions require a time-increment ``dt``."
)
discretised_model = self.discretise(dt=dt)
return discretised_model.forward_rv(
rv, t, compute_gain=compute_gain, _diffusion=_diffusion
)
def backward_rv(
self,
rv_obtained,
rv,
rv_forwarded=None,
gain=None,
t=None,
dt=None,
_diffusion=1.0,
**kwargs,
):
if dt is None:
raise ValueError(
"Continuous-time transitions require a time-increment ``dt``."
)
discretised_model = self.discretise(dt=dt)
return discretised_model.backward_rv(
rv_obtained=rv_obtained,
rv=rv,
rv_forwarded=rv_forwarded,
gain=gain,
t=t,
_diffusion=_diffusion,
)
@functools.lru_cache(maxsize=None)
def discretise(self, dt):
"""Return a discrete transition model (i.e. mild solution to SDE) using matrix
fraction decomposition.
That is, matrices A(h) and Q(h) and vector s(h) such
that the transition is
.. math:: x | x_\\text{old} \\sim \\mathcal{N}(A(h) x_\\text{old} + s(h), Q(h)) ,
which is the transition of the mild solution to the LTI SDE.
"""
if np.linalg.norm(self.forcevec) > 0:
zeros = np.zeros((self.dimension, self.dimension))
eye = np.eye(self.dimension)
driftmat = np.block([[self.driftmat, eye], [zeros, zeros]])
dispmat = np.concatenate((self.dispmat, np.zeros(self.dispmat.shape)))
ah_stack, qh_stack, _ = matrix_fraction_decomposition(driftmat, dispmat, dt)
proj = np.eye(self.dimension, 2 * self.dimension)
proj_rev = np.flip(proj, axis=1)
ah = proj @ ah_stack @ proj.T
sh = proj @ ah_stack @ proj_rev.T @ self.forcevec
qh = proj @ qh_stack @ proj.T
else:
ah, qh, _ = matrix_fraction_decomposition(self.driftmat, self.dispmat, dt)
sh = np.zeros(len(ah))
return discrete_transition.DiscreteLTIGaussian(
ah,
sh,
qh,
forward_implementation=self.forward_implementation,
backward_implementation=self.backward_implementation,
)
def _check_initial_state_dimensions(driftmat, forcevec, dispmat):
"""Checks that the matrices all align and are of proper shape.
Parameters
----------
driftmat : np.ndarray, shape=(n, n)
forcevec : np.ndarray, shape=(n,)
dispmat : np.ndarray, shape=(n, s)
"""
if driftmat.ndim != 2 or driftmat.shape[0] != driftmat.shape[1]:
raise ValueError("driftmatrix not of shape (n, n)")
if forcevec.ndim != 1:
raise ValueError("force not of shape (n,)")
if forcevec.shape[0] != driftmat.shape[1]:
raise ValueError("force not of shape (n,) or driftmatrix not of shape (n, n)")
if dispmat.ndim != 2:
raise ValueError("dispersion not of shape (n, s)") | src/probnum/statespace/sde.py | import functools
from typing import Callable, Optional
import numpy as np
import scipy.integrate
import scipy.linalg
from probnum import randvars
from probnum.type import FloatArgType, IntArgType
from . import discrete_transition, transition
from .sde_utils import matrix_fraction_decomposition
class SDE(transition.Transition):
"""Stochastic differential equation.
.. math:: d x(t) = g(t, x(t)) d t + L(t) d w(t),
driven by a Wiener process with unit diffusion.
"""
def __init__(
self,
dimension: IntArgType,
driftfun: Callable[[FloatArgType, np.ndarray], np.ndarray],
dispmatfun: Callable[[FloatArgType, np.ndarray], np.ndarray],
jacobfun: Callable[[FloatArgType, np.ndarray], np.ndarray],
):
self.dimension = dimension
self.driftfun = driftfun
self.dispmatfun = dispmatfun
self.jacobfun = jacobfun
super().__init__(input_dim=dimension, output_dim=dimension)
def forward_realization(
self,
realization,
t,
dt=None,
compute_gain=False,
_diffusion=1.0,
**kwargs,
):
return self._forward_realization_via_forward_rv(
realization,
t=t,
dt=dt,
compute_gain=compute_gain,
_diffusion=_diffusion,
**kwargs,
)
def forward_rv(
self,
rv,
t,
dt=None,
compute_gain=False,
_diffusion=1.0,
**kwargs,
):
raise NotImplementedError("Not available.")
def backward_realization(
self,
realization_obtained,
rv,
rv_forwarded=None,
gain=None,
t=None,
dt=None,
_diffusion=1.0,
**kwargs,
):
return self._backward_realization_via_backward_rv(
realization_obtained,
rv=rv,
rv_forwarded=rv_forwarded,
gain=gain,
t=t,
dt=dt,
_diffusion=_diffusion,
**kwargs,
)
def backward_rv(
self,
real_obtained,
rv,
rv_forwarded=None,
gain=None,
t=None,
dt=None,
_diffusion=1.0,
**kwargs,
):
raise NotImplementedError("Not available.")
class LinearSDE(SDE):
"""Linear stochastic differential equation (SDE),
.. math:: d x(t) = [G(t) x(t) + v(t)] d t + L(t) x(t) d w(t).
For Gaussian initial conditions, this solution is a Gaussian process.
Parameters
----------
driftmatfun :
This is G = G(t). The evaluations of this function are called
the driftmatrix of the SDE.
Returns np.ndarray with shape=(n, n)
forcevecfun :
This is v = v(t). Evaluations of this function are called
the force(vector) of the SDE.
Returns np.ndarray with shape=(n,)
dispmatfun :
This is L = L(t). Evaluations of this function are called
the dispersion(matrix) of the SDE.
Returns np.ndarray with shape=(n, s)
mde_atol
Absolute tolerance passed to the solver of the moment differential equations (MDEs). Optional. Default is 1e-6.
mde_rtol
Relative tolerance passed to the solver of the moment differential equations (MDEs). Optional. Default is 1e-6.
mde_solver
Method that is chosen in `scipy.integrate.solve_ivp`. Any string that is compatible with ``solve_ivp(..., method=mde_solve,...)`` works here.
Usual candidates are ``[RK45, LSODA, Radau, BDF, RK23, DOP853]``. Optional. Default is LSODA.
"""
def __init__(
self,
dimension: IntArgType,
driftmatfun: Callable[[FloatArgType], np.ndarray],
forcevecfun: Callable[[FloatArgType], np.ndarray],
dispmatfun: Callable[[FloatArgType], np.ndarray],
mde_atol: Optional[FloatArgType] = 1e-6,
mde_rtol: Optional[FloatArgType] = 1e-6,
mde_solver: Optional[str] = "LSODA",
):
# Once different filtering and smoothing algorithms are available,
# replicate the scheme from DiscreteGaussian here, in which
# the initialisation decides between, e.g., classic and sqrt implementations.
self.driftmatfun = driftmatfun
self.forcevecfun = forcevecfun
super().__init__(
dimension=dimension,
driftfun=(lambda t, x: self.driftmatfun(t) @ x + self.forcevecfun(t)),
dispmatfun=dispmatfun,
jacobfun=(lambda t, x: self.driftmatfun(t)),
)
self.mde_atol = mde_atol
self.mde_rtol = mde_rtol
self.mde_solver = mde_solver
def forward_rv(
self,
rv,
t,
dt=None,
_compute_gain=False,
_diffusion=1.0,
**kwargs,
):
if dt is None:
raise ValueError(
"Continuous-time transitions require a time-increment ``dt``."
)
return self._solve_mde_forward(rv, t, dt, _diffusion=_diffusion)
def backward_rv(
self,
rv_obtained,
rv,
rv_forwarded=None,
gain=None,
t=None,
dt=None,
_diffusion=1.0,
**kwargs,
):
if dt is None:
raise ValueError(
"Continuous-time transitions require a time-increment ``dt``."
)
# Ignore rv_forwarded
return self._solve_mde_backward(
rv_obtained=rv_obtained,
rv=rv,
t=t,
dt=dt,
_diffusion=_diffusion,
)
# Forward and backward implementation(s)
def _solve_mde_forward(self, rv, t, dt, _diffusion=1.0):
"""Solve forward moment differential equations (MDEs)."""
mde, y0 = self._setup_vectorized_mde_forward(
rv,
_diffusion=_diffusion,
)
# Dense output for lambda-expression
sol = scipy.integrate.solve_ivp(
mde,
(t, t + dt),
y0,
method=self.mde_solver,
atol=self.mde_atol,
rtol=self.mde_rtol,
dense_output=True,
)
dim = rv.mean.shape[0]
y_end = sol.y[:, -1]
new_mean = y_end[:dim]
new_cov = y_end[dim:].reshape((dim, dim))
# Useful for backward transitions
# Aka continuous time smoothing.
sol_mean = lambda t: sol.sol(t)[:dim]
sol_cov = lambda t: sol.sol(t)[dim:].reshape((dim, dim))
return randvars.Normal(mean=new_mean, cov=new_cov), {
"sol": sol,
"sol_mean": sol_mean,
"sol_cov": sol_cov,
}
def _solve_mde_backward(self, rv_obtained, rv, t, dt, _diffusion=1.0):
"""Solve backward moment differential equations (MDEs)."""
_, mde_forward_info = self._solve_mde_forward(rv, t, dt, _diffusion=_diffusion)
mde_forward_sol_mean = mde_forward_info["sol_mean"]
mde_forward_sol_cov = mde_forward_info["sol_cov"]
mde, y0 = self._setup_vectorized_mde_backward(
rv_obtained,
_diffusion=_diffusion,
)
# Use forward solution for mean and covariance in scipy's ivp
# Dense output for lambda-expression
sol = scipy.integrate.solve_ivp(
mde,
(t + dt, t),
y0,
method=self.mde_solver,
atol=self.mde_atol,
rtol=self.mde_rtol,
args=(mde_forward_sol_mean, mde_forward_sol_cov),
dense_output=True,
)
dim = rv.mean.shape[0]
y_end = sol.y[:, -1]
new_mean = y_end[:dim]
new_cov = y_end[dim:].reshape((dim, dim))
sol_mean = lambda t: sol.sol(t)[:dim]
sol_cov = lambda t: sol.sol(t)[dim:].reshape((dim, dim))
return randvars.Normal(mean=new_mean, cov=new_cov), {
"sol": sol,
"sol_mean": sol_mean,
"sol_cov": sol_cov,
}
def _setup_vectorized_mde_forward(self, initrv, _diffusion=1.0):
"""Set up forward moment differential equations (MDEs).
Compute an ODE vector field that represents the MDEs and is
compatible with scipy.solve_ivp.
"""
dim = len(initrv.mean)
def f(t, y):
# Undo vectorization
mean, cov_flat = y[:dim], y[dim:]
cov = cov_flat.reshape((dim, dim))
# Apply iteration
F = self.driftmatfun(t)
u = self.forcevecfun(t)
L = self.dispmatfun(t)
new_mean = F @ mean + u
new_cov = F @ cov + cov @ F.T + _diffusion * L @ L.T
# Vectorize outcome
new_cov_flat = new_cov.flatten()
y_new = np.hstack((new_mean, new_cov_flat))
return y_new
initcov_flat = initrv.cov.flatten()
y0 = np.hstack((initrv.mean, initcov_flat))
return f, y0
def _setup_vectorized_mde_backward(self, finalrv_obtained, _diffusion=1.0):
"""Set up backward moment differential equations (MDEs).
Compute an ODE vector field that represents the MDEs and is
compatible with scipy.solve_ivp.
"""
dim = len(finalrv_obtained.mean)
def f(t, y, mde_forward_sol_mean, mde_forward_sol_cov):
# Undo vectorization
mean, cov_flat = y[:dim], y[dim:]
cov = cov_flat.reshape((dim, dim))
# Apply iteration
F = self.driftmatfun(t)
u = self.forcevecfun(t)
L = self.dispmatfun(t)
mde_forward_sol_cov_mat = mde_forward_sol_cov(t)
mde_forward_sol_mean_vec = mde_forward_sol_mean(t)
LL = _diffusion * L @ L.T
LL_inv_cov = np.linalg.solve(mde_forward_sol_cov_mat, LL.T).T
new_mean = F @ mean + LL_inv_cov @ (mean - mde_forward_sol_mean_vec) + u
new_cov = (F + LL_inv_cov) @ cov + cov @ (F + LL_inv_cov).T - LL
new_cov_flat = new_cov.flatten()
y_new = np.hstack((new_mean, new_cov_flat))
return y_new
finalcov_flat = finalrv_obtained.cov.flatten()
y0 = np.hstack((finalrv_obtained.mean, finalcov_flat))
return f, y0
class LTISDE(LinearSDE):
"""Linear time-invariant continuous Markov models of the form.
.. math:: d x(t) = [G x(t) + v] d t + L d w(t).
In the language of dynamic models,
x(t) : state process
G : drift matrix
v : force term/vector
L : dispersion matrix.
w(t) : Wiener process with unit diffusion.
Parameters
----------
driftmat :
This is F. It is the drift matrix of the SDE.
forcevec :
This is U. It is the force vector of the SDE.
dispmat :
This is L. It is the dispersion matrix of the SDE.
"""
def __init__(
self,
driftmat: np.ndarray,
forcevec: np.ndarray,
dispmat: np.ndarray,
forward_implementation="classic",
backward_implementation="classic",
):
_check_initial_state_dimensions(driftmat, forcevec, dispmat)
dimension = len(driftmat)
self.driftmat = driftmat
self.forcevec = forcevec
self.dispmat = dispmat
super().__init__(
dimension,
(lambda t: self.driftmat),
(lambda t: self.forcevec),
(lambda t: self.dispmat),
)
self.forward_implementation = forward_implementation
self.backward_implementation = backward_implementation
def forward_rv(
self,
rv,
t,
dt=None,
compute_gain=False,
_diffusion=1.0,
**kwargs,
):
if dt is None:
raise ValueError(
"Continuous-time transitions require a time-increment ``dt``."
)
discretised_model = self.discretise(dt=dt)
return discretised_model.forward_rv(
rv, t, compute_gain=compute_gain, _diffusion=_diffusion
)
def backward_rv(
self,
rv_obtained,
rv,
rv_forwarded=None,
gain=None,
t=None,
dt=None,
_diffusion=1.0,
**kwargs,
):
if dt is None:
raise ValueError(
"Continuous-time transitions require a time-increment ``dt``."
)
discretised_model = self.discretise(dt=dt)
return discretised_model.backward_rv(
rv_obtained=rv_obtained,
rv=rv,
rv_forwarded=rv_forwarded,
gain=gain,
t=t,
_diffusion=_diffusion,
)
@functools.lru_cache(maxsize=None)
def discretise(self, dt):
"""Return a discrete transition model (i.e. mild solution to SDE) using matrix
fraction decomposition.
That is, matrices A(h) and Q(h) and vector s(h) such
that the transition is
.. math:: x | x_\\text{old} \\sim \\mathcal{N}(A(h) x_\\text{old} + s(h), Q(h)) ,
which is the transition of the mild solution to the LTI SDE.
"""
if np.linalg.norm(self.forcevec) > 0:
zeros = np.zeros((self.dimension, self.dimension))
eye = np.eye(self.dimension)
driftmat = np.block([[self.driftmat, eye], [zeros, zeros]])
dispmat = np.concatenate((self.dispmat, np.zeros(self.dispmat.shape)))
ah_stack, qh_stack, _ = matrix_fraction_decomposition(driftmat, dispmat, dt)
proj = np.eye(self.dimension, 2 * self.dimension)
proj_rev = np.flip(proj, axis=1)
ah = proj @ ah_stack @ proj.T
sh = proj @ ah_stack @ proj_rev.T @ self.forcevec
qh = proj @ qh_stack @ proj.T
else:
ah, qh, _ = matrix_fraction_decomposition(self.driftmat, self.dispmat, dt)
sh = np.zeros(len(ah))
return discrete_transition.DiscreteLTIGaussian(
ah,
sh,
qh,
forward_implementation=self.forward_implementation,
backward_implementation=self.backward_implementation,
)
def _check_initial_state_dimensions(driftmat, forcevec, dispmat):
"""Checks that the matrices all align and are of proper shape.
Parameters
----------
driftmat : np.ndarray, shape=(n, n)
forcevec : np.ndarray, shape=(n,)
dispmat : np.ndarray, shape=(n, s)
"""
if driftmat.ndim != 2 or driftmat.shape[0] != driftmat.shape[1]:
raise ValueError("driftmatrix not of shape (n, n)")
if forcevec.ndim != 1:
raise ValueError("force not of shape (n,)")
if forcevec.shape[0] != driftmat.shape[1]:
raise ValueError("force not of shape (n,) or driftmatrix not of shape (n, n)")
if dispmat.ndim != 2:
raise ValueError("dispersion not of shape (n, s)") | 0.899718 | 0.464598 |
import argparse
import io
import os
from typing import Iterable
from typing import Optional
from typing import Tuple
import apache_beam as beam
import torch
from apache_beam.io.filesystems import FileSystems
from apache_beam.ml.inference.base import KeyedModelHandler
from apache_beam.ml.inference.base import PredictionResult
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.pytorch_inference import PytorchModelHandler
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from PIL import Image
from torchvision import transforms
from torchvision.models.mobilenetv2 import MobileNetV2
def read_image(image_file_name: str,
path_to_dir: Optional[str] = None) -> Tuple[str, Image.Image]:
if path_to_dir is not None:
image_file_name = os.path.join(path_to_dir, image_file_name)
with FileSystems().open(image_file_name, 'r') as file:
data = Image.open(io.BytesIO(file.read())).convert('RGB')
return image_file_name, data
def preprocess_image(data: Image.Image) -> torch.Tensor:
image_size = (224, 224)
# Pre-trained PyTorch models expect input images normalized with the
# below values (see: https://pytorch.org/vision/stable/models.html)
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
transforms.Resize(image_size),
transforms.ToTensor(),
normalize,
])
return transform(data)
class PostProcessor(beam.DoFn):
def process(self, element: Tuple[str, PredictionResult]) -> Iterable[str]:
filename, prediction_result = element
prediction = torch.argmax(prediction_result.inference, dim=0)
yield filename + ',' + str(prediction.item())
def parse_known_args(argv):
"""Parses args for the workflow."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
dest='input',
default='gs://apache-beam-ml/testing/inputs/'
'it_mobilenetv2_imagenet_validation_inputs.txt',
help='Path to the text file containing image names.')
parser.add_argument(
'--output',
dest='output',
help='Path where to save output predictions.'
' text file.')
parser.add_argument(
'--model_state_dict_path',
dest='model_state_dict_path',
default='gs://apache-beam-ml/'
'models/imagenet_classification_mobilenet_v2.pt',
help="Path to the model's state_dict. "
"Default state_dict would be MobilenetV2.")
parser.add_argument(
'--images_dir',
default=None,
help='Path to the directory where images are stored.'
'Not required if image names in the input file have absolute path.')
return parser.parse_known_args(argv)
def run(argv=None, model_class=None, model_params=None, save_main_session=True):
"""
Args:
argv: Command line arguments defined for this example.
model_class: Reference to the class definition of the model.
If None, MobilenetV2 will be used as default .
model_params: Parameters passed to the constructor of the model_class.
These will be used to instantiate the model object in the
RunInference API.
"""
known_args, pipeline_args = parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
if not model_class:
model_class = MobileNetV2
model_params = {'num_classes': 1000}
# In this example we pass keyed inputs to RunInference transform.
# Therefore, we use KeyedModelHandler wrapper over PytorchModelHandler.
model_handler = KeyedModelHandler(
PytorchModelHandler(
state_dict_path=known_args.model_state_dict_path,
model_class=model_class,
model_params=model_params))
with beam.Pipeline(options=pipeline_options) as p:
filename_value_pair = (
p
| 'ReadImageNames' >> beam.io.ReadFromText(
known_args.input, skip_header_lines=1)
| 'ReadImageData' >> beam.Map(
lambda image_name: read_image(
image_file_name=image_name, path_to_dir=known_args.images_dir))
| 'PreprocessImages' >> beam.MapTuple(
lambda file_name, data: (file_name, preprocess_image(data))))
predictions = (
filename_value_pair
| 'PyTorchRunInference' >> RunInference(model_handler)
| 'ProcessOutput' >> beam.ParDo(PostProcessor()))
if known_args.output:
predictions | "WriteOutputToGCS" >> beam.io.WriteToText( # pylint: disable=expression-not-assigned
known_args.output,
shard_name_template='',
append_trailing_newlines=True)
if __name__ == '__main__':
run() | sdks/python/apache_beam/examples/inference/pytorch_image_classification.py | import argparse
import io
import os
from typing import Iterable
from typing import Optional
from typing import Tuple
import apache_beam as beam
import torch
from apache_beam.io.filesystems import FileSystems
from apache_beam.ml.inference.base import KeyedModelHandler
from apache_beam.ml.inference.base import PredictionResult
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.pytorch_inference import PytorchModelHandler
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from PIL import Image
from torchvision import transforms
from torchvision.models.mobilenetv2 import MobileNetV2
def read_image(image_file_name: str,
path_to_dir: Optional[str] = None) -> Tuple[str, Image.Image]:
if path_to_dir is not None:
image_file_name = os.path.join(path_to_dir, image_file_name)
with FileSystems().open(image_file_name, 'r') as file:
data = Image.open(io.BytesIO(file.read())).convert('RGB')
return image_file_name, data
def preprocess_image(data: Image.Image) -> torch.Tensor:
image_size = (224, 224)
# Pre-trained PyTorch models expect input images normalized with the
# below values (see: https://pytorch.org/vision/stable/models.html)
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
transforms.Resize(image_size),
transforms.ToTensor(),
normalize,
])
return transform(data)
class PostProcessor(beam.DoFn):
def process(self, element: Tuple[str, PredictionResult]) -> Iterable[str]:
filename, prediction_result = element
prediction = torch.argmax(prediction_result.inference, dim=0)
yield filename + ',' + str(prediction.item())
def parse_known_args(argv):
"""Parses args for the workflow."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
dest='input',
default='gs://apache-beam-ml/testing/inputs/'
'it_mobilenetv2_imagenet_validation_inputs.txt',
help='Path to the text file containing image names.')
parser.add_argument(
'--output',
dest='output',
help='Path where to save output predictions.'
' text file.')
parser.add_argument(
'--model_state_dict_path',
dest='model_state_dict_path',
default='gs://apache-beam-ml/'
'models/imagenet_classification_mobilenet_v2.pt',
help="Path to the model's state_dict. "
"Default state_dict would be MobilenetV2.")
parser.add_argument(
'--images_dir',
default=None,
help='Path to the directory where images are stored.'
'Not required if image names in the input file have absolute path.')
return parser.parse_known_args(argv)
def run(argv=None, model_class=None, model_params=None, save_main_session=True):
"""
Args:
argv: Command line arguments defined for this example.
model_class: Reference to the class definition of the model.
If None, MobilenetV2 will be used as default .
model_params: Parameters passed to the constructor of the model_class.
These will be used to instantiate the model object in the
RunInference API.
"""
known_args, pipeline_args = parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
if not model_class:
model_class = MobileNetV2
model_params = {'num_classes': 1000}
# In this example we pass keyed inputs to RunInference transform.
# Therefore, we use KeyedModelHandler wrapper over PytorchModelHandler.
model_handler = KeyedModelHandler(
PytorchModelHandler(
state_dict_path=known_args.model_state_dict_path,
model_class=model_class,
model_params=model_params))
with beam.Pipeline(options=pipeline_options) as p:
filename_value_pair = (
p
| 'ReadImageNames' >> beam.io.ReadFromText(
known_args.input, skip_header_lines=1)
| 'ReadImageData' >> beam.Map(
lambda image_name: read_image(
image_file_name=image_name, path_to_dir=known_args.images_dir))
| 'PreprocessImages' >> beam.MapTuple(
lambda file_name, data: (file_name, preprocess_image(data))))
predictions = (
filename_value_pair
| 'PyTorchRunInference' >> RunInference(model_handler)
| 'ProcessOutput' >> beam.ParDo(PostProcessor()))
if known_args.output:
predictions | "WriteOutputToGCS" >> beam.io.WriteToText( # pylint: disable=expression-not-assigned
known_args.output,
shard_name_template='',
append_trailing_newlines=True)
if __name__ == '__main__':
run() | 0.846435 | 0.354964 |
import pygame
from settings import settings
from map import Pipes, Background, Base
from bird import Bird
import random
import time
# Initialise every imported Pygame module (must run before any other pygame call).
pygame.init()
# INITIALISATION
# Enables/disables collisions with the pipes (turning them off is a debug aid).
collision = True
# Enables/disables gravity on the bird (name is misspelled "gravity"; kept as-is, it is used below).
gravitiy = True
# The main game loop keeps running while we are in-game.
gameOver = False
isPlaying = True
# Multiplier applied to the frame cap; adjusted with the arrow keys during play.
speed_multiplier = 1
# True while the start menu is shown (human-play mode only).
menu = True
score = 0
# When True the game runs in AI-training mode (population of birds, no player).
IATraining = True
if IATraining:
    # Collisions and gravity are switched off while training the AI population.
    collision = False
    gravitiy = False
# Values imported from settings.py are cached in module-level names here,
# to avoid a dictionary lookup at every use.
pipe_img_x_height = settings['pipe_img_x_height']
horizontal_space_btw_pipes = settings['horizontal_space_btw_pipes']
vertical_space_btw_pipes = settings['vertical_space_btw_pipes']
window_x_size = settings['window_size'][0]
window_y_size = settings['window_size'][1]
populationNumber = settings['populationNumber']
# Clock used to cap the number of loop iterations (frames) per second.
clock = pygame.time.Clock()
# Create the game window.
window = pygame.display.set_mode((window_x_size, window_y_size))
# Window title.
pygame.display.set_caption('I.A Flappy Bird')
# Load an image and use it as the window icon.
icon = pygame.image.load('imgs/bird1.png')
pygame.display.set_icon(icon)
# SCORE PERSISTENCE
# Opening in append mode creates score.txt if it does not exist yet.
# NOTE(review): this handle is never closed nor used again — saveScore()
# reopens the file itself; this line only guarantees the file exists.
scoreFile = open("score.txt", "a")
# scoreFile.close()
# For simplicity and speed, every image is loaded once and kept in a variable.
bg_img = pygame.image.load('imgs/bg2.png').convert_alpha()
pipe_img = pygame.image.load('imgs/pipe.png').convert_alpha()
bird_img = pygame.image.load('imgs/bird1.png').convert_alpha()
base_img = pygame.image.load('imgs/base.png').convert_alpha()
# Pipe and background objects are created from the classes defined in map.py.
def createObjects():
    """
    Build every game object — background, ground, the two pipe pairs and
    the bird — store them in module-level globals, and also return them
    as a tuple.
    """
    global background, base, pipes, pipes2, bird
    # The second pipe pair starts one horizontal gap beyond the first one.
    second_pipe_x = window_x_size + horizontal_space_btw_pipes
    background, base = Background(bg_img, window), Base(base_img, window)
    pipes, pipes2 = Pipes(pipe_img, window_x_size), Pipes(pipe_img, second_pipe_x)
    bird = Bird(200, 200, window)
    return background, base, pipes, pipes2, bird
# Build the initial set of game objects (populates the module-level globals).
createObjects()
def displayNumber(x, y, text, color=(255, 255, 255)):
    """
    Draw *text* on the window at (x, y) using the bundled Flappy Bird
    TTF font at size 50 (used for the in-game score counter).
    """
    # The font file was downloaded from a royalty-free source.
    score_font = pygame.font.Font("flappy-bird-font.ttf", 50)
    # render() pre-draws the text onto a surface that blit() can copy.
    window.blit(score_font.render(text, True, color), [x, y])
def displayText(x, y, text, font_size, color=(255, 255, 255)):
    """
    Draw *text* on the window at (x, y) with the system "comicsansms"
    font at the requested *font_size* (used for the menu screens).
    """
    ui_font = pygame.font.SysFont("comicsansms", font_size)
    # render() pre-draws the text onto a surface that blit() can copy.
    window.blit(ui_font.render(text, True, color), [x, y])
def saveScore(score):
    """
    Append *score* to score.txt.

    The file holds one comma-separated line; each new entry is written
    as "," + score so checkBestScore() can split it back on commas.
    A with-statement guarantees the handle is closed even if the write
    fails (the original open()/close() pair leaked on error).
    """
    with open('score.txt', "a") as saved_scores:
        saved_scores.write("," + str(score))
    print("Score sauvegardée : {}".format(score))
def checkBestScore():
"""
Retourne le meilleur score du fichier score.txt en tant que bestScore
"""
with open("score.txt", 'r') as score:
listScore = (score.read().split(sep=","))
listScoreInt = []
for n in range(len(listScore)):
listScoreInt.append(int(listScore[n]))
bestScore = max(listScoreInt)
return bestScore
# Guard counter so the AI population is generated only once in the main loop.
runOnce = 0
# List of Bird instances making up the AI training population.
birdsPopulation = []
def generatePopulation(birdsPopulation):
    """
    Fill *birdsPopulation* in place with Bird objects until it holds
    populationNumber birds (value taken from settings.py), then return
    the list. Each bird's jump distance is drawn at random and acts as
    its "personality".
    """
    print('Création de la population ...')
    for _ in range(populationNumber - len(birdsPopulation)):
        jump_distance = random.randint(50, 300)
        birdsPopulation.append(
            Bird(300, 150, window, pipe1Jump=jump_distance, pipe2Jump=jump_distance))
        print('Nb d\'oiseau : ', len(birdsPopulation), '/', populationNumber)
    return birdsPopulation
# On utilise une fonction de pygame qu'on stock dans une variable pour pouvoir accèder plus tard aux touches préssées
keys = pygame.key.get_pressed()
titre = "Flappy Bird"
regle = "Règles: - Il faut que l'oiseau passe entre les tuyaux"
regle2 = "- Il ne faut pas que l'oiseau touche les tuyaux"
regle3 = "- A chaque tuyaux passé, +1 point"
regle4 = "- Appuyez sur espace pour sauter et lancer le jeu !"
bestScoreWithText = "Meilleur score : "
# Boucle principale, tant que le jeu est actif, cette boucle tourne
while isPlaying:
# MENU ACCUEIL
if menu and not IATraining:
# Création du fond, est des textes explicatfis
background.draw_background()
base.draw_base()
displayText(175, 25, titre, 40)
displayText(30, 130, regle, 20)
displayText(100, 180, regle2, 20)
displayText(100, 230, regle3, 20)
displayText(100, 280, regle4, 20)
displayText(175, 380, bestScoreWithText + str(checkBestScore()), 30)
# Récupération des touches préssées et événements
for event in pygame.event.get():
# Si nous récupérons l'évenement "quitter", on arrête la boucle de jeu principale
if event.type == pygame.QUIT:
isPlaying = False
# Si on appuie sur la touche espace, le menu s'efface et le jeu commence
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
menu = False
# Actualisation de l'affichage Pygame
pygame.display.update()
# JEU
elif not gameOver:
# Régulation du nombre de répétitions de la boucle par seconde
clock.tick(settings['fps'] * speed_multiplier)
# On empêche le multiplicateur de descendre trop bas, car un nombre d'IPS ne peut pas être négatif
if speed_multiplier <= 0.2:
speed_multiplier = 0.2
# Capture des boutons appuyés
for event in pygame.event.get():
# Si nous récupérons l'évenement "quitter", on arrête la boucle de jeu principale
if event.type == pygame.QUIT:
isPlaying = False
# Si on appuie sur la touche espace, l'oiseau saute
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
if not bird.isJumping:
bird.jump()
if bird.isJumping:
bird.resetJump()
bird.jump()
# On peut contrôler avec les flèches la vitesse du jeu
if event.key == pygame.K_RIGHT:
speed_multiplier += .1
print("speed multiplier: {}".format(round(speed_multiplier, 2)), end="\r") # On est obligés de round() la valeur à cause des floating points
if event.key == pygame.K_LEFT:
speed_multiplier -= .1
print("speed multiplier: {}".format(round(speed_multiplier, 2)), end="\r")
if event.key == pygame.K_DOWN:
speed_multiplier = 1.0
print("speed multiplier: {}".format(round(speed_multiplier, 2)), end="\r")
# On est obligés de re-créer un nouvel event car le type est différents
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if not bird.isJumping:
bird.jump()
if bird.isJumping:
bird.resetJump()
bird.jump()
# Affichage du fond grâce à l'appel de la méthode draw_background de la class Background depuis map.py
background.draw_background()
background.move_background()
# Affichage et déplacements des tuyeaux grâce à l'appel de la méthode show et move de la class Pipes depuis map.py
pipes.show(window)
pipes.move()
# Affichage du deuxième groupe de tuyau
pipes2.show(window)
pipes2.move()
# Affichage de l'oiseau
bird.show()
# Déplacement et actualisation de l'affichage via les méthodes de la class Background depuis map.py
base.move_base()
base.draw_base()
# Quand le premier tuyau sort de la carte:
if pipes.x <= -pipe_img_x_height:
otherPipePosition = pipes2.x
# Recréation de l'objet tuyaux
del(pipes)
pipes = Pipes(pipe_img, otherPipePosition + horizontal_space_btw_pipes)
# Quand le second tuyeaux sort de la carte
if pipes2.x <= -pipe_img_x_height:
otherPipePosition = pipes.x
# Recréation de l'objet tuyaux2
del(pipes2)
pipes2 = Pipes(pipe_img, otherPipePosition + horizontal_space_btw_pipes)
# Si la base arrive à -48px (comme elle recule), il faut la redessiner à sa position initiale ; permet d'avoir un défilement infinie de la base
if base.x <= -48:
del(base)
# print('new base')
base = Base(base_img, window)
# Si le fond est trop à gauche, alors on le supprime et on en recréer un
if background.x <= -350:
del(background)
# print('new background')
background = Background(bg_img, window)
# Si l'oiseau touche le sol, on perd
if bird.y >= 492:
gameOver = True
saveScore(score)
# Si l'oiseau va au dessus de la limite de la fenêtre, on perd
if bird.y <= 0:
gameOver = True
saveScore(score)
# Si l'oiseau n'est pas en saut, il subit la force de gravité
if gravitiy:
if not bird.isJumping:
bird.y += bird.velocity
# COLLISION
# tuyau 1
if pipes.collide(bird, window):
# Si l'oiseau n'est pas dans la séparation verticale des 2 tuyaux
if bird.y < pipes.y or bird.y > (pipes.y + vertical_space_btw_pipes):
print("Collision 1 détéctée {}".format(random.randint(0, 99)))
if collision:
gameOver = True
saveScore(score)
else:
if bird.x - (pipes.x + 44) == 0:
score += 1
print('score : ', score)
# tuyau 2
if pipes2.collide(bird, window):
# Si l'oiseau n'est pas dans la séparation verticale des 2 tuyaux
if bird.y < pipes2.y or bird.y > (pipes2.y + vertical_space_btw_pipes):
print("Collision 2 détéctée {}".format(random.randint(0, 99)))
if collision:
gameOver = True
saveScore(score)
else:
if bird.x - (pipes2.x + 44) == 0:
score += 1
print('score : ', score)
# Affiche le score
displayNumber(260, 30, str(score))
# Si le mode IA est activé
if IATraining:
# Lancer qu'une seule fois la création de population
if runOnce == 0:
generatePopulation(birdsPopulation)
runOnce += 1
else:
print("Nb d'oiseau : {}/{}".format(len(birdsPopulation), populationNumber))
birdPipes1Distance = pipes.x - bird.x
print("DISTANCE OISEAU TUYAU1 = {}".format(birdPipes1Distance))
birdPipes2Distance = pipes2.x - bird.x
print("DISTANCE OISEAU TUYAU2 = {}".format(birdPipes2Distance))
# Si il reste une population d'oiseau
if len(birdsPopulation) > 0:
# Pour chaque oiseau de la population
for uniqueBird in birdsPopulation:
# n est le numéro de l'index de chaque oiseau dans la liste de population
n = birdsPopulation.index(uniqueBird)
# print('bird number', n, 'will jump at dist =', birdsPopulation[n].pipe1Jump)
# Afficher l'oiseau
birdsPopulation[n].show()
# Faire subir à chaque oiseau la gravité
if not birdsPopulation[n].isJumping:
birdsPopulation[n].y += birdsPopulation[n].velocity
# Faire sauter chaque oiseau aléatoirement (=débug)
birdsPopulation[random.randint(0, len(birdsPopulation)-1)].jump()
# Chaque oiseau saute quand il atteint sa personnalité
if(birdPipes1Distance == birdsPopulation[n].pipe1Jump):
birdsPopulation[n].jump()
print("l'oiseau a sauté")
# Augmente le fitness de chaque oiseau de 0.1 par frame
birdsPopulation[n].fitness += 0.1
# print('fitness oiseau ', n, '=', birdsPopulation[n].fitness)
# Enregistrement du fitness de tous les oiseaux
listFitness = []
listFitness.append(int(birdsPopulation[n].fitness))
bestFitness = max(listFitness)
# print('best fitness = ',bestFitness, 'for bird index =', listFitness.index(bestFitness))
# COLLISION tuyau 1
if pipes.collide(birdsPopulation[n], window):
# Si l'oiseau n'est pas dans la séparation verticale des 2 tuyaux
if birdsPopulation[n].y < pipes.y or birdsPopulation[n].y > (pipes.y + vertical_space_btw_pipes):
# print('Collision 1 détéctée', random.randint(0, 99))
birdsPopulation.pop(n)
# print('bird', n, 'died on first pipe')
n -= 1
else:
if birdsPopulation[n].x - (pipes.x + 44) == 0:
birdsPopulation[n].fitness += 1
if len(birdsPopulation) > 0:
# COLLISION tuyau 2
if pipes2.collide(birdsPopulation[n], window):
# Si l'oiseau n'est pas dans la séparation verticale des 2 tuyaux
if birdsPopulation[n].y < pipes2.y or birdsPopulation[n].y > (pipes2.y + vertical_space_btw_pipes):
# print('Collision 1 détéctée', random.randint(0, 99))
birdsPopulation.pop(n)
# print('bird', n, 'died on second pipe')
n -= 1
else:
if birdsPopulation[n].x - (pipes2.x + 44) == 0:
birdsPopulation[n].fitness += 1
# Actualisation de l'affichage Pygame
pygame.display.update()
# GAME OVER
else:
background.draw_background()
base.draw_base()
displayText(175, 100, "Game Over", 40)
displayNumber(260, 30, str(score))
displayText(175, 200, "Appuyez sur SPACE pour rejouer", 20)
displayText(175, 250, "Appuyez sur ECHAP pour quitter", 20)
# Le joueur a peut être fait un nouveau meilleur score, il faut donc actualiser la variable bestScore
bestScore = checkBestScore()
# Récupération des touches préssées et événements
for event in pygame.event.get():
# Si nous récupérons l'évenement "quitter", on arrête la boucle de jeu principale
if event.type == pygame.QUIT:
isPlaying = False
# Si on appuie sur la touche espace, le menu s'efface et le jeu commence
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
gameOver = False
menu = True
score = 0
createObjects()
if event.key == pygame.K_ESCAPE:
isPlaying = False
# Actualisation de l'affichage Pygame
pygame.display.update()
# Si la boucle principale de jeu est finie, on doit quitter proprement le programme
pygame.quit()
print("Fin du jeu :)")
quit() | main.py | import pygame
from settings import settings
from map import Pipes, Background, Base
from bird import Bird
import random
import time
# Initalisation du module Pygame
pygame.init()
# INITIALISATION
# Active ou non les collisions avec les tuyau (=débug)
collision = True
gravitiy = True
# La boucle de jeu principale doit être executée tant que nous sommes en jeu
gameOver = False
isPlaying = True
speed_multiplier = 1
menu = True
score = 0
IATraining = True
if IATraining:
collision = False
gravitiy = False
# Les variables qui sont importées depuis un autre fichier sont stockées ici, pour éviter de les importer à chaque utilisation
pipe_img_x_height = settings['pipe_img_x_height']
horizontal_space_btw_pipes = settings['horizontal_space_btw_pipes']
vertical_space_btw_pipes = settings['vertical_space_btw_pipes']
window_x_size = settings['window_size'][0]
window_y_size = settings['window_size'][1]
populationNumber = settings['populationNumber']
# Variable qui va permettre de réguler les FPS
clock = pygame.time.Clock()
# Initialisation de la fenêtre
window = pygame.display.set_mode((window_x_size, window_y_size))
# Titre de la fenêtre
pygame.display.set_caption('I.A Flappy Bird')
# On récupère une image et on l'affiche en en-tête de fenêtre
icon = pygame.image.load('imgs/bird1.png')
pygame.display.set_icon(icon)
# SAUVEGARDE SCORE
# Ouverture en mode append ; Cela permet de créer le fichier si il n'existe pas
scoreFile = open("score.txt", "a")
# scoreFile.close()
# Dans un soucis de simplicité et de légereté du code, stockage des images dans des variables
bg_img = pygame.image.load('imgs/bg2.png').convert_alpha()
pipe_img = pygame.image.load('imgs/pipe.png').convert_alpha()
bird_img = pygame.image.load('imgs/bird1.png').convert_alpha()
base_img = pygame.image.load('imgs/base.png').convert_alpha()
# Création des objets tuyaux et fond de carte depuis la class Map dans map.py
def createObjects():
'''
Créé tous les objets (2 tuyaux, le sol, le fond, et l'oiseau depuis les classes respectives)
'''
global background, base, pipes, pipes2, bird
background = Background(bg_img, window)
base = Base(base_img, window)
pipes = Pipes(pipe_img, window_x_size)
pipes2 = Pipes(pipe_img, window_x_size + horizontal_space_btw_pipes)
bird = Bird(200, 200, window)
return(background, base, pipes, pipes2, bird)
createObjects()
def displayNumber(x, y, text, color=(255, 255, 255)):
'''
Affiche un nombre
'''
# Font est une variable qui définie la police que nous voulons utiliser. Nous en avons importée une libre de droits sur internet
font = pygame.font.Font("flappy-bird-font.ttf", 50)
message = font.render(text, True, color) # On pré-rend le message pour pouvoir l'afficher
window.blit(message, [x, y])
def displayText(x, y, text, font_size, color=(255, 255, 255)):
'''
Affiche un texte
'''
font = pygame.font.SysFont("comicsansms", font_size)
message = font.render(text, True, color) # On pré-rend le message pour pouvoir l'afficher
window.blit(message, [x, y])
def saveScore(score):
'''
Enregistre le score dans le fichier score.txt
'''
savedScores = open('score.txt', "a")
scoreToSave = "," + str(score)
savedScores.write(scoreToSave)
savedScores.close()
print("Score sauvegardée : {}".format(score))
def checkBestScore():
"""
Retourne le meilleur score du fichier score.txt en tant que bestScore
"""
with open("score.txt", 'r') as score:
listScore = (score.read().split(sep=","))
listScoreInt = []
for n in range(len(listScore)):
listScoreInt.append(int(listScore[n]))
bestScore = max(listScoreInt)
return bestScore
runOnce = 0
birdsPopulation = []
def generatePopulation(birdsPopulation):
"""
Génère la population d'oiseau que l'on va entraîner. Le nombre d'oiseau dépend de la valeur choisie dans settings.py
"""
print('Création de la population ...')
while len(birdsPopulation) < populationNumber:
randomJumpDistance = random.randint(50, 300)
birdsPopulation.append(Bird(300, 150, window, pipe1Jump=randomJumpDistance, pipe2Jump=randomJumpDistance))
print('Nb d\'oiseau : ', len(birdsPopulation), '/', populationNumber)
return birdsPopulation
# On utilise une fonction de pygame qu'on stock dans une variable pour pouvoir accèder plus tard aux touches préssées
keys = pygame.key.get_pressed()
titre = "Flappy Bird"
regle = "Règles: - Il faut que l'oiseau passe entre les tuyaux"
regle2 = "- Il ne faut pas que l'oiseau touche les tuyaux"
regle3 = "- A chaque tuyaux passé, +1 point"
regle4 = "- Appuyez sur espace pour sauter et lancer le jeu !"
bestScoreWithText = "Meilleur score : "
# Boucle principale, tant que le jeu est actif, cette boucle tourne
while isPlaying:
# MENU ACCUEIL
if menu and not IATraining:
# Création du fond, est des textes explicatfis
background.draw_background()
base.draw_base()
displayText(175, 25, titre, 40)
displayText(30, 130, regle, 20)
displayText(100, 180, regle2, 20)
displayText(100, 230, regle3, 20)
displayText(100, 280, regle4, 20)
displayText(175, 380, bestScoreWithText + str(checkBestScore()), 30)
# Récupération des touches préssées et événements
for event in pygame.event.get():
# Si nous récupérons l'évenement "quitter", on arrête la boucle de jeu principale
if event.type == pygame.QUIT:
isPlaying = False
# Si on appuie sur la touche espace, le menu s'efface et le jeu commence
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
menu = False
# Actualisation de l'affichage Pygame
pygame.display.update()
# JEU
elif not gameOver:
# Régulation du nombre de répétitions de la boucle par seconde
clock.tick(settings['fps'] * speed_multiplier)
# On empêche le multiplicateur de descendre trop bas, car un nombre d'IPS ne peut pas être négatif
if speed_multiplier <= 0.2:
speed_multiplier = 0.2
# Capture des boutons appuyés
for event in pygame.event.get():
# Si nous récupérons l'évenement "quitter", on arrête la boucle de jeu principale
if event.type == pygame.QUIT:
isPlaying = False
# Si on appuie sur la touche espace, l'oiseau saute
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
if not bird.isJumping:
bird.jump()
if bird.isJumping:
bird.resetJump()
bird.jump()
# On peut contrôler avec les flèches la vitesse du jeu
if event.key == pygame.K_RIGHT:
speed_multiplier += .1
print("speed multiplier: {}".format(round(speed_multiplier, 2)), end="\r") # On est obligés de round() la valeur à cause des floating points
if event.key == pygame.K_LEFT:
speed_multiplier -= .1
print("speed multiplier: {}".format(round(speed_multiplier, 2)), end="\r")
if event.key == pygame.K_DOWN:
speed_multiplier = 1.0
print("speed multiplier: {}".format(round(speed_multiplier, 2)), end="\r")
# On est obligés de re-créer un nouvel event car le type est différents
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if not bird.isJumping:
bird.jump()
if bird.isJumping:
bird.resetJump()
bird.jump()
# Affichage du fond grâce à l'appel de la méthode draw_background de la class Background depuis map.py
background.draw_background()
background.move_background()
# Affichage et déplacements des tuyeaux grâce à l'appel de la méthode show et move de la class Pipes depuis map.py
pipes.show(window)
pipes.move()
# Affichage du deuxième groupe de tuyau
pipes2.show(window)
pipes2.move()
# Affichage de l'oiseau
bird.show()
# Déplacement et actualisation de l'affichage via les méthodes de la class Background depuis map.py
base.move_base()
base.draw_base()
# Quand le premier tuyau sort de la carte:
if pipes.x <= -pipe_img_x_height:
otherPipePosition = pipes2.x
# Recréation de l'objet tuyaux
del(pipes)
pipes = Pipes(pipe_img, otherPipePosition + horizontal_space_btw_pipes)
# Quand le second tuyeaux sort de la carte
if pipes2.x <= -pipe_img_x_height:
otherPipePosition = pipes.x
# Recréation de l'objet tuyaux2
del(pipes2)
pipes2 = Pipes(pipe_img, otherPipePosition + horizontal_space_btw_pipes)
# Si la base arrive à -48px (comme elle recule), il faut la redessiner à sa position initiale ; permet d'avoir un défilement infinie de la base
if base.x <= -48:
del(base)
# print('new base')
base = Base(base_img, window)
# Si le fond est trop à gauche, alors on le supprime et on en recréer un
if background.x <= -350:
del(background)
# print('new background')
background = Background(bg_img, window)
# Si l'oiseau touche le sol, on perd
if bird.y >= 492:
gameOver = True
saveScore(score)
# Si l'oiseau va au dessus de la limite de la fenêtre, on perd
if bird.y <= 0:
gameOver = True
saveScore(score)
# Si l'oiseau n'est pas en saut, il subit la force de gravité
if gravitiy:
if not bird.isJumping:
bird.y += bird.velocity
# COLLISION
# tuyau 1
if pipes.collide(bird, window):
# Si l'oiseau n'est pas dans la séparation verticale des 2 tuyaux
if bird.y < pipes.y or bird.y > (pipes.y + vertical_space_btw_pipes):
print("Collision 1 détéctée {}".format(random.randint(0, 99)))
if collision:
gameOver = True
saveScore(score)
else:
if bird.x - (pipes.x + 44) == 0:
score += 1
print('score : ', score)
# tuyau 2
if pipes2.collide(bird, window):
# Si l'oiseau n'est pas dans la séparation verticale des 2 tuyaux
if bird.y < pipes2.y or bird.y > (pipes2.y + vertical_space_btw_pipes):
print("Collision 2 détéctée {}".format(random.randint(0, 99)))
if collision:
gameOver = True
saveScore(score)
else:
if bird.x - (pipes2.x + 44) == 0:
score += 1
print('score : ', score)
# Affiche le score
displayNumber(260, 30, str(score))
# Si le mode IA est activé
if IATraining:
# Lancer qu'une seule fois la création de population
if runOnce == 0:
generatePopulation(birdsPopulation)
runOnce += 1
else:
print("Nb d'oiseau : {}/{}".format(len(birdsPopulation), populationNumber))
birdPipes1Distance = pipes.x - bird.x
print("DISTANCE OISEAU TUYAU1 = {}".format(birdPipes1Distance))
birdPipes2Distance = pipes2.x - bird.x
print("DISTANCE OISEAU TUYAU2 = {}".format(birdPipes2Distance))
# Si il reste une population d'oiseau
if len(birdsPopulation) > 0:
# Pour chaque oiseau de la population
for uniqueBird in birdsPopulation:
# n est le numéro de l'index de chaque oiseau dans la liste de population
n = birdsPopulation.index(uniqueBird)
# print('bird number', n, 'will jump at dist =', birdsPopulation[n].pipe1Jump)
# Afficher l'oiseau
birdsPopulation[n].show()
# Faire subir à chaque oiseau la gravité
if not birdsPopulation[n].isJumping:
birdsPopulation[n].y += birdsPopulation[n].velocity
# Faire sauter chaque oiseau aléatoirement (=débug)
birdsPopulation[random.randint(0, len(birdsPopulation)-1)].jump()
# Chaque oiseau saute quand il atteint sa personnalité
if(birdPipes1Distance == birdsPopulation[n].pipe1Jump):
birdsPopulation[n].jump()
print("l'oiseau a sauté")
# Augmente le fitness de chaque oiseau de 0.1 par frame
birdsPopulation[n].fitness += 0.1
# print('fitness oiseau ', n, '=', birdsPopulation[n].fitness)
# Enregistrement du fitness de tous les oiseaux
listFitness = []
listFitness.append(int(birdsPopulation[n].fitness))
bestFitness = max(listFitness)
# print('best fitness = ',bestFitness, 'for bird index =', listFitness.index(bestFitness))
# COLLISION tuyau 1
if pipes.collide(birdsPopulation[n], window):
# Si l'oiseau n'est pas dans la séparation verticale des 2 tuyaux
if birdsPopulation[n].y < pipes.y or birdsPopulation[n].y > (pipes.y + vertical_space_btw_pipes):
# print('Collision 1 détéctée', random.randint(0, 99))
birdsPopulation.pop(n)
# print('bird', n, 'died on first pipe')
n -= 1
else:
if birdsPopulation[n].x - (pipes.x + 44) == 0:
birdsPopulation[n].fitness += 1
if len(birdsPopulation) > 0:
# COLLISION tuyau 2
if pipes2.collide(birdsPopulation[n], window):
# Si l'oiseau n'est pas dans la séparation verticale des 2 tuyaux
if birdsPopulation[n].y < pipes2.y or birdsPopulation[n].y > (pipes2.y + vertical_space_btw_pipes):
# print('Collision 1 détéctée', random.randint(0, 99))
birdsPopulation.pop(n)
# print('bird', n, 'died on second pipe')
n -= 1
else:
if birdsPopulation[n].x - (pipes2.x + 44) == 0:
birdsPopulation[n].fitness += 1
# Actualisation de l'affichage Pygame
pygame.display.update()
# GAME OVER
else:
background.draw_background()
base.draw_base()
displayText(175, 100, "Game Over", 40)
displayNumber(260, 30, str(score))
displayText(175, 200, "Appuyez sur SPACE pour rejouer", 20)
displayText(175, 250, "Appuyez sur ECHAP pour quitter", 20)
# Le joueur a peut être fait un nouveau meilleur score, il faut donc actualiser la variable bestScore
bestScore = checkBestScore()
# Récupération des touches préssées et événements
for event in pygame.event.get():
# Si nous récupérons l'évenement "quitter", on arrête la boucle de jeu principale
if event.type == pygame.QUIT:
isPlaying = False
# Si on appuie sur la touche espace, le menu s'efface et le jeu commence
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
gameOver = False
menu = True
score = 0
createObjects()
if event.key == pygame.K_ESCAPE:
isPlaying = False
# Actualisation de l'affichage Pygame
pygame.display.update()
# Si la boucle principale de jeu est finie, on doit quitter proprement le programme
pygame.quit()
print("Fin du jeu :)")
quit() | 0.217919 | 0.277314 |
import pytest
from catkit.catkit_types import FlipMountPosition
def test_import():
import catkit.hardware.thorlabs.ThorlabsMFF101
def test_delayed_import():
import catkit.hardware.thorlabs.ThorlabsMFF101
with pytest.raises(ImportError):
catkit.hardware.thorlabs.ThorlabsMFF101.ThorlabsMFF101()
def test_emulator_import():
from catkit.emulators.thorlabs.MFF101 import MFF101Emulator
from catkit.interfaces.Instrument import SimInstrument
import catkit.hardware.thorlabs.ThorlabsMFF101
class HicatMFF101Emulator(MFF101Emulator):
def move_to_position_1(self):
pass
def move_to_position_2(self):
pass
class ThorlabsMFF101(SimInstrument, catkit.hardware.thorlabs.ThorlabsMFF101.ThorlabsMFF101):
instrument_lib = HicatMFF101Emulator
ThorlabsMFF101(config_id="dummy", serial="sn", in_beam_position=1)
def test_position_tracking():
from catkit.emulators.thorlabs.MFF101 import MFF101Emulator
from catkit.interfaces.Instrument import SimInstrument
import catkit.hardware.thorlabs.ThorlabsMFF101
class HicatMFF101Emulator(MFF101Emulator):
def __init__(self, config_id, in_beam_position):
super().__init__(config_id, in_beam_position)
self.pos1_counter = 0
self.pos2_counter = 0
def move_to_position_1(self):
self.pos1_counter += 1
def move_to_position_2(self):
self.pos2_counter += 1
class ThorlabsMFF101(SimInstrument, catkit.hardware.thorlabs.ThorlabsMFF101.ThorlabsMFF101):
instrument_lib = HicatMFF101Emulator
with ThorlabsMFF101(config_id="dummy", serial="sn", in_beam_position=1) as device:
device.move(FlipMountPosition.IN_BEAM)
assert device.current_position is FlipMountPosition.IN_BEAM
assert device.instrument_lib.pos1_counter == 1
device.move(FlipMountPosition.OUT_OF_BEAM)
assert device.current_position is FlipMountPosition.OUT_OF_BEAM
assert device.instrument_lib.pos2_counter == 1
device.move(FlipMountPosition.OUT_OF_BEAM)
assert device.current_position is FlipMountPosition.OUT_OF_BEAM
assert device.instrument_lib.pos2_counter == 1 # Already in position so shouldn't be incremented.
device.move(FlipMountPosition.OUT_OF_BEAM, force=True)
assert device.current_position is FlipMountPosition.OUT_OF_BEAM
assert device.instrument_lib.pos2_counter == 2 | catkit/emulators/tests/test_MFF101.py | import pytest
from catkit.catkit_types import FlipMountPosition
def test_import():
import catkit.hardware.thorlabs.ThorlabsMFF101
def test_delayed_import():
import catkit.hardware.thorlabs.ThorlabsMFF101
with pytest.raises(ImportError):
catkit.hardware.thorlabs.ThorlabsMFF101.ThorlabsMFF101()
def test_emulator_import():
from catkit.emulators.thorlabs.MFF101 import MFF101Emulator
from catkit.interfaces.Instrument import SimInstrument
import catkit.hardware.thorlabs.ThorlabsMFF101
class HicatMFF101Emulator(MFF101Emulator):
def move_to_position_1(self):
pass
def move_to_position_2(self):
pass
class ThorlabsMFF101(SimInstrument, catkit.hardware.thorlabs.ThorlabsMFF101.ThorlabsMFF101):
instrument_lib = HicatMFF101Emulator
ThorlabsMFF101(config_id="dummy", serial="sn", in_beam_position=1)
def test_position_tracking():
from catkit.emulators.thorlabs.MFF101 import MFF101Emulator
from catkit.interfaces.Instrument import SimInstrument
import catkit.hardware.thorlabs.ThorlabsMFF101
class HicatMFF101Emulator(MFF101Emulator):
def __init__(self, config_id, in_beam_position):
super().__init__(config_id, in_beam_position)
self.pos1_counter = 0
self.pos2_counter = 0
def move_to_position_1(self):
self.pos1_counter += 1
def move_to_position_2(self):
self.pos2_counter += 1
class ThorlabsMFF101(SimInstrument, catkit.hardware.thorlabs.ThorlabsMFF101.ThorlabsMFF101):
instrument_lib = HicatMFF101Emulator
with ThorlabsMFF101(config_id="dummy", serial="sn", in_beam_position=1) as device:
device.move(FlipMountPosition.IN_BEAM)
assert device.current_position is FlipMountPosition.IN_BEAM
assert device.instrument_lib.pos1_counter == 1
device.move(FlipMountPosition.OUT_OF_BEAM)
assert device.current_position is FlipMountPosition.OUT_OF_BEAM
assert device.instrument_lib.pos2_counter == 1
device.move(FlipMountPosition.OUT_OF_BEAM)
assert device.current_position is FlipMountPosition.OUT_OF_BEAM
assert device.instrument_lib.pos2_counter == 1 # Already in position so shouldn't be incremented.
device.move(FlipMountPosition.OUT_OF_BEAM, force=True)
assert device.current_position is FlipMountPosition.OUT_OF_BEAM
assert device.instrument_lib.pos2_counter == 2 | 0.498291 | 0.562237 |
import sys
from shapely.wkt import loads
from shapely import geometry
class Node():
def __init__(self,kwargs,row):
keys=kwargs.keys()
node_id=kwargs['node_id'] if 'node_id' in keys else ''
if node_id:
try:
self.node_id=int(float(node_id))
except Exception as e:
print("broken at row{},".format(row),end=' ')
print(e)
sys.exit(0)
else:
print("node_id is not defined in node.csv, please check it!")
sys.exit(0)
ctrl_type=kwargs['ctrl_type'] if 'ctrl_type' in keys else ''
try:
self.ctrl_type=int(float(ctrl_type))
except:
self.ctrl_type=0
self.activity_type = kwargs['activity_type'] if 'activity_type' in keys else 'unclassified'
x_coord=kwargs['x_coord'] if 'x_coord' in keys else ''
if x_coord:
try:
self.x_coord=float(x_coord)
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("x_coord not found in node.csv, please check it!")
sys.exit(0)
y_coord=kwargs['y_coord'] if 'y_coord' in keys else ''
if y_coord:
try:
self.y_coord=float(y_coord)
except Exception as e:
print('broken at row {},'.format(row), end=' ')
print(e)
sys.exit(0)
else:
print("y_coord not found in node.csv, please check it!")
sys.exit(0)
self.geometry=geometry.Point(self.x_coord,self.y_coord)
production=kwargs['production'] if 'production' in keys else ''
try:
self.production=float(production)
except:
self.production=''
attraction=kwargs['attraction'] if 'attraction' in keys else ''
try:
self.attraction=float(attraction)
except:
self.attraction=''
self.out_link_list=[]
self.in_link_list=[]
class Link():
def __init__(self,kwargs,row):
keys=kwargs.keys()
link_id=kwargs['link_id'] if 'link_id' in keys else ''
if link_id:
try:
self.link_id=int(float(link_id))
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
self.link_id=None
from_node_id=kwargs['from_node_id'] if 'from_node_id' in keys else ''
if from_node_id:
try:
self.from_node_id=int(float(from_node_id))
except Exception as e:
print('broken at row {},'.format(row), end=' ')
print(e)
sys.exit(0)
else:
print("from_node_id not found in link.csv, please check it!")
sys.exit(0)
to_node_id=kwargs['to_node_id'] if 'to_node_id' in keys else ''
if to_node_id:
try:
self.to_node_id=int(float(to_node_id))
except Exception as e:
print('broken at row {},'.format(row), end=' ')
print(e)
sys.exit(0)
else:
print("to_node_id not found in link.csv, please check it!")
sys.exit(0)
length=kwargs['length'] if 'length' in keys else ''
try:
self.length =float(length)
except:
self.length=''
lanes=kwargs['lanes'] if 'lanes' in keys else ''
try:
self.lanes =int(float(lanes))
except:
self.lanes=''
free_speed=kwargs['free_speed'] if 'free_speed' in keys else ''
try:
self.free_speed =float(free_speed)
except:
self.free_speed=''
capacity=kwargs['capacity'] if 'capacity' in keys else ''
try:
self.capacity = float(capacity)
except:
self.capacity=''
link_type_name=kwargs['link_type_name'] if 'link_type_name' in keys else ''
if link_type_name:
self.link_type_name=link_type_name
else:
self.link_type_name='unclassified'
link_geo=kwargs['geometry'] if 'geometry' in keys else ''
try:
self.geometry = loads(link_geo)
except:
self.geometry=''
if 'allowed_uses' in keys:
allowed_uses=kwargs['allowed_uses']
if allowed_uses:
if ',' in allowed_uses:
self.allowed_uses=[allowed_use_.lstrip() for allowed_use_ in allowed_uses.split(',')]
elif ';' in allowed_uses:
self.allowed_uses = [allowed_use_.lstrip() for allowed_use_ in allowed_uses.split(';')]
else:
self.allowed_uses = [allowed_uses]
else:
self.allowed_uses = ['unclassified']
else:
self.allowed_uses=['unclassified']
class Agent():
def __init__(self,kwargs,row):
keys=kwargs.keys()
agent_id=kwargs['agent_id'] if 'agent_id' in keys else ''
if agent_id:
self.agent_id=int(float(agent_id))
else:
self.agent_id=None
node_sequence=kwargs['node_sequence'] if 'node_sequence' in keys else ''
try:
self.node_sequence=[int(float(id)) for id in node_sequence.split(';')[:-1]]
except:
self.node_sequence=''
agent_geo=kwargs['geometry'] if 'geometry' in keys else ''
if ',)' in agent_geo:
agent_geo=agent_geo.replace(',)',')')
try:
self.geometry=loads(agent_geo)
except:
self.geometry=''
print("warning: can't load geometry at row{}".format(row))
class Demand():
def __init__(self,kwargs,row):
keys=kwargs.keys()
o_zone_id=kwargs['o_zone_id'] if 'o_zone_id' in keys else ''
if o_zone_id:
try:
self.o_zone_id=int(float(o_zone_id))
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("o_zone_id is not defined in demand.csv, please check it!")
sys.exit(0)
d_zone_id=kwargs['d_zone_id'] if 'd_zone_id' in keys else ''
if d_zone_id:
try:
self.d_zone_id=int(float(d_zone_id))
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("d_zone_id is not defined in demand.csv, please check it!")
sys.exit(0)
vol=kwargs['volume'] if 'volume' in keys else ''
if vol:
try:
self.volume=float(vol)
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("volume is not defined in demand.csv, please check it!")
sys.exit(0)
demand_geo=kwargs['geometry'] if 'geometry' in keys else ''
if demand_geo:
try:
self.geometry=loads(demand_geo)
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("geometry is not defined in demand.csv, please check it!")
sys.exit(0)
class POI():
def __init__(self,kwargs,row):
keys=kwargs.keys()
poi_id=kwargs['poi_id'] if 'poi_id' in keys else ''
try:
self.poi_id=int(float(poi_id))
except :
self.poi_id=None
self.name=kwargs['name'] if 'name' in keys else ''
building=kwargs['building'] if 'building' in keys else ''
if building:
self.building=building.split(';')
else:
self.building = ['unclassified']
poi_geo=kwargs['geometry'] if 'geometry' in keys else ''
if poi_geo:
try:
self.geometry=loads(poi_geo)
except Exception as e:
print('broken at row {}'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("geometry is not defined in poi.csv, please check it!")
sys.exit(0)
centroid=kwargs['centroid'] if 'centroid' in keys else ''
if centroid:
try:
self.centroid=loads(centroid)
except:
self.centroid = self.geometry.centroid
else:
self.centroid=self.geometry.centroid
activity_zone_id=kwargs['activity_zone_id'] if 'activity_zone_id' in keys else ''
try:
self.activity_zone_id=int(float(activity_zone_id))
except:
self.activity_zone_id=''
class POITrip():
def __init__(self,kwargs,row):
keys=kwargs.keys()
building = kwargs['building'] if 'building' in keys else ''
if building:
self.building = building.split(';')
else:
self.building =['unclassified']
production_rate1=kwargs['production_rate1'] if 'production_rate1' in keys else ''
if production_rate1:
try:
self.production_rate1=float(production_rate1)
except:
self.production_rate1=0
else:
print("production_rate1 is not defined in poi_trip_rate.csv, please check it!")
sys.exit(0)
attraction_rate1=kwargs['attraction_rate1'] if 'attraction_rate1' in keys else ''
if attraction_rate1:
try:
self.attraction_rate1=float(attraction_rate1)
except:
self.attraction_rate1=0
else:
print("attraction_rate1 is not defined in poi_trip_rate.csv, please check it!")
sys.exit(0)
class Zone():
def __init__(self,kwargs,row):
keys=kwargs.keys()
self.name=kwargs['name'] if 'name' in keys else ' '
activity_zone_id=kwargs['activity_zone_id'] if 'activity_zone_id' in keys else ''
if activity_zone_id:
try:
self.activity_zone_id=int(float(activity_zone_id))
except Exception as e:
print("broken at row{}".format(row),end=' ')
print(e)
sys.exit(0)
else:
print("activity_zone_id is not defined in zone.csv, please check it!")
sys.exit(0)
centroid_x=kwargs['centroid_x'] if 'centroid_x' in keys else ''
if centroid_x:
try:
self.centroid_x=float(centroid_x)
except Exception as e:
print("broken at row{}".format(row),end=' ')
print(e)
sys.exit(0)
else:
print("centroid_x is not defined in zone.csv, please check it!")
sys.exit(0)
centroid_y=kwargs['centroid_y'] if 'centroid_y' in keys else ''
if centroid_y:
try:
self.centroid_y=float(centroid_y)
except Exception as e:
print("broken at row{}".format(row), end=' ')
print(e)
sys.exit(0)
else:
print("centroid_y is not defined in zone.csv, please check it!")
sys.exit(0)
zone_geo=kwargs['geometry'] if 'geometry' in keys else ''
if zone_geo:
try:
self.geometry=loads(zone_geo)
except Exception as e:
print("broken at row{}".format(row), end=' ')
print(e)
sys.exit(0)
else:
self.geometry=''
centroid=kwargs['centroid'] if 'centroid' in keys else ''
if centroid:
try:
self.centroid=loads(centroid)
except Exception as e:
print("broken at row{}".format(row), end=' ')
print(e)
sys.exit(0)
else:
print("centroid is not defined in zone.csv, please check it!")
sys.exit(0)
total_poi_count=kwargs['total_poi_count'] if 'total_poi_count' in keys else ''
try:
self.total_poi_count=float(total_poi_count)
except:
self.total_poi_count=0
residential_poi_count=kwargs['residential_poi_count'] if 'residential_poi_count' in keys else ''
try:
self.residential_poi_count=float(residential_poi_count)
except:
self.residential_poi_count=0
office_poi_count=kwargs['office_poi_count'] if 'office_poi_count' in keys else ''
try:
self.office_poi_count=float(office_poi_count)
except:
self.office_poi_count=0
shopping_poi_count=kwargs['shopping_poi_count'] if 'shopping_poi_count' in keys else ''
try:
self.shopping_poi_count=float(shopping_poi_count)
except:
self.shopping_poi_count=0
school_poi_count=kwargs['school_poi_count'] if 'school_poi_count' in keys else ''
try:
self.school_poi_count=float(school_poi_count)
except:
self.school_poi_count=0
parking_poi_count=kwargs['parking_poi_count'] if 'parking_poi_count' in keys else ''
try:
self.parking_poi_count=float(parking_poi_count)
except:
self.parking_poi_count=0
boundary_node_count=kwargs['boundary_node_count'] if 'boundary_node_count' in keys else ''
try:
self.boundary_node_count=float(boundary_node_count)
except:
self.boundary_node_count=0
total_production=kwargs['total_production'] if 'total_production' in keys else ''
try:
self.total_production=float(total_production)
except:
self.total_production=0
total_attraction=kwargs['total_attraction'] if 'total_attraction' in keys else ''
try:
self.total_attraction=float(total_attraction)
except:
self.total_attraction=0
class Network():
def __init__(self):
self.node_dict={}
self.link_dict={}
self.agent_dict={}
self.demand_dict={}
self.poi_dict={}
self.poi_trip_dict={}
self.zone_dict={}
self.number_of_node=0
self.number_of_link=0
self.number_of_agent=0
self.number_of_demand=0
self.number_of_poi=0
self.number_of_zone=0
self.number_of_poi_type=0
self.node_coords=[]
self.link_coords=[]
self.poi_coords=[]
self.range_of_zone_ids=[]
self.min_lat=-90
self.max_lat=90
self.min_lng=-180
self.max_lng=180
def get_avl_node_attrs(self):
self.node_attr_dict = {
'ctrl_type': 'int',
'activity_type': 'str',
'production': 'float',
'attraction': 'float',
}
print('%-30s%-20s' % ('attr', 'type'))
for k, v in self.node_attr_dict.items():
print('%-30s%-20s' % (k, v))
def get_avl_link_attrs(self):
self.link_attr_dict = {
'length': 'float',
'lanes': 'int',
'free_speed': 'float',
'capacity': 'float',
'link_type_name': 'str',
'allowed_uses': 'str',
}
print('%-30s%-20s' % ('attr', 'type'))
for k, v in self.link_attr_dict.items():
print('%-30s%-20s' % (k, v))
def get_avl_poi_attrs(self):
self.poi_attr_dict = {
'building': 'str',
'activity_zone_id': 'int'
}
print('%-30s%-20s' % ('attr', 'type'))
for k, v in self.poi_attr_dict.items():
print('%-30s%-20s' % (k, v))
def get_avl_range_of_zone_ids(self):
if self.number_of_zone==0:
print("zone.csv doesn't exist")
else:
print('%-20s%-20s' % ('min zone id', 'max zone id'))
print('%-20s%-20s' % (self.range_of_zone_ids[0],self.range_of_zone_ids[1])) | plot4gmns/classes.py | import sys
from shapely.wkt import loads
from shapely import geometry
class Node():
def __init__(self,kwargs,row):
keys=kwargs.keys()
node_id=kwargs['node_id'] if 'node_id' in keys else ''
if node_id:
try:
self.node_id=int(float(node_id))
except Exception as e:
print("broken at row{},".format(row),end=' ')
print(e)
sys.exit(0)
else:
print("node_id is not defined in node.csv, please check it!")
sys.exit(0)
ctrl_type=kwargs['ctrl_type'] if 'ctrl_type' in keys else ''
try:
self.ctrl_type=int(float(ctrl_type))
except:
self.ctrl_type=0
self.activity_type = kwargs['activity_type'] if 'activity_type' in keys else 'unclassified'
x_coord=kwargs['x_coord'] if 'x_coord' in keys else ''
if x_coord:
try:
self.x_coord=float(x_coord)
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("x_coord not found in node.csv, please check it!")
sys.exit(0)
y_coord=kwargs['y_coord'] if 'y_coord' in keys else ''
if y_coord:
try:
self.y_coord=float(y_coord)
except Exception as e:
print('broken at row {},'.format(row), end=' ')
print(e)
sys.exit(0)
else:
print("y_coord not found in node.csv, please check it!")
sys.exit(0)
self.geometry=geometry.Point(self.x_coord,self.y_coord)
production=kwargs['production'] if 'production' in keys else ''
try:
self.production=float(production)
except:
self.production=''
attraction=kwargs['attraction'] if 'attraction' in keys else ''
try:
self.attraction=float(attraction)
except:
self.attraction=''
self.out_link_list=[]
self.in_link_list=[]
class Link():
def __init__(self,kwargs,row):
keys=kwargs.keys()
link_id=kwargs['link_id'] if 'link_id' in keys else ''
if link_id:
try:
self.link_id=int(float(link_id))
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
self.link_id=None
from_node_id=kwargs['from_node_id'] if 'from_node_id' in keys else ''
if from_node_id:
try:
self.from_node_id=int(float(from_node_id))
except Exception as e:
print('broken at row {},'.format(row), end=' ')
print(e)
sys.exit(0)
else:
print("from_node_id not found in link.csv, please check it!")
sys.exit(0)
to_node_id=kwargs['to_node_id'] if 'to_node_id' in keys else ''
if to_node_id:
try:
self.to_node_id=int(float(to_node_id))
except Exception as e:
print('broken at row {},'.format(row), end=' ')
print(e)
sys.exit(0)
else:
print("to_node_id not found in link.csv, please check it!")
sys.exit(0)
length=kwargs['length'] if 'length' in keys else ''
try:
self.length =float(length)
except:
self.length=''
lanes=kwargs['lanes'] if 'lanes' in keys else ''
try:
self.lanes =int(float(lanes))
except:
self.lanes=''
free_speed=kwargs['free_speed'] if 'free_speed' in keys else ''
try:
self.free_speed =float(free_speed)
except:
self.free_speed=''
capacity=kwargs['capacity'] if 'capacity' in keys else ''
try:
self.capacity = float(capacity)
except:
self.capacity=''
link_type_name=kwargs['link_type_name'] if 'link_type_name' in keys else ''
if link_type_name:
self.link_type_name=link_type_name
else:
self.link_type_name='unclassified'
link_geo=kwargs['geometry'] if 'geometry' in keys else ''
try:
self.geometry = loads(link_geo)
except:
self.geometry=''
if 'allowed_uses' in keys:
allowed_uses=kwargs['allowed_uses']
if allowed_uses:
if ',' in allowed_uses:
self.allowed_uses=[allowed_use_.lstrip() for allowed_use_ in allowed_uses.split(',')]
elif ';' in allowed_uses:
self.allowed_uses = [allowed_use_.lstrip() for allowed_use_ in allowed_uses.split(';')]
else:
self.allowed_uses = [allowed_uses]
else:
self.allowed_uses = ['unclassified']
else:
self.allowed_uses=['unclassified']
class Agent():
def __init__(self,kwargs,row):
keys=kwargs.keys()
agent_id=kwargs['agent_id'] if 'agent_id' in keys else ''
if agent_id:
self.agent_id=int(float(agent_id))
else:
self.agent_id=None
node_sequence=kwargs['node_sequence'] if 'node_sequence' in keys else ''
try:
self.node_sequence=[int(float(id)) for id in node_sequence.split(';')[:-1]]
except:
self.node_sequence=''
agent_geo=kwargs['geometry'] if 'geometry' in keys else ''
if ',)' in agent_geo:
agent_geo=agent_geo.replace(',)',')')
try:
self.geometry=loads(agent_geo)
except:
self.geometry=''
print("warning: can't load geometry at row{}".format(row))
class Demand():
def __init__(self,kwargs,row):
keys=kwargs.keys()
o_zone_id=kwargs['o_zone_id'] if 'o_zone_id' in keys else ''
if o_zone_id:
try:
self.o_zone_id=int(float(o_zone_id))
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("o_zone_id is not defined in demand.csv, please check it!")
sys.exit(0)
d_zone_id=kwargs['d_zone_id'] if 'd_zone_id' in keys else ''
if d_zone_id:
try:
self.d_zone_id=int(float(d_zone_id))
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("d_zone_id is not defined in demand.csv, please check it!")
sys.exit(0)
vol=kwargs['volume'] if 'volume' in keys else ''
if vol:
try:
self.volume=float(vol)
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("volume is not defined in demand.csv, please check it!")
sys.exit(0)
demand_geo=kwargs['geometry'] if 'geometry' in keys else ''
if demand_geo:
try:
self.geometry=loads(demand_geo)
except Exception as e:
print('broken at row {},'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("geometry is not defined in demand.csv, please check it!")
sys.exit(0)
class POI():
def __init__(self,kwargs,row):
keys=kwargs.keys()
poi_id=kwargs['poi_id'] if 'poi_id' in keys else ''
try:
self.poi_id=int(float(poi_id))
except :
self.poi_id=None
self.name=kwargs['name'] if 'name' in keys else ''
building=kwargs['building'] if 'building' in keys else ''
if building:
self.building=building.split(';')
else:
self.building = ['unclassified']
poi_geo=kwargs['geometry'] if 'geometry' in keys else ''
if poi_geo:
try:
self.geometry=loads(poi_geo)
except Exception as e:
print('broken at row {}'.format(row),end=' ')
print(e)
sys.exit(0)
else:
print("geometry is not defined in poi.csv, please check it!")
sys.exit(0)
centroid=kwargs['centroid'] if 'centroid' in keys else ''
if centroid:
try:
self.centroid=loads(centroid)
except:
self.centroid = self.geometry.centroid
else:
self.centroid=self.geometry.centroid
activity_zone_id=kwargs['activity_zone_id'] if 'activity_zone_id' in keys else ''
try:
self.activity_zone_id=int(float(activity_zone_id))
except:
self.activity_zone_id=''
class POITrip():
def __init__(self,kwargs,row):
keys=kwargs.keys()
building = kwargs['building'] if 'building' in keys else ''
if building:
self.building = building.split(';')
else:
self.building =['unclassified']
production_rate1=kwargs['production_rate1'] if 'production_rate1' in keys else ''
if production_rate1:
try:
self.production_rate1=float(production_rate1)
except:
self.production_rate1=0
else:
print("production_rate1 is not defined in poi_trip_rate.csv, please check it!")
sys.exit(0)
attraction_rate1=kwargs['attraction_rate1'] if 'attraction_rate1' in keys else ''
if attraction_rate1:
try:
self.attraction_rate1=float(attraction_rate1)
except:
self.attraction_rate1=0
else:
print("attraction_rate1 is not defined in poi_trip_rate.csv, please check it!")
sys.exit(0)
class Zone():
def __init__(self,kwargs,row):
keys=kwargs.keys()
self.name=kwargs['name'] if 'name' in keys else ' '
activity_zone_id=kwargs['activity_zone_id'] if 'activity_zone_id' in keys else ''
if activity_zone_id:
try:
self.activity_zone_id=int(float(activity_zone_id))
except Exception as e:
print("broken at row{}".format(row),end=' ')
print(e)
sys.exit(0)
else:
print("activity_zone_id is not defined in zone.csv, please check it!")
sys.exit(0)
centroid_x=kwargs['centroid_x'] if 'centroid_x' in keys else ''
if centroid_x:
try:
self.centroid_x=float(centroid_x)
except Exception as e:
print("broken at row{}".format(row),end=' ')
print(e)
sys.exit(0)
else:
print("centroid_x is not defined in zone.csv, please check it!")
sys.exit(0)
centroid_y=kwargs['centroid_y'] if 'centroid_y' in keys else ''
if centroid_y:
try:
self.centroid_y=float(centroid_y)
except Exception as e:
print("broken at row{}".format(row), end=' ')
print(e)
sys.exit(0)
else:
print("centroid_y is not defined in zone.csv, please check it!")
sys.exit(0)
zone_geo=kwargs['geometry'] if 'geometry' in keys else ''
if zone_geo:
try:
self.geometry=loads(zone_geo)
except Exception as e:
print("broken at row{}".format(row), end=' ')
print(e)
sys.exit(0)
else:
self.geometry=''
centroid=kwargs['centroid'] if 'centroid' in keys else ''
if centroid:
try:
self.centroid=loads(centroid)
except Exception as e:
print("broken at row{}".format(row), end=' ')
print(e)
sys.exit(0)
else:
print("centroid is not defined in zone.csv, please check it!")
sys.exit(0)
total_poi_count=kwargs['total_poi_count'] if 'total_poi_count' in keys else ''
try:
self.total_poi_count=float(total_poi_count)
except:
self.total_poi_count=0
residential_poi_count=kwargs['residential_poi_count'] if 'residential_poi_count' in keys else ''
try:
self.residential_poi_count=float(residential_poi_count)
except:
self.residential_poi_count=0
office_poi_count=kwargs['office_poi_count'] if 'office_poi_count' in keys else ''
try:
self.office_poi_count=float(office_poi_count)
except:
self.office_poi_count=0
shopping_poi_count=kwargs['shopping_poi_count'] if 'shopping_poi_count' in keys else ''
try:
self.shopping_poi_count=float(shopping_poi_count)
except:
self.shopping_poi_count=0
school_poi_count=kwargs['school_poi_count'] if 'school_poi_count' in keys else ''
try:
self.school_poi_count=float(school_poi_count)
except:
self.school_poi_count=0
parking_poi_count=kwargs['parking_poi_count'] if 'parking_poi_count' in keys else ''
try:
self.parking_poi_count=float(parking_poi_count)
except:
self.parking_poi_count=0
boundary_node_count=kwargs['boundary_node_count'] if 'boundary_node_count' in keys else ''
try:
self.boundary_node_count=float(boundary_node_count)
except:
self.boundary_node_count=0
total_production=kwargs['total_production'] if 'total_production' in keys else ''
try:
self.total_production=float(total_production)
except:
self.total_production=0
total_attraction=kwargs['total_attraction'] if 'total_attraction' in keys else ''
try:
self.total_attraction=float(total_attraction)
except:
self.total_attraction=0
class Network():
def __init__(self):
self.node_dict={}
self.link_dict={}
self.agent_dict={}
self.demand_dict={}
self.poi_dict={}
self.poi_trip_dict={}
self.zone_dict={}
self.number_of_node=0
self.number_of_link=0
self.number_of_agent=0
self.number_of_demand=0
self.number_of_poi=0
self.number_of_zone=0
self.number_of_poi_type=0
self.node_coords=[]
self.link_coords=[]
self.poi_coords=[]
self.range_of_zone_ids=[]
self.min_lat=-90
self.max_lat=90
self.min_lng=-180
self.max_lng=180
def get_avl_node_attrs(self):
self.node_attr_dict = {
'ctrl_type': 'int',
'activity_type': 'str',
'production': 'float',
'attraction': 'float',
}
print('%-30s%-20s' % ('attr', 'type'))
for k, v in self.node_attr_dict.items():
print('%-30s%-20s' % (k, v))
def get_avl_link_attrs(self):
self.link_attr_dict = {
'length': 'float',
'lanes': 'int',
'free_speed': 'float',
'capacity': 'float',
'link_type_name': 'str',
'allowed_uses': 'str',
}
print('%-30s%-20s' % ('attr', 'type'))
for k, v in self.link_attr_dict.items():
print('%-30s%-20s' % (k, v))
def get_avl_poi_attrs(self):
self.poi_attr_dict = {
'building': 'str',
'activity_zone_id': 'int'
}
print('%-30s%-20s' % ('attr', 'type'))
for k, v in self.poi_attr_dict.items():
print('%-30s%-20s' % (k, v))
def get_avl_range_of_zone_ids(self):
if self.number_of_zone==0:
print("zone.csv doesn't exist")
else:
print('%-20s%-20s' % ('min zone id', 'max zone id'))
print('%-20s%-20s' % (self.range_of_zone_ids[0],self.range_of_zone_ids[1])) | 0.087621 | 0.118207 |
import os
import inspect
import flask
from werkzeug.utils import import_string
from gru.plugins.base import BasePlugin
from gru.plugins.base.page import PagePlugin
from gru.plugins.base.hostwidget import HostWidgetPlugin
from gru.plugins.base.auth import AuthenticationBackend
from gru.plugins.base.inventory import InventoryProvider
class PluginMetadata(object):
def __init__(self, module_path, plugin_class_name, plugin_class):
self.module_path = module_path
self.plugin_class_name = plugin_class_name
self.plugin_class = plugin_class
def __repr__(self):
return 'PluginMetadata(module_path="{}", plugin_class_name="{}", plugin_class={})'.format(
self.module_path,
self.plugin_class_name,
self.plugin_class
)
def subclasses(class_a, class_b):
"""
Checks whether class_a is a subclass of class_b. Will also make sure it's not the same class
:param class_a: Class to check
:param class_b: Reference class to check against
:return: True if A is an actual subclass of B
"""
return issubclass(class_a, class_b) and class_a != class_b
class PluginRegistry(object):
def __init__(self, app, settings):
self.app = app
self.settings = settings
# Host widgets are a flat list of plugin instances
self.host_widgets = []
# Page plugins are rendered on their own seperate page
self.pages = []
# Holds the instantiated authentication backend that was chosen
self.authentication_backend = None
# Holds the inventory provider that was chosen
self.inventory_provider = None
def register(self, module_path):
"""
Discover plugins under a given path
i.e. "contrib.monitoring_overview".
This looks for sub classes of the following:
- PagePlugin
- HostWidgetPlugin
- AuthenticationBackend
- InventoryBackend
:param module_path: a string representing the python module to load
"""
views = []
plugin_instances = []
module_ref = import_string(module_path)
for attr_name in dir(module_ref):
attr = getattr(module_ref, attr_name)
try:
if not subclasses(attr, BasePlugin):
continue # Not a plugin
except TypeError:
continue # Not a class
plugin_path = '.'.join([module_path, attr_name])
plugin = PluginMetadata(
module_path,
attr_name,
attr)
if subclasses(plugin.plugin_class, PagePlugin):
views.append(plugin)
elif subclasses(plugin.plugin_class, HostWidgetPlugin):
views.append(plugin)
elif subclasses(plugin.plugin_class, AuthenticationBackend) and \
plugin_path == self.settings.get('authentication.backend'):
instance = self._get_instance(plugin)
self.authentication_backend = instance
plugin_instances.append(instance)
elif subclasses(plugin.plugin_class, InventoryProvider) and \
plugin_path == self.settings.get('inventory.provider'):
instance = self._get_instance(plugin)
self.inventory_provider = instance
plugin_instances.append(instance)
# Create a blueprint and register it for all views in the plugin
if views:
view_instances = self._register_blueprint(module_path, module_ref, views)
for view in view_instances:
if isinstance(view, HostWidgetPlugin):
self.host_widgets.append(view)
elif isinstance(view, PagePlugin):
self.pages.append(view)
plugin_instances += view_instances
# Run all startup hooks
for plugin_instance in plugin_instances:
plugin_instance.on_load()
def _get_instance(self, plugin):
"""
Returns an initialized instance of a plugin class, passing in the required arguments
:param plugin: plugin class to initialize
:return: instance of plugin
"""
kwargs = {'app': self.app}
return plugin.plugin_class(**kwargs)
def _register_blueprint(self, module_path, module_ref, view_classes):
module_name = os.path.basename(module_path).lower()
module_blueprint = self._setup_blueprint(module_name, module_ref)
instances = []
for view_class in view_classes:
view_instance = self._get_instance(view_class)
module_blueprint.add_url_rule(
view_instance.path,
view_instance.get_name(),
view_instance._request_handler,
methods=view_instance.allowed_methods())
instances.append(view_instance)
self.app.register_blueprint(
module_blueprint,
url_prefix='/plugins/{}'.format(module_name))
return instances
def _setup_blueprint(self, module_name, module_ref):
root_dir = os.path.dirname(inspect.getabsfile(module_ref))
kwargs = {}
# Register templates
template_folder = os.path.join(root_dir, 'templates')
if os.path.isdir(template_folder):
kwargs.update({'template_folder': 'templates'})
# Register static files, if any
static_folder = os.path.join(root_dir, 'static')
if os.path.isdir(static_folder):
kwargs.update({
'static_folder': 'static',
'static_url_path': '/static/plugins/{}'.format(module_name)
})
# Generate blueprint
blueprint = flask.Blueprint(module_name, module_name, **kwargs)
# Add the plugin_static() helper function to the
# template context
@blueprint.context_processor
def static_processor():
def plugin_static(filename):
return flask.url_for('{}.static'.format(module_name), filename=filename)
return dict(plugin_static=plugin_static)
# Blueprint done, return it
return blueprint | gru/plugins/loader/registry.py | import os
import inspect
import flask
from werkzeug.utils import import_string
from gru.plugins.base import BasePlugin
from gru.plugins.base.page import PagePlugin
from gru.plugins.base.hostwidget import HostWidgetPlugin
from gru.plugins.base.auth import AuthenticationBackend
from gru.plugins.base.inventory import InventoryProvider
class PluginMetadata(object):
def __init__(self, module_path, plugin_class_name, plugin_class):
self.module_path = module_path
self.plugin_class_name = plugin_class_name
self.plugin_class = plugin_class
def __repr__(self):
return 'PluginMetadata(module_path="{}", plugin_class_name="{}", plugin_class={})'.format(
self.module_path,
self.plugin_class_name,
self.plugin_class
)
def subclasses(class_a, class_b):
"""
Checks whether class_a is a subclass of class_b. Will also make sure it's not the same class
:param class_a: Class to check
:param class_b: Reference class to check against
:return: True if A is an actual subclass of B
"""
return issubclass(class_a, class_b) and class_a != class_b
class PluginRegistry(object):
def __init__(self, app, settings):
self.app = app
self.settings = settings
# Host widgets are a flat list of plugin instances
self.host_widgets = []
# Page plugins are rendered on their own seperate page
self.pages = []
# Holds the instantiated authentication backend that was chosen
self.authentication_backend = None
# Holds the inventory provider that was chosen
self.inventory_provider = None
def register(self, module_path):
"""
Discover plugins under a given path
i.e. "contrib.monitoring_overview".
This looks for sub classes of the following:
- PagePlugin
- HostWidgetPlugin
- AuthenticationBackend
- InventoryBackend
:param module_path: a string representing the python module to load
"""
views = []
plugin_instances = []
module_ref = import_string(module_path)
for attr_name in dir(module_ref):
attr = getattr(module_ref, attr_name)
try:
if not subclasses(attr, BasePlugin):
continue # Not a plugin
except TypeError:
continue # Not a class
plugin_path = '.'.join([module_path, attr_name])
plugin = PluginMetadata(
module_path,
attr_name,
attr)
if subclasses(plugin.plugin_class, PagePlugin):
views.append(plugin)
elif subclasses(plugin.plugin_class, HostWidgetPlugin):
views.append(plugin)
elif subclasses(plugin.plugin_class, AuthenticationBackend) and \
plugin_path == self.settings.get('authentication.backend'):
instance = self._get_instance(plugin)
self.authentication_backend = instance
plugin_instances.append(instance)
elif subclasses(plugin.plugin_class, InventoryProvider) and \
plugin_path == self.settings.get('inventory.provider'):
instance = self._get_instance(plugin)
self.inventory_provider = instance
plugin_instances.append(instance)
# Create a blueprint and register it for all views in the plugin
if views:
view_instances = self._register_blueprint(module_path, module_ref, views)
for view in view_instances:
if isinstance(view, HostWidgetPlugin):
self.host_widgets.append(view)
elif isinstance(view, PagePlugin):
self.pages.append(view)
plugin_instances += view_instances
# Run all startup hooks
for plugin_instance in plugin_instances:
plugin_instance.on_load()
def _get_instance(self, plugin):
"""
Returns an initialized instance of a plugin class, passing in the required arguments
:param plugin: plugin class to initialize
:return: instance of plugin
"""
kwargs = {'app': self.app}
return plugin.plugin_class(**kwargs)
def _register_blueprint(self, module_path, module_ref, view_classes):
module_name = os.path.basename(module_path).lower()
module_blueprint = self._setup_blueprint(module_name, module_ref)
instances = []
for view_class in view_classes:
view_instance = self._get_instance(view_class)
module_blueprint.add_url_rule(
view_instance.path,
view_instance.get_name(),
view_instance._request_handler,
methods=view_instance.allowed_methods())
instances.append(view_instance)
self.app.register_blueprint(
module_blueprint,
url_prefix='/plugins/{}'.format(module_name))
return instances
def _setup_blueprint(self, module_name, module_ref):
root_dir = os.path.dirname(inspect.getabsfile(module_ref))
kwargs = {}
# Register templates
template_folder = os.path.join(root_dir, 'templates')
if os.path.isdir(template_folder):
kwargs.update({'template_folder': 'templates'})
# Register static files, if any
static_folder = os.path.join(root_dir, 'static')
if os.path.isdir(static_folder):
kwargs.update({
'static_folder': 'static',
'static_url_path': '/static/plugins/{}'.format(module_name)
})
# Generate blueprint
blueprint = flask.Blueprint(module_name, module_name, **kwargs)
# Add the plugin_static() helper function to the
# template context
@blueprint.context_processor
def static_processor():
def plugin_static(filename):
return flask.url_for('{}.static'.format(module_name), filename=filename)
return dict(plugin_static=plugin_static)
# Blueprint done, return it
return blueprint | 0.541409 | 0.069985 |
import json
import shutil
from argparse import ArgumentParser, Namespace
from pathlib import Path
from random import random
from typing import Dict, Tuple, List
from bidict import bidict
from tqdm import tqdm
TColor = Tuple[float, ...]
def rand_color() -> TColor:
return tuple([int(255 * random()) for _ in range(3)])
def rand_colors(n_colors: int) -> List[TColor]:
colors = [rand_color() for _ in range(n_colors)]
return colors
CLS_SELECT = {
'book': 1, 'vase': 2, 'scissors': 3,
'teddy bear': 4, 'hair drier': 5,
'toothbrush': 6, 'potted plant': 7,
'apple': 8, 'orange': 9, 'carrot': 10,
'banana': 11, 'sandwich': 12, 'broccoli': 13,
'hot dog': 14, 'pizza': 15, 'cake': 16,
'donut': 17, 'wine glass': 18, 'bottle': 19,
'cup': 20, 'fork': 21, 'spoon': 22,
'knife': 23, 'bowl': 24, 'sports ball': 25
}
COLORS = rand_colors(len(CLS_SELECT))
N_COCO_CLASSES = len(CLS_SELECT) + 1
def get_coco_mapping(annot_file: Path) -> Dict[str, int]:
with open(annot_file, 'r') as j:
data = json.load(j)
mapping = {cls['name']: cls['id'] for cls in data['categories']}
return mapping
def pad_image_id(image_id: str) -> str:
name_len = 12 # standart for coco_example
n_pad = name_len - len(image_id)
name = '0' * n_pad + image_id
return name
def convert_and_save(annot_file: Path,
coco_im_dir: Path,
save_dir: Path
) -> None:
coco_mapping = bidict(get_coco_mapping(annot_file))
with open(annot_file, 'r') as j:
data = json.load(j)
for n, obj in enumerate(tqdm(data['annotations'])):
name = coco_mapping.inv[obj['category_id']]
image_id = obj['image_id']
image_id_pad = pad_image_id(str(image_id))
if name in CLS_SELECT.keys():
with open(save_dir / 'annot' / f'{image_id_pad}.jsonl', 'w') as out:
annot = {
'bbox': [int(x) for x in obj['bbox']],
'label': CLS_SELECT[name], 'image_id': image_id,
'area': round(obj['area']), 'is_crowd': obj['iscrowd'],
}
out.write(json.dumps(annot) + '\n')
im_name = f'{image_id_pad}.jpg'
if not (save_dir / im_name).is_file():
shutil.copy(src=coco_im_dir / im_name,
dst=save_dir / 'images' / im_name)
def main(args: Namespace) -> None:
im_dir = args.save_dir / 'images'
annot_dir = args.save_dir / 'annots'
im_dir.mkdir(exist_ok=True)
annot_dir.mkdir(exist_ok=True)
for fold in ['train2017', 'val2017']:
im_dir = args.coco_dir / fold
annot_file = args.coco_dir / 'annotations' / f'instances_{fold}.json'
if im_dir.is_dir() and annot_file.is_file():
print(fold)
convert_and_save(annot_file=annot_file, coco_im_dir=im_dir,
save_dir=args.save_dir)
n_im = len(list((args.save_dir / 'images').glob('*.jpg')))
n_annot = len(list(annot_dir.glob('*.jsonl')))
assert n_im == n_annot, f'num im: {n_im}, num annot: {n_annot}'
def get_parser() -> ArgumentParser:
parser = ArgumentParser()
parser.add_argument('--coco_dir', type=Path)
parser.add_argument('--save_dir', type=Path)
return parser
if __name__ == '__main__':
main(args=get_parser().parse_args()) | detection/coco_subset.py | import json
import shutil
from argparse import ArgumentParser, Namespace
from pathlib import Path
from random import random
from typing import Dict, Tuple, List
from bidict import bidict
from tqdm import tqdm
TColor = Tuple[float, ...]
def rand_color() -> TColor:
return tuple([int(255 * random()) for _ in range(3)])
def rand_colors(n_colors: int) -> List[TColor]:
colors = [rand_color() for _ in range(n_colors)]
return colors
CLS_SELECT = {
'book': 1, 'vase': 2, 'scissors': 3,
'teddy bear': 4, 'hair drier': 5,
'toothbrush': 6, 'potted plant': 7,
'apple': 8, 'orange': 9, 'carrot': 10,
'banana': 11, 'sandwich': 12, 'broccoli': 13,
'hot dog': 14, 'pizza': 15, 'cake': 16,
'donut': 17, 'wine glass': 18, 'bottle': 19,
'cup': 20, 'fork': 21, 'spoon': 22,
'knife': 23, 'bowl': 24, 'sports ball': 25
}
COLORS = rand_colors(len(CLS_SELECT))
N_COCO_CLASSES = len(CLS_SELECT) + 1
def get_coco_mapping(annot_file: Path) -> Dict[str, int]:
with open(annot_file, 'r') as j:
data = json.load(j)
mapping = {cls['name']: cls['id'] for cls in data['categories']}
return mapping
def pad_image_id(image_id: str) -> str:
name_len = 12 # standart for coco_example
n_pad = name_len - len(image_id)
name = '0' * n_pad + image_id
return name
def convert_and_save(annot_file: Path,
coco_im_dir: Path,
save_dir: Path
) -> None:
coco_mapping = bidict(get_coco_mapping(annot_file))
with open(annot_file, 'r') as j:
data = json.load(j)
for n, obj in enumerate(tqdm(data['annotations'])):
name = coco_mapping.inv[obj['category_id']]
image_id = obj['image_id']
image_id_pad = pad_image_id(str(image_id))
if name in CLS_SELECT.keys():
with open(save_dir / 'annot' / f'{image_id_pad}.jsonl', 'w') as out:
annot = {
'bbox': [int(x) for x in obj['bbox']],
'label': CLS_SELECT[name], 'image_id': image_id,
'area': round(obj['area']), 'is_crowd': obj['iscrowd'],
}
out.write(json.dumps(annot) + '\n')
im_name = f'{image_id_pad}.jpg'
if not (save_dir / im_name).is_file():
shutil.copy(src=coco_im_dir / im_name,
dst=save_dir / 'images' / im_name)
def main(args: Namespace) -> None:
im_dir = args.save_dir / 'images'
annot_dir = args.save_dir / 'annots'
im_dir.mkdir(exist_ok=True)
annot_dir.mkdir(exist_ok=True)
for fold in ['train2017', 'val2017']:
im_dir = args.coco_dir / fold
annot_file = args.coco_dir / 'annotations' / f'instances_{fold}.json'
if im_dir.is_dir() and annot_file.is_file():
print(fold)
convert_and_save(annot_file=annot_file, coco_im_dir=im_dir,
save_dir=args.save_dir)
n_im = len(list((args.save_dir / 'images').glob('*.jpg')))
n_annot = len(list(annot_dir.glob('*.jsonl')))
assert n_im == n_annot, f'num im: {n_im}, num annot: {n_annot}'
def get_parser() -> ArgumentParser:
parser = ArgumentParser()
parser.add_argument('--coco_dir', type=Path)
parser.add_argument('--save_dir', type=Path)
return parser
if __name__ == '__main__':
main(args=get_parser().parse_args()) | 0.577853 | 0.206654 |
import stat
import ast
import os
import configparser
from .constants import *
from .exceptions import OAuthSSHError
class ConfigError(OAuthSSHError):
"""Base exception for all Config exceptions"""
def _check_permissions(path):
if os.path.exists(path):
if not os.path.isfile(path):
raise ConfigError(path + " is not a regular file")
if not os.access(path, os.R_OK | os.W_OK):
raise ConfigError(path + " has bad permissions, should be 0600")
# Don't allow Group/Other permissions
st = os.stat(path)
if st.st_mode & (stat.S_IRWXG | stat.S_IRWXO):
raise ConfigError(path + " is too permissive, should be 0600")
else:
dir = os.path.dirname(path)
if not os.path.isdir(dir):
raise ConfigError(
path + " is not a valid path: " + " parent is not a directory"
)
if not os.access(dir, os.X_OK | os.W_OK):
raise ConfigError(
"Can not create the config file in "
+ dir
+ "parent directory permissions are too "
+ "restrictive"
)
def _load_file(path):
_check_permissions(path)
config = configparser.ConfigParser()
config.optionxform = str # case-sensitive keys
try:
config.read(path)
except configparser.Error as e:
raise ConfigError("Error parsing " + path + ": " + e.message)
return config
def _save_file(path, config):
_check_permissions(path)
try:
mask = os.umask(0o077)
fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
except OSError as e:
raise ConfigError("Could not open " + path + ": " + e.strerror)
finally:
mask = os.umask(0o077)
with os.fdopen(fd, "w") as f:
config.write(f)
def load_section(section):
config = _load_file(CONFIG_FILE)
if not config.has_section(section):
return {}
return dict(config.items(section))
def save_section(section, values):
config = _load_file(CONFIG_FILE)
if config.has_section(section):
config.remove_section(section)
config.add_section(section)
for k, v in values.items():
config.set(section, k, str(v))
_save_file(CONFIG_FILE, config)
def delete_section(section):
config = _load_file(CONFIG_FILE)
if not config.has_section(section):
return
config.remove_section(section)
_save_file(CONFIG_FILE, config)
def load_object(section, cls):
values = load_section(section)
if cls.__name__ in values:
return cls(**ast.literal_eval(values[cls.__name__]))
return None
def save_object(section, inst):
values = load_section(section)
values[inst.__class__.__name__] = inst
save_section(section, values)
def delete_object(section, cls):
values = load_section(section)
if cls.__name__ in values:
del values[cls.__name__]
save_section(section, values) | client/oauth_ssh/config.py | import stat
import ast
import os
import configparser
from .constants import *
from .exceptions import OAuthSSHError
class ConfigError(OAuthSSHError):
"""Base exception for all Config exceptions"""
def _check_permissions(path):
if os.path.exists(path):
if not os.path.isfile(path):
raise ConfigError(path + " is not a regular file")
if not os.access(path, os.R_OK | os.W_OK):
raise ConfigError(path + " has bad permissions, should be 0600")
# Don't allow Group/Other permissions
st = os.stat(path)
if st.st_mode & (stat.S_IRWXG | stat.S_IRWXO):
raise ConfigError(path + " is too permissive, should be 0600")
else:
dir = os.path.dirname(path)
if not os.path.isdir(dir):
raise ConfigError(
path + " is not a valid path: " + " parent is not a directory"
)
if not os.access(dir, os.X_OK | os.W_OK):
raise ConfigError(
"Can not create the config file in "
+ dir
+ "parent directory permissions are too "
+ "restrictive"
)
def _load_file(path):
_check_permissions(path)
config = configparser.ConfigParser()
config.optionxform = str # case-sensitive keys
try:
config.read(path)
except configparser.Error as e:
raise ConfigError("Error parsing " + path + ": " + e.message)
return config
def _save_file(path, config):
_check_permissions(path)
try:
mask = os.umask(0o077)
fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
except OSError as e:
raise ConfigError("Could not open " + path + ": " + e.strerror)
finally:
mask = os.umask(0o077)
with os.fdopen(fd, "w") as f:
config.write(f)
def load_section(section):
config = _load_file(CONFIG_FILE)
if not config.has_section(section):
return {}
return dict(config.items(section))
def save_section(section, values):
config = _load_file(CONFIG_FILE)
if config.has_section(section):
config.remove_section(section)
config.add_section(section)
for k, v in values.items():
config.set(section, k, str(v))
_save_file(CONFIG_FILE, config)
def delete_section(section):
config = _load_file(CONFIG_FILE)
if not config.has_section(section):
return
config.remove_section(section)
_save_file(CONFIG_FILE, config)
def load_object(section, cls):
values = load_section(section)
if cls.__name__ in values:
return cls(**ast.literal_eval(values[cls.__name__]))
return None
def save_object(section, inst):
values = load_section(section)
values[inst.__class__.__name__] = inst
save_section(section, values)
def delete_object(section, cls):
values = load_section(section)
if cls.__name__ in values:
del values[cls.__name__]
save_section(section, values) | 0.252476 | 0.075346 |
from logan.runner import run_app
from sentry import environment
import base64
import os
import pkg_resources
KEY_LENGTH = 40
CONFIG_TEMPLATE = """
import os.path
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
# You can swap out the engine for MySQL easily by changing this value
# to ``django.db.backends.mysql`` or to PostgreSQL with
# ``django.db.backends.postgresql_psycopg2``
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONF_ROOT, 'sentry.db'),
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
SENTRY_KEY = %(default_key)r
# Set this to false to require authentication
SENTRY_PUBLIC = True
# You should configure the absolute URI to Sentry. It will attempt to guess it if you don't
# but proxies may interfere with this.
# SENTRY_URL_PREFIX = 'http://sentry.example.com' # No trailing slash!
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
'workers': 3, # the number of gunicorn workers
# 'worker_class': 'gevent',
}
# Mail server configuration
# For more information check Django's documentation:
# https://docs.djangoproject.com/en/1.3/topics/email/?from=olddocs#e-mail-backends
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
# http://twitter.com/apps/new
# It's important that input a callback URL, even if its useless. We have no idea why, consult Twitter.
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
# http://developers.facebook.com/setup/
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
# http://code.google.com/apis/accounts/docs/OAuth2.html#Registering
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
# https://github.com/settings/applications/new
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
# https://trello.com/1/appKey/generate
TRELLO_API_KEY = ''
TRELLO_API_SECRET = ''
"""
def generate_settings():
"""
This command is run when ``default_path`` doesn't exist, or ``init`` is
run and returns a string representing the default data to put into their
settings file.
"""
output = CONFIG_TEMPLATE % dict(
default_key=base64.b64encode(os.urandom(KEY_LENGTH)),
)
return output
def install_plugins(settings):
from sentry.plugins import register
# entry_points={
# 'sentry.plugins': [
# 'phabricator = sentry_phabricator.plugins:PhabricatorPlugin'
# ],
# },
installed_apps = list(settings.INSTALLED_APPS)
for ep in pkg_resources.iter_entry_points('sentry.apps'):
try:
plugin = ep.load()
except Exception:
import sys
import traceback
print >> sys.stderr, "Failed to load app %r:\n%s" % (ep.name, traceback.format_exc())
else:
installed_apps.append(ep.module_name)
settings.INSTALLED_APPS = tuple(installed_apps)
for ep in pkg_resources.iter_entry_points('sentry.plugins'):
try:
plugin = ep.load()
except Exception:
import sys
import traceback
print >> sys.stderr, "Failed to load plugin %r:\n%s" % (ep.name, traceback.format_exc())
else:
register(plugin)
def initialize_app(config):
from django.utils import timezone
environment['config'] = config.get('config_path')
environment['start_date'] = timezone.now()
install_plugins(config['settings'])
def main():
run_app(
project='sentry',
default_config_path='~/.sentry/sentry.conf.py',
default_settings='sentry.conf.server',
settings_initializer=generate_settings,
settings_envvar='SENTRY_CONF',
initializer=initialize_app,
)
if __name__ == '__main__':
main() | src/sentry/utils/runner.py | from logan.runner import run_app
from sentry import environment
import base64
import os
import pkg_resources
KEY_LENGTH = 40
CONFIG_TEMPLATE = """
import os.path
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
# You can swap out the engine for MySQL easily by changing this value
# to ``django.db.backends.mysql`` or to PostgreSQL with
# ``django.db.backends.postgresql_psycopg2``
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONF_ROOT, 'sentry.db'),
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
SENTRY_KEY = %(default_key)r
# Set this to false to require authentication
SENTRY_PUBLIC = True
# You should configure the absolute URI to Sentry. It will attempt to guess it if you don't
# but proxies may interfere with this.
# SENTRY_URL_PREFIX = 'http://sentry.example.com' # No trailing slash!
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
'workers': 3, # the number of gunicorn workers
# 'worker_class': 'gevent',
}
# Mail server configuration
# For more information check Django's documentation:
# https://docs.djangoproject.com/en/1.3/topics/email/?from=olddocs#e-mail-backends
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
# http://twitter.com/apps/new
# It's important that input a callback URL, even if its useless. We have no idea why, consult Twitter.
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
# http://developers.facebook.com/setup/
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
# http://code.google.com/apis/accounts/docs/OAuth2.html#Registering
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
# https://github.com/settings/applications/new
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
# https://trello.com/1/appKey/generate
TRELLO_API_KEY = ''
TRELLO_API_SECRET = ''
"""
def generate_settings():
"""
This command is run when ``default_path`` doesn't exist, or ``init`` is
run and returns a string representing the default data to put into their
settings file.
"""
output = CONFIG_TEMPLATE % dict(
default_key=base64.b64encode(os.urandom(KEY_LENGTH)),
)
return output
def install_plugins(settings):
from sentry.plugins import register
# entry_points={
# 'sentry.plugins': [
# 'phabricator = sentry_phabricator.plugins:PhabricatorPlugin'
# ],
# },
installed_apps = list(settings.INSTALLED_APPS)
for ep in pkg_resources.iter_entry_points('sentry.apps'):
try:
plugin = ep.load()
except Exception:
import sys
import traceback
print >> sys.stderr, "Failed to load app %r:\n%s" % (ep.name, traceback.format_exc())
else:
installed_apps.append(ep.module_name)
settings.INSTALLED_APPS = tuple(installed_apps)
for ep in pkg_resources.iter_entry_points('sentry.plugins'):
try:
plugin = ep.load()
except Exception:
import sys
import traceback
print >> sys.stderr, "Failed to load plugin %r:\n%s" % (ep.name, traceback.format_exc())
else:
register(plugin)
def initialize_app(config):
from django.utils import timezone
environment['config'] = config.get('config_path')
environment['start_date'] = timezone.now()
install_plugins(config['settings'])
def main():
run_app(
project='sentry',
default_config_path='~/.sentry/sentry.conf.py',
default_settings='sentry.conf.server',
settings_initializer=generate_settings,
settings_envvar='SENTRY_CONF',
initializer=initialize_app,
)
if __name__ == '__main__':
main() | 0.37605 | 0.044953 |
import bpy
from bpy import context
import numpy as np
from bpy_extras.object_utils import world_to_camera_view
from collections import defaultdict
import bmesh
import json
Body = ["DEF-nose",
"DEF-neck",
"DEF-deltoid.R",
"DEF-elbow_fan.R",
"DEF-palm_index.R",
"DEF-deltoid.L",
"DEF-elbow_fan.L",
"elbow.L",
"elbow.R",
"DEF-palm_index.L",
"DEF-palm_middle.L",
"DEF-palm_middle.R",
"DEF-forearm.01.L",
"DEF-forearm.01.R",
"DEF-gluteus.R",
"DEF-knee_fan.R",
"DEF-foot.R",
"DEF-gluteus.L",
"DEF-knee_fan.L",
"DEF-foot.L",
"DEF-ear.R",
"DEF-ear.L", ]
Figure_skating_dress = [
"DEF-hips",
]
Low_poly = [
"DEF-eye.R",
"DEF-eye.L",
]
Ice_skates = ["DEF-nose",
"DEF-toe.L",
"DEF-foot.L",
"DEF-toe.R",
"DEF-foot.R"]
vertex_group_names = ['Body', 'Figure_skating_dress', 'Low_poly', 'Ice_skates']
vertext_group_obj = [Body, Figure_skating_dress, Low_poly, Ice_skates]
def create_empties(obj_name, group_names):
ob = bpy.data.objects[f'Figureskater:{obj_name}']
me = ob.data
scene = bpy.context.scene
camera = bpy.data.objects['Camera']
scene.update()
print(scene.frame_current)
keypoints = []
for name in ob.vertex_groups.keys():
if name in group_names:
bpy.ops.object.empty_add(location=(0, 0, 0))
mt = context.object
mt.name = f"empty_{ob.name}_{name}"
cl = mt.constraints.new('COPY_LOCATION')
cl.target = ob
cl.subtarget = name
bpy.context.scene.update()
mt.matrix_world = mt.matrix_world.copy()
mt.constraints.clear()
co_2d = world_to_camera_view(
bpy.context.scene, bpy.context.scene.camera, mt.location)
# get pixel coords
render_scale = scene.render.resolution_percentage / 100
render_size = (
int(scene.render.resolution_x * render_scale),
-int(scene.render.resolution_y * render_scale),
)
keypoints.append(
[co_2d.x * render_size[0], co_2d.y * render_size[1]])
bpy.ops.object.select_all(action='DESELECT')
mt.select = True
bpy.ops.object.delete()
print(keypoints[0])
return keypoints
def delete_empties():
obj = bpy.data.objects
for ob in obj:
if 'empty' in ob.name and len(ob.name) > len('empty'):
print(ob.name)
bpy.ops.object.select_all(action='DESELECT')
ob.select = True
bpy.ops.object.delete()
delete_empties()
allFrames = []
for i in range(context.scene.frame_start, context.scene.frame_end):
print(i)
bpy.context.scene.frame_current = i
bpy.context.scene.frame_set(i)
print(bpy.context.scene.frame_current)
bpy.context.scene.update()
frame = []
for j, name in enumerate(vertex_group_names):
print(j, name)
frame += create_empties(name, vertext_group_obj[j])
print('*'*100)
print('frame', i, frame[0])
allFrames.append(frame)
with open('/home/nadin-katrin/awesome.skating.ai/keypoint_data/keypointsvv4.json', 'w', encoding='utf-8') as f:
json.dump(allFrames, f, ensure_ascii=False, indent=4) | skatingAI/blender/program_scripts/kp_from_avatar.py | import bpy
from bpy import context
import numpy as np
from bpy_extras.object_utils import world_to_camera_view
from collections import defaultdict
import bmesh
import json
Body = ["DEF-nose",
"DEF-neck",
"DEF-deltoid.R",
"DEF-elbow_fan.R",
"DEF-palm_index.R",
"DEF-deltoid.L",
"DEF-elbow_fan.L",
"elbow.L",
"elbow.R",
"DEF-palm_index.L",
"DEF-palm_middle.L",
"DEF-palm_middle.R",
"DEF-forearm.01.L",
"DEF-forearm.01.R",
"DEF-gluteus.R",
"DEF-knee_fan.R",
"DEF-foot.R",
"DEF-gluteus.L",
"DEF-knee_fan.L",
"DEF-foot.L",
"DEF-ear.R",
"DEF-ear.L", ]
Figure_skating_dress = [
"DEF-hips",
]
Low_poly = [
"DEF-eye.R",
"DEF-eye.L",
]
Ice_skates = ["DEF-nose",
"DEF-toe.L",
"DEF-foot.L",
"DEF-toe.R",
"DEF-foot.R"]
vertex_group_names = ['Body', 'Figure_skating_dress', 'Low_poly', 'Ice_skates']
vertext_group_obj = [Body, Figure_skating_dress, Low_poly, Ice_skates]
def create_empties(obj_name, group_names):
ob = bpy.data.objects[f'Figureskater:{obj_name}']
me = ob.data
scene = bpy.context.scene
camera = bpy.data.objects['Camera']
scene.update()
print(scene.frame_current)
keypoints = []
for name in ob.vertex_groups.keys():
if name in group_names:
bpy.ops.object.empty_add(location=(0, 0, 0))
mt = context.object
mt.name = f"empty_{ob.name}_{name}"
cl = mt.constraints.new('COPY_LOCATION')
cl.target = ob
cl.subtarget = name
bpy.context.scene.update()
mt.matrix_world = mt.matrix_world.copy()
mt.constraints.clear()
co_2d = world_to_camera_view(
bpy.context.scene, bpy.context.scene.camera, mt.location)
# get pixel coords
render_scale = scene.render.resolution_percentage / 100
render_size = (
int(scene.render.resolution_x * render_scale),
-int(scene.render.resolution_y * render_scale),
)
keypoints.append(
[co_2d.x * render_size[0], co_2d.y * render_size[1]])
bpy.ops.object.select_all(action='DESELECT')
mt.select = True
bpy.ops.object.delete()
print(keypoints[0])
return keypoints
def delete_empties():
obj = bpy.data.objects
for ob in obj:
if 'empty' in ob.name and len(ob.name) > len('empty'):
print(ob.name)
bpy.ops.object.select_all(action='DESELECT')
ob.select = True
bpy.ops.object.delete()
delete_empties()
allFrames = []
for i in range(context.scene.frame_start, context.scene.frame_end):
print(i)
bpy.context.scene.frame_current = i
bpy.context.scene.frame_set(i)
print(bpy.context.scene.frame_current)
bpy.context.scene.update()
frame = []
for j, name in enumerate(vertex_group_names):
print(j, name)
frame += create_empties(name, vertext_group_obj[j])
print('*'*100)
print('frame', i, frame[0])
allFrames.append(frame)
with open('/home/nadin-katrin/awesome.skating.ai/keypoint_data/keypointsvv4.json', 'w', encoding='utf-8') as f:
json.dump(allFrames, f, ensure_ascii=False, indent=4) | 0.413714 | 0.238916 |
from __future__ import unicode_literals
from functools import partial
from lunr.pipeline import Pipeline
# map from ISO-639-1 codes to SnowballStemmer.languages
SUPPORTED_LANGUAGES = {
'ar': 'arabic',
'da': 'danish',
'nl': 'dutch',
'en': 'english',
'fi': 'finnish',
'fr': 'french',
'de': 'german',
'hu': 'hungarian',
'it': 'italian',
'no': 'norwegian',
'pt': 'portuguese',
'ro': 'romanian',
'ru': 'russian',
'es': 'spanish',
'sv': 'swedish'
}
try: # pragma: no cover
from nltk.stem.snowball import SnowballStemmer
LANGUAGE_SUPPORT = True
except ImportError: # pragma: no cover
LANGUAGE_SUPPORT = False
def get_language_stemmer(language):
"""Retrieves the SnowballStemmer for a particular language.
Args:
language (str): ISO-639-1 code of the language.
"""
return SnowballStemmer(SUPPORTED_LANGUAGES[language])
def nltk_stemmer(stemmer, token, i=None, tokens=None):
"""Wrapper around a NLTK SnowballStemmer, which includes stop words for
each language.
Args:
stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
token (lunr.Token): The token to stem.
i (int): The index of the token in a set.
tokens (list): A list of tokens representing the set.
"""
def wrapped_stem(token, metadata=None):
return stemmer.stem(token)
return token.update(wrapped_stem)
def register_languages():
"""Register all supported languages to ensure compatibility."""
for language in SUPPORTED_LANGUAGES:
language_stemmer = partial(
nltk_stemmer, get_language_stemmer(language))
Pipeline.register_function(
language_stemmer, 'stemmer-{}'.format(language))
if LANGUAGE_SUPPORT: # pragma: no cover
# TODO: registering all possible stemmers feels unnecessary but it solves
# deserializing with arbitrary language functions. Ideally the schema would
# provide the language(s) for the index and we could register the stemmers
# as needed
register_languages() | venv/Lib/site-packages/lunr/stemmer_languages.py | from __future__ import unicode_literals
from functools import partial
from lunr.pipeline import Pipeline
# map from ISO-639-1 codes to SnowballStemmer.languages
SUPPORTED_LANGUAGES = {
'ar': 'arabic',
'da': 'danish',
'nl': 'dutch',
'en': 'english',
'fi': 'finnish',
'fr': 'french',
'de': 'german',
'hu': 'hungarian',
'it': 'italian',
'no': 'norwegian',
'pt': 'portuguese',
'ro': 'romanian',
'ru': 'russian',
'es': 'spanish',
'sv': 'swedish'
}
try: # pragma: no cover
from nltk.stem.snowball import SnowballStemmer
LANGUAGE_SUPPORT = True
except ImportError: # pragma: no cover
LANGUAGE_SUPPORT = False
def get_language_stemmer(language):
"""Retrieves the SnowballStemmer for a particular language.
Args:
language (str): ISO-639-1 code of the language.
"""
return SnowballStemmer(SUPPORTED_LANGUAGES[language])
def nltk_stemmer(stemmer, token, i=None, tokens=None):
"""Wrapper around a NLTK SnowballStemmer, which includes stop words for
each language.
Args:
stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
token (lunr.Token): The token to stem.
i (int): The index of the token in a set.
tokens (list): A list of tokens representing the set.
"""
def wrapped_stem(token, metadata=None):
return stemmer.stem(token)
return token.update(wrapped_stem)
def register_languages():
"""Register all supported languages to ensure compatibility."""
for language in SUPPORTED_LANGUAGES:
language_stemmer = partial(
nltk_stemmer, get_language_stemmer(language))
Pipeline.register_function(
language_stemmer, 'stemmer-{}'.format(language))
if LANGUAGE_SUPPORT: # pragma: no cover
# TODO: registering all possible stemmers feels unnecessary but it solves
# deserializing with arbitrary language functions. Ideally the schema would
# provide the language(s) for the index and we could register the stemmers
# as needed
register_languages() | 0.65368 | 0.148325 |
__author__ = '<EMAIL>'
import datetime
import logging
import pytz
from django import test
from services.common import serialization, helpers as db_tools
class TestSerialization(test.TestCase):
def setUp(self):
self.__verbose_testing = False
if not self.__verbose_testing:
logging.getLogger('configuration').setLevel(level=logging.CRITICAL)
self.__gs_1_id = 'gs-castrelos'
self.__gs_1_ch_1_id = 'chan-cas-1'
self.__band = db_tools.create_band()
self.__user_profile = db_tools.create_user_profile()
self.__gs = db_tools.create_gs(
user_profile=self.__user_profile, identifier=self.__gs_1_id,
)
self.__gs_1_ch_1 = db_tools.gs_add_channel(
self.__gs, self.__band, self.__gs_1_ch_1_id
)
def test_serialize_iso8601_date(self):
"""UNIT test: services.common.serialization.serialize_iso8601_date
Validates the function that transforms a Datetime object into a
ISO-8601 string with Time and TimeZone.
"""
if self.__verbose_testing:
print('>>> test_serialize_iso8601_date:')
dt = datetime.datetime.now(pytz.timezone('US/Pacific'))
if dt.tzname() == 'PDT':
birthday = dt.replace(
year=1984, month=7, day=17,
hour=0, minute=0, second=0, microsecond=0
)
expected = '1984-07-17T00:00:00-07:00'
else:
birthday = dt.replace(
year=1984, month=7, day=17,
hour=0, minute=0, second=0, microsecond=0
)
expected = '1984-07-17T00:00:00-08:00'
actual = serialization.serialize_iso8601_date(birthday)
if self.__verbose_testing:
print('e = ' + str(expected))
print('a = ' + str(actual))
self.assertEqual(actual, expected, 'Wrong ISO-8601 format.')
self.__verbose_testing = False
def test_deserialize_iso8601_date(self):
"""UNIT test: services.common.serialization.deserialize_iso8601_date
Validates the deserializaiton of an ISO-8601 string into a
datetime.datetime object.
"""
if self.__verbose_testing:
print('>>> test_deserialize_iso8601_date:')
if datetime.datetime.now(pytz.timezone('US/Pacific')).tzname() == 'PDT':
in_param = '1984-07-17T00:00:00-07:00'
expected = datetime.datetime.now(
pytz.timezone('US/Pacific')
).replace(
year=1984, month=7, day=17,
hour=0, minute=0, second=0, microsecond=0
)
else:
in_param = '1984-07-17T00:00:00-08:00'
expected = datetime.datetime.now(
pytz.timezone('US/Pacific')
).replace(
year=1984, month=7, day=17,
hour=0, minute=0, second=0, microsecond=0
)
actual = serialization.deserialize_iso8601_date(in_param)
if self.__verbose_testing:
print('e = ' + str(expected))
print('a = ' + str(actual))
self.assertEqual(actual, expected, 'Wrong ISO-8601 format.')
self.__verbose_testing = False
def test_serialize_iso8601_time(self):
"""UNIT test: services.common.serialization.serialize_iso8601_time
Validates the function that transforms a Datetime object into a
ISO-8601 string with Date and TimeZone.
"""
if self.__verbose_testing:
print('\n>>> test_serialize_iso8601_time:')
dt = datetime.datetime.now(pytz.timezone('US/Pacific'))
midnight = dt.replace(hour=0, minute=0, second=0, microsecond=0)
if midnight.tzname() == 'PDT':
expected = '00:00:00-07:00'
else:
expected = '00:00:00-08:00'
actual = serialization.serialize_iso8601_time(midnight)
if self.__verbose_testing:
print('e = ' + str(expected))
print('a = ' + str(actual))
self.assertEqual(actual, expected, 'Wrong ISO-8601 format.')
def test_deserialize_iso8601_time(self):
"""UNIT test: services.common.serialization.deserialize_iso8601_time
Validates the deserializaiton of an ISO-8601 string into a
datetime.datetime object.
"""
if self.__verbose_testing:
print('\n>>> test_deserialize_iso8601_time:')
if datetime.datetime.now(pytz.timezone('US/Pacific')).tzname() == 'PDT':
in_param = '01:00:00-07:00'
expected = '08:00:00'
else:
in_param = '01:00:00-08:00'
expected = '09:00:00'
actual = serialization.deserialize_iso8601_time(in_param)
if self.__verbose_testing:
print('e = ' + str(expected))
print('a = ' + str(actual))
self.assertEqual(
actual.isoformat(), expected, 'Wrong ISO-8601 format.'
)
self.__verbose_testing = False | services/common/tests/test_serialization.py | __author__ = '<EMAIL>'
import datetime
import logging
import pytz
from django import test
from services.common import serialization, helpers as db_tools
class TestSerialization(test.TestCase):
def setUp(self):
self.__verbose_testing = False
if not self.__verbose_testing:
logging.getLogger('configuration').setLevel(level=logging.CRITICAL)
self.__gs_1_id = 'gs-castrelos'
self.__gs_1_ch_1_id = 'chan-cas-1'
self.__band = db_tools.create_band()
self.__user_profile = db_tools.create_user_profile()
self.__gs = db_tools.create_gs(
user_profile=self.__user_profile, identifier=self.__gs_1_id,
)
self.__gs_1_ch_1 = db_tools.gs_add_channel(
self.__gs, self.__band, self.__gs_1_ch_1_id
)
def test_serialize_iso8601_date(self):
"""UNIT test: services.common.serialization.serialize_iso8601_date
Validates the function that transforms a Datetime object into a
ISO-8601 string with Time and TimeZone.
"""
if self.__verbose_testing:
print('>>> test_serialize_iso8601_date:')
dt = datetime.datetime.now(pytz.timezone('US/Pacific'))
if dt.tzname() == 'PDT':
birthday = dt.replace(
year=1984, month=7, day=17,
hour=0, minute=0, second=0, microsecond=0
)
expected = '1984-07-17T00:00:00-07:00'
else:
birthday = dt.replace(
year=1984, month=7, day=17,
hour=0, minute=0, second=0, microsecond=0
)
expected = '1984-07-17T00:00:00-08:00'
actual = serialization.serialize_iso8601_date(birthday)
if self.__verbose_testing:
print('e = ' + str(expected))
print('a = ' + str(actual))
self.assertEqual(actual, expected, 'Wrong ISO-8601 format.')
self.__verbose_testing = False
def test_deserialize_iso8601_date(self):
"""UNIT test: services.common.serialization.deserialize_iso8601_date
Validates the deserializaiton of an ISO-8601 string into a
datetime.datetime object.
"""
if self.__verbose_testing:
print('>>> test_deserialize_iso8601_date:')
if datetime.datetime.now(pytz.timezone('US/Pacific')).tzname() == 'PDT':
in_param = '1984-07-17T00:00:00-07:00'
expected = datetime.datetime.now(
pytz.timezone('US/Pacific')
).replace(
year=1984, month=7, day=17,
hour=0, minute=0, second=0, microsecond=0
)
else:
in_param = '1984-07-17T00:00:00-08:00'
expected = datetime.datetime.now(
pytz.timezone('US/Pacific')
).replace(
year=1984, month=7, day=17,
hour=0, minute=0, second=0, microsecond=0
)
actual = serialization.deserialize_iso8601_date(in_param)
if self.__verbose_testing:
print('e = ' + str(expected))
print('a = ' + str(actual))
self.assertEqual(actual, expected, 'Wrong ISO-8601 format.')
self.__verbose_testing = False
def test_serialize_iso8601_time(self):
"""UNIT test: services.common.serialization.serialize_iso8601_time
Validates the function that transforms a Datetime object into a
ISO-8601 string with Date and TimeZone.
"""
if self.__verbose_testing:
print('\n>>> test_serialize_iso8601_time:')
dt = datetime.datetime.now(pytz.timezone('US/Pacific'))
midnight = dt.replace(hour=0, minute=0, second=0, microsecond=0)
if midnight.tzname() == 'PDT':
expected = '00:00:00-07:00'
else:
expected = '00:00:00-08:00'
actual = serialization.serialize_iso8601_time(midnight)
if self.__verbose_testing:
print('e = ' + str(expected))
print('a = ' + str(actual))
self.assertEqual(actual, expected, 'Wrong ISO-8601 format.')
def test_deserialize_iso8601_time(self):
"""UNIT test: services.common.serialization.deserialize_iso8601_time
Validates the deserializaiton of an ISO-8601 string into a
datetime.datetime object.
"""
if self.__verbose_testing:
print('\n>>> test_deserialize_iso8601_time:')
if datetime.datetime.now(pytz.timezone('US/Pacific')).tzname() == 'PDT':
in_param = '01:00:00-07:00'
expected = '08:00:00'
else:
in_param = '01:00:00-08:00'
expected = '09:00:00'
actual = serialization.deserialize_iso8601_time(in_param)
if self.__verbose_testing:
print('e = ' + str(expected))
print('a = ' + str(actual))
self.assertEqual(
actual.isoformat(), expected, 'Wrong ISO-8601 format.'
)
self.__verbose_testing = False | 0.655557 | 0.263872 |
import jax.numpy as jnp
from jax.random import normal, split
from jax import lax, tree_map, vmap, value_and_grad
import optax
from vb_utils import clip
def compute_natural_gradients(b, c, grads):
    """Precondition a gradient triple with the inverse Fisher information.

    Equivalent to ``I(lambda)^-1 @ grad`` for the variational family
    q = N(mu, b b^T + diag(c^2)), computed factor-wise so no full matrix
    is ever formed.

    Parameters
    ----------
    b : Array
        Factor-loading column vector of the variational covariance.
    c : Array
        Diagonal component of the variational covariance (column vector).
    grads : Tuple
        ``(grad_mu, grad_b, grad_c)`` — ordinary gradients of the lower
        bound with respect to the three variational parameters.

    Returns
    -------
    Tuple
        Natural gradients ``(nat_mu, nat_b, nat_c)`` in the same order.
    """
    grad_mu, grad_b, grad_c = grads
    b2 = b ** 2
    c2 = c ** 2
    # Auxiliary quantities for the c-block of the Fisher information.
    v1 = c2 - 2 * b2 * (1 / c2) ** 2
    v2 = b2 - (1 / c2 * c)
    kappa1 = jnp.sum(b2 / c2)
    kappa2 = (1 / (1 + jnp.sum(v2 ** 2 / v1))) / 2.
    # Mean block: rank-1 correction plus diagonal scaling.
    nat_mu = (grad_mu.T @ b) * b + c2 * grad_mu
    # b block: same structure, rescaled by (1 + kappa1) / kappa2.
    scale = (1 + kappa1) / kappa2
    nat_b = scale * (grad_b.T @ b) * b + c2 * grad_b
    # c block: diagonal solve plus a rank-1 Sherman–Morrison-style term.
    ratio = v2 / v1
    nat_c = (grad_c / v1) / 2. + kappa2 * (ratio.T @ grad_c) * ratio
    return nat_mu, nat_b, nat_c
def grad_log_q_function(b, c, theta, mu):
    """Gradient of log q(theta) w.r.t. theta for q = N(mu, b b^T + diag(c^2)).

    Evaluates -Sigma^-1 (theta - mu) using the rank-1 structure of the
    covariance (Woodbury-style correction), so only vector operations are
    needed — no matrix inverse is formed.
    """
    diff = theta - mu
    scaled_b = b / c ** 2
    # Rank-1 correction to the plain diagonal term -diff / c^2.
    correction = (scaled_b.T @ diff) / (1 + (scaled_b.T @ b)) * scaled_b
    return -diff / c ** 2 + correction
def vb_gauss_lowrank(key, logjoint_fn, data, nfeatures,
                     initial_mean=None, initial_std=0.1,
                     initial_scale=1., nsamples=20,
                     niters=200, optimizer=optax.adafactor(1e-3),
                     threshold=2500, window_size=None):
    '''Gaussian variational Bayes with rank-1 plus diagonal covariance.

    Fits q(theta) = N(mu, b b^T + diag(c^2)) to the model defined by
    ``logjoint_fn`` by stochastic natural-gradient ascent on the evidence
    lower bound, using the reparameterization
    theta = mu + b * eps1 + c * eps2 with standard-normal eps1, eps2.

    Parameters
    ----------
    key : jax.random.PRNGKey
        Source of randomness for initialization and Monte Carlo sampling.
    logjoint_fn : Callable
        Log joint function ``logjoint_fn(theta, data)``; must be
        differentiable with respect to ``theta``.
    data : Tuple
        The data to which the model is fitted, specified as a table or
        matrix; passed through to ``logjoint_fn`` unchanged.
    nfeatures : int
        Number of features (dimension of ``theta``).
    initial_mean : Array, optional
        Initial variational mean, shape (nfeatures, 1); drawn from
        N(0, initial_std^2) when omitted.
    initial_std : float
        Standard deviation of the normal distribution used for
        initialization of ``mu`` (when not given) and ``b``.
    initial_scale : float
        The constant factor used to initialize every entry of ``c``.
    nsamples : int
        Monte Carlo samples per iteration to estimate the lower bound.
    niters : int
        Number of iterations (the scan always runs exactly this many).
    optimizer : optax.GradientTransformation
        Optax optimizer applied to the (clipped) natural gradients.
    threshold : float
        Gradient clipping threshold forwarded to ``clip``.
    window_size : int, optional
        Rolling window size to smooth the lower bound. When None the
        bounds are returned unsmoothed and the *final* iteration's
        parameters are returned rather than the argmax (see NOTE below).

    Returns
    -------
    Tuple: Consists of
        1. mu : Estimation of variational mean
        2. b : The vector factor loading vector component of the variational covariance matrix
        3. c : The diagonal matrix component of the variational covariance matrix
    Array : Estimation of the lower bound over iterations
    '''
    # --- initialization: mu and b random, c a constant diagonal ---
    if initial_mean is None:
        mu_key, key = split(key, 2)
        mu = initial_std * normal(mu_key, shape=(nfeatures, 1))
    else:
        mu = initial_mean
    b_key, key = split(key, 2)
    b = initial_std * normal(b_key, shape=(nfeatures, 1))
    c = initial_scale * jnp.ones((nfeatures, 1))
    # Variational parameters vector
    variational_params = (mu, b, c)
    # Initial state of the optimizer
    opt_state = optimizer.init(variational_params)

    def sample_fn(variational_params, U_normal):
        # One Monte Carlo draw: reparameterize theta and return the
        # per-sample gradient contributions plus the log-joint value.
        mu, b, c = variational_params
        # Parameters in Normal distribution: first entry drives the
        # rank-1 factor, the rest drive the diagonal factor.
        epsilon1 = U_normal[0]
        epsilon2 = U_normal[1:].reshape((-1, 1))
        theta = mu + b * epsilon1 + c * epsilon2
        h_theta, grad_h_theta = value_and_grad(logjoint_fn)(theta, data)
        # Gradient of log variational distribution
        grad_log_q = grad_log_q_function(b, c, theta, mu)
        # Gradient of h(theta) and lower bound
        grad_theta = grad_h_theta - grad_log_q
        # Chain rule through theta = mu + b*eps1 + c*eps2 gives the
        # eps1/eps2 scalings for the b and c gradients respectively.
        return grad_theta, epsilon1 * grad_theta, epsilon2 * grad_theta, h_theta

    def iter_fn(all_params, key):
        # Main VB iteration
        variational_params, opt_state = all_params
        mu, b, c = variational_params
        samples = normal(key, shape=(nsamples, nfeatures + 1))
        *grad_lb_iter, lb_first_term = vmap(sample_fn, in_axes=(None, 0))(variational_params, samples)
        # Estimation of lowerbound.
        # log|Sigma| via the matrix determinant lemma for Sigma = b b^T + diag(c^2).
        logdet = jnp.log(jnp.linalg.det(1 + (b / c ** 2).T @ b)) + jnp.sum(jnp.log(c ** 2))
        # Mean of log-q -> mean(log q(theta)); closed form for a Gaussian.
        lb_log_q = -0.5 * nfeatures * jnp.log(2 * jnp.pi) - 0.5 * logdet - nfeatures / 2
        lower_bound = jnp.mean(lb_first_term) - lb_log_q
        # Average the per-sample gradients, then precondition with the
        # inverse Fisher information (natural gradient).
        grad_lb = tree_map(lambda x: x.mean(axis=0), grad_lb_iter)
        grads = compute_natural_gradients(b, c, grad_lb)
        # Gradient clipping
        grads = clip(grads, threshold=threshold)
        updates, opt_state = optimizer.update(grads, opt_state, variational_params)
        variational_params = optax.apply_updates(variational_params, updates)
        return (variational_params, opt_state), (variational_params, lower_bound)

    keys = split(key, niters)
    # NOTE(review): when window_size is None, `best_params` is simply the
    # final iteration's parameters, not the argmax of the lower bound —
    # confirm this is the intended behavior.
    (best_params, _), (variational_params, lower_bounds) = lax.scan(iter_fn, (variational_params, opt_state), keys)
    if window_size is not None:
        def simple_moving_average(cur_sum, i):
            # Incremental rolling-mean update over `window_size` bounds.
            diff = (lower_bounds[i] - lower_bounds[i - window_size]) / window_size
            cur_sum += diff
            return cur_sum, cur_sum
        indices = jnp.arange(window_size, niters)
        cur_sum = jnp.sum(lower_bounds[:window_size]) / window_size
        _, lower_bounds = lax.scan(simple_moving_average, cur_sum, indices)
        lower_bounds = jnp.append(jnp.array([cur_sum]), lower_bounds)
        # Pick the parameters at the iteration with the best smoothed bound
        # (offset maps the smoothed index back to the raw iteration index).
        i = jnp.argmax(lower_bounds) + window_size - 1
        best_params = tree_map(lambda x: x[i], variational_params)
    return best_params, lower_bounds
from jax.random import normal, split
from jax import lax, tree_map, vmap, value_and_grad
import optax
from vb_utils import clip
def compute_natural_gradients(b, c, grads):
    """Turn ordinary gradients into natural gradients, i.e. I^-1 @ grad.

    Works factor-wise for the variational family
    q = N(mu, b b^T + diag(c^2)): each block of the inverse Fisher
    information is applied with vector operations only.

    Parameters
    ----------
    b : Array
        Factor-loading column vector of the variational covariance.
    c : Array
        Diagonal component of the variational covariance (column vector).
    grads : Tuple
        Triple ``(grad_mu, grad_b, grad_c)`` of ordinary gradients.

    Returns
    -------
    Tuple
        The corresponding natural gradients, in the same order.
    """
    g_mu, g_b, g_c = grads
    bsq, csq = b ** 2, c ** 2
    # Helper terms for the c-block of the information matrix.
    denom = csq - 2 * bsq * (1 / csq) ** 2
    offset = bsq - (1 / csq * c)
    k1 = jnp.sum(bsq / csq)
    k2 = (1 / (1 + jnp.sum(offset ** 2 / denom))) / 2.
    # mu block: diagonal part plus a rank-1 projection onto b.
    out_mu = (g_mu.T @ b) * b + csq * g_mu
    # b block: identical shape, with an extra (1 + k1) / k2 rescaling.
    out_b = ((1 + k1) / k2) * (g_b.T @ b) * b + csq * g_b
    # c block: diagonal solve plus a rank-1 correction term.
    w = offset / denom
    out_c = (g_c / denom) / 2. + k2 * (w.T @ g_c) * w
    return out_mu, out_b, out_c
def grad_log_q_function(b, c, theta, mu):
    """Return d/dtheta log q(theta) for q = N(mu, b b^T + diag(c^2)).

    Computes -Sigma^-1 (theta - mu) through the rank-1 structure of the
    covariance, avoiding any explicit matrix inverse.
    """
    residual = theta - mu
    u = b / c ** 2
    # Diagonal term with a rank-1 (Woodbury-style) adjustment.
    return -residual / c ** 2 + (u.T @ residual) / (1 + (u.T @ b)) * u
def vb_gauss_lowrank(key, logjoint_fn, data, nfeatures,
                     initial_mean=None, initial_std=0.1,
                     initial_scale=1., nsamples=20,
                     niters=200, optimizer=optax.adafactor(1e-3),
                     threshold=2500, window_size=None):
    '''
    Gaussian variational approximation with a one-factor covariance,
    Sigma = b b^T + diag(c^2), fitted by stochastic natural-gradient ascent
    on the evidence lower bound.
    Parameters
    ----------
    key : jax.random.PRNGKey
    logjoint_fn : Callable
        Log joint function
    data : Tuple
        The data to which the model is fitted, specified as a table or matrix.
    nfeatures : int
        Number of features
    initial_mean : Array, optional
        Initial variational mean; sampled from N(0, initial_std^2) when None.
    initial_std : float
        Standard deviation of normal distribution for initialization
    initial_scale : float
        The constant factor to scale the initial values.
    nsamples : int
        Monte Carlo samples to estimate the lower bound
    niters : int
        Maximum number of iterations
    optimizer : optax.optimizers
    threshold : float
        Gradient clipping threshold
    window_size : int
        Rolling window size to smooth the lower bound.
        Default value of window size is None, which indicates that lower bounds won't be smoothed.
    Returns
    -------
    Tuple: Consists of
        1. mu : Estimation of variational mean
        2. b : The vector factor loading vector component of the variational covariance matrix
        3. c : The diagonal matrix component of the variational covariance matrix
    Array : Estimation of the lower bound over iterations
    '''
    if initial_mean is None:
        mu_key, key = split(key, 2)
        mu = initial_std * normal(mu_key, shape=(nfeatures, 1))
    else:
        mu = initial_mean
    b_key, key = split(key, 2)
    b = initial_std * normal(b_key, shape=(nfeatures, 1))
    c = initial_scale * jnp.ones((nfeatures, 1))
    # Variational parameters vector
    variational_params = (mu, b, c)
    # Initial state of the optimizer
    opt_state = optimizer.init(variational_params)
    def sample_fn(variational_params, U_normal):
        # One reparameterised draw theta ~ N(mu, b b^T + diag(c^2)) and its
        # per-sample gradient/lower-bound contributions.
        mu, b, c = variational_params
        # Parameters in Normal distribution
        epsilon1 = U_normal[0]
        epsilon2 = U_normal[1:].reshape((-1, 1))
        theta = mu + b * epsilon1 + c * epsilon2
        h_theta, grad_h_theta = value_and_grad(logjoint_fn)(theta, data)
        # Gradient of log variational distribution
        grad_log_q = grad_log_q_function(b, c, theta, mu)
        # Gradient of h(theta) and lower bound
        grad_theta = grad_h_theta - grad_log_q
        return grad_theta, epsilon1 * grad_theta, epsilon2 * grad_theta, h_theta
    def iter_fn(all_params, key):
        # Main VB iteration
        variational_params, opt_state = all_params
        mu, b, c = variational_params
        samples = normal(key, shape=(nsamples, nfeatures + 1))
        *grad_lb_iter, lb_first_term = vmap(sample_fn, in_axes=(None, 0))(variational_params, samples)
        # Estimation of lowerbound
        logdet = jnp.log(jnp.linalg.det(1 + (b / c ** 2).T @ b)) + jnp.sum(jnp.log(c ** 2))
        # Mean of log-q -> mean(log q(theta))
        lb_log_q = -0.5 * nfeatures * jnp.log(2 * jnp.pi) - 0.5 * logdet - nfeatures / 2
        lower_bound = jnp.mean(lb_first_term) - lb_log_q
        # Gradient of log variational distribution
        grad_lb = tree_map(lambda x: x.mean(axis=0), grad_lb_iter)
        grads = compute_natural_gradients(b, c, grad_lb)
        # Gradient clipping
        grads = clip(grads, threshold=threshold)
        updates, opt_state = optimizer.update(grads, opt_state, variational_params)
        variational_params = optax.apply_updates(variational_params, updates)
        return (variational_params, opt_state), (variational_params, lower_bound)
    keys = split(key, niters)
    (best_params, _), (variational_params, lower_bounds) = lax.scan(iter_fn, (variational_params, opt_state), keys)
    if window_size is not None:
        def simple_moving_average(cur_sum, i):
            # Incremental moving-average update over the lower-bound trace.
            diff = (lower_bounds[i] - lower_bounds[i - window_size]) / window_size
            cur_sum += diff
            return cur_sum, cur_sum
        indices = jnp.arange(window_size, niters)
        cur_sum = jnp.sum(lower_bounds[:window_size]) / window_size
        _, lower_bounds = lax.scan(simple_moving_average, cur_sum, indices)
        lower_bounds = jnp.append(jnp.array([cur_sum]), lower_bounds)
        # Pick the parameters from the iteration with the best smoothed bound.
        i = jnp.argmax(lower_bounds) + window_size - 1
        best_params = tree_map(lambda x: x[i], variational_params)
    # Fused dataset-extraction artifact ("| 0.860662 | 0.774839 |") removed
    # from this line; it made the statement a syntax error.
    return best_params, lower_bounds
from django.urls import path, re_path
from marks import views
# URL routing for the marks application; `type` selects the mark flavor.
# Fix: the closing bracket carried a fused extraction artifact
# ("| bridge/marks/urls.py |") that broke the module syntax.
urlpatterns = [
    # Main marks pages
    re_path(r'^(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/$', views.MarkPage.as_view(), name='mark'),
    re_path(r'^(?P<type>unsafe|safe|unknown)/association_changes/(?P<association_id>.*)/$',
            views.AssociationChangesView.as_view()),
    re_path(r'^(?P<type>unsafe|safe|unknown)/$', views.MarksListView.as_view(), name='list'),
    # Mark form
    re_path(r'^(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/(?P<action>create|edit)/$',
            views.MarkFormView.as_view(), name='mark_form'),
    re_path(r'^(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/(?P<action>create|edit)/inline/$',
            views.InlineMarkForm.as_view()),
    # Mark versions views
    re_path(r'^(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/remove_versions/$', views.RemoveVersionsView.as_view()),
    re_path(r'^(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/compare_versions/$', views.CompareVersionsView.as_view()),
    # Download/Upload marks
    re_path(r'^download/(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/$',
            views.DownloadMarkView.as_view(), name='download_mark'),
    re_path(r'^download-preset/(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/$',
            views.DownloadPresetMarkView.as_view(), name='download_preset_mark'),
    path('upload/', views.UploadMarksView.as_view()),
    path('download-all/', views.DownloadAllMarksView.as_view(), name='download_all'),
    path('upload-all/', views.UploadAllMarksView.as_view()),
    # Tags
    path('tags/save_tag/', views.SaveTagView.as_view()),
    re_path(r'^tags/(?P<type>unsafe|safe)/$', views.TagsTreeView.as_view(), name='tags'),
    re_path(r'^tags/(?P<type>unsafe|safe)/download/$', views.DownloadTagsView.as_view(), name='download_tags'),
    re_path(r'^tags/(?P<type>unsafe|safe)/upload/$', views.UploadTagsView.as_view()),
    re_path(r'^tags/(?P<type>unsafe|safe)/get_tag_data/$', views.TagDataView.as_view()),
    re_path(r'^tags/(?P<type>unsafe|safe)/delete/(?P<pk>[0-9]+)/$', views.RemoveTagView.as_view()),
    re_path(r'^(?P<type>unsafe|safe)/tags_data/$', views.MarkTagsView.as_view()),
    # Action with associations
    re_path(r'^association/(?P<type>unsafe|safe|unknown)/(?P<rid>[0-9]+)/(?P<mid>[0-9]+)/(?P<act>confirm|unconfirm)/$',
            views.ChangeAssociationView.as_view()),
    re_path(r'^association/(?P<type>unsafe|safe|unknown)/(?P<rid>[0-9]+)/(?P<mid>[0-9]+)/(?P<act>like|dislike)/$',
            views.LikeAssociation.as_view()),
    # Utils
    path('delete/', views.DeleteMarksView.as_view()),
    path('get_func_description/<int:pk>/', views.GetFuncDescription.as_view()),
    path('check-unknown-mark/<int:pk>/', views.CheckUnknownMarkView.as_view()),
]
from django.urls import path, re_path
from marks import views
# Duplicate extraction of marks/urls.py (dataset "parsed_code" column).
# Fix: the closing bracket carried fused metadata ("| 0.397588 | 0.146942 |")
# that broke the module syntax.
urlpatterns = [
    # Main marks pages
    re_path(r'^(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/$', views.MarkPage.as_view(), name='mark'),
    re_path(r'^(?P<type>unsafe|safe|unknown)/association_changes/(?P<association_id>.*)/$',
            views.AssociationChangesView.as_view()),
    re_path(r'^(?P<type>unsafe|safe|unknown)/$', views.MarksListView.as_view(), name='list'),
    # Mark form
    re_path(r'^(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/(?P<action>create|edit)/$',
            views.MarkFormView.as_view(), name='mark_form'),
    re_path(r'^(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/(?P<action>create|edit)/inline/$',
            views.InlineMarkForm.as_view()),
    # Mark versions views
    re_path(r'^(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/remove_versions/$', views.RemoveVersionsView.as_view()),
    re_path(r'^(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/compare_versions/$', views.CompareVersionsView.as_view()),
    # Download/Upload marks
    re_path(r'^download/(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/$',
            views.DownloadMarkView.as_view(), name='download_mark'),
    re_path(r'^download-preset/(?P<type>unsafe|safe|unknown)/(?P<pk>[0-9]+)/$',
            views.DownloadPresetMarkView.as_view(), name='download_preset_mark'),
    path('upload/', views.UploadMarksView.as_view()),
    path('download-all/', views.DownloadAllMarksView.as_view(), name='download_all'),
    path('upload-all/', views.UploadAllMarksView.as_view()),
    # Tags
    path('tags/save_tag/', views.SaveTagView.as_view()),
    re_path(r'^tags/(?P<type>unsafe|safe)/$', views.TagsTreeView.as_view(), name='tags'),
    re_path(r'^tags/(?P<type>unsafe|safe)/download/$', views.DownloadTagsView.as_view(), name='download_tags'),
    re_path(r'^tags/(?P<type>unsafe|safe)/upload/$', views.UploadTagsView.as_view()),
    re_path(r'^tags/(?P<type>unsafe|safe)/get_tag_data/$', views.TagDataView.as_view()),
    re_path(r'^tags/(?P<type>unsafe|safe)/delete/(?P<pk>[0-9]+)/$', views.RemoveTagView.as_view()),
    re_path(r'^(?P<type>unsafe|safe)/tags_data/$', views.MarkTagsView.as_view()),
    # Action with associations
    re_path(r'^association/(?P<type>unsafe|safe|unknown)/(?P<rid>[0-9]+)/(?P<mid>[0-9]+)/(?P<act>confirm|unconfirm)/$',
            views.ChangeAssociationView.as_view()),
    re_path(r'^association/(?P<type>unsafe|safe|unknown)/(?P<rid>[0-9]+)/(?P<mid>[0-9]+)/(?P<act>like|dislike)/$',
            views.LikeAssociation.as_view()),
    # Utils
    path('delete/', views.DeleteMarksView.as_view()),
    path('get_func_description/<int:pk>/', views.GetFuncDescription.as_view()),
    path('check-unknown-mark/<int:pk>/', views.CheckUnknownMarkView.as_view()),
]
import json
from datetime import timedelta, datetime
from airflow import DAG
from airflow.models import Variable
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_check_operator import BigQueryCheckOperator
# Config variables
dag_config = Variable.get("bigquery_github_trends_variables", deserialize_json=True)
BQ_CONN_ID = dag_config["bq_conn_id"]
BQ_PROJECT = dag_config["bq_project"]
BQ_DATASET = dag_config["bq_dataset"]
default_args = {
'owner': 'airflow',
'depends_on_past': True,
'start_date': datetime(2020, 12, 1),
'end_date': datetime(2020, 12, 5),
'email': ['<EMAIL>'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 2,
'retry_delay': timedelta(minutes=5),
}
# Set Schedule: Run pipeline once a day.
# Use cron to define exact time. Eg. 8:15am would be "15 08 * * *"
schedule_interval = "00 21 * * *"
# Define DAG: Set ID and assign default args and schedule interval
dag = DAG(
'bigquery_github_trends',
default_args=default_args,
schedule_interval=schedule_interval
)
## Task 1: check that the github archive data has a dated table created for that date
# To test this task, run this command:
# docker-compose -f docker-compose-gcloud.yml run --rm webserver airflow test bigquery_github_trends bq_check_githubarchive_day 2018-12-01
t1 = BigQueryCheckOperator(
task_id='bq_check_githubarchive_day',
sql='''
#standardSQL
SELECT
table_id
FROM
`githubarchive.day.__TABLES_SUMMARY__`
WHERE
table_id = "{{ yesterday_ds_nodash }}"
''',
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
## Task 2: check that the hacker news table contains data for that date.
t2 = BigQueryCheckOperator(
task_id='bq_check_hackernews_full',
sql='''
#standardSQL
SELECT
FORMAT_TIMESTAMP("%Y%m%d", timestamp ) AS date
FROM
`bigquery-public-data.hacker_news.full`
WHERE
type = 'story'
AND FORMAT_TIMESTAMP("%Y%m%d", timestamp ) = "{{ yesterday_ds_nodash }}"
LIMIT
1
''',
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
## Task 3: create a github daily metrics partition table
t3 = BigQueryOperator(
task_id='bq_write_to_github_daily_metrics',
sql='''
#standardSQL
SELECT
date,
repo,
SUM(IF(type='WatchEvent', 1, NULL)) AS stars,
SUM(IF(type='ForkEvent', 1, NULL)) AS forks
FROM (
SELECT
FORMAT_TIMESTAMP("%Y%m%d", created_at) AS date,
actor.id as actor_id,
repo.name as repo,
type
FROM
`githubarchive.day.{{ yesterday_ds_nodash }}`
WHERE type IN ('WatchEvent','ForkEvent')
)
GROUP BY
date,
repo
''',
destination_dataset_table='{0}.{1}.github_daily_metrics${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
## Task 4: aggregate past github events to daily partition table
t4 = BigQueryOperator(
task_id='bq_write_to_github_agg',
sql='''
#standardSQL
SELECT
"{2}" as date,
repo,
SUM(stars) as stars_last_28_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{4}")
AND TIMESTAMP("{3}") ,
stars, null)) as stars_last_7_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{3}")
AND TIMESTAMP("{3}") ,
stars, null)) as stars_last_1_day,
SUM(forks) as forks_last_28_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{4}")
AND TIMESTAMP("{3}") ,
forks, null)) as forks_last_7_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{3}")
AND TIMESTAMP("{3}") ,
forks, null)) as forks_last_1_day
FROM
`{0}.{1}.github_daily_metrics`
WHERE _PARTITIONTIME BETWEEN TIMESTAMP("{5}")
AND TIMESTAMP("{3}")
GROUP BY
date,
repo
'''.format(BQ_PROJECT, BQ_DATASET,
"{{ yesterday_ds_nodash }}", "{{ yesterday_ds }}",
"{{ macros.ds_add(ds, -6) }}",
"{{ macros.ds_add(ds, -27) }}"
)
,
destination_dataset_table='{0}.{1}.github_agg${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
# Task 5: aggregate hacker news data to a daily partition table
t5 = BigQueryOperator(
task_id='bq_write_to_hackernews_agg',
sql='''
#standardSQL
SELECT
FORMAT_TIMESTAMP("%Y%m%d", timestamp) AS date,
`by` AS submitter,
id as story_id,
REGEXP_EXTRACT(url, "(https?://github.com/[^/]*/[^/#?]*)") as url,
SUM(score) as score
FROM
`bigquery-public-data.hacker_news.full`
WHERE
type = 'story'
AND timestamp>'{{ yesterday_ds }}'
AND timestamp<'{{ ds }}'
AND url LIKE '%https://github.com%'
AND url NOT LIKE '%github.com/blog/%'
GROUP BY
date,
submitter,
story_id,
url
''',
destination_dataset_table='{0}.{1}.hackernews_agg${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
# Task 6: join the aggregate tables
t6 = BigQueryOperator(
task_id='bq_write_to_hackernews_github_agg',
sql='''
#standardSQL
SELECT
a.date as date,
a.url as github_url,
b.repo as github_repo,
a.score as hn_score,
a.story_id as hn_story_id,
b.stars_last_28_days as stars_last_28_days,
b.stars_last_7_days as stars_last_7_days,
b.stars_last_1_day as stars_last_1_day,
b.forks_last_28_days as forks_last_28_days,
b.forks_last_7_days as forks_last_7_days,
b.forks_last_1_day as forks_last_1_day
FROM
(SELECT
*
FROM
`{0}.{1}.hackernews_agg`
WHERE _PARTITIONTIME BETWEEN TIMESTAMP("{2}") AND TIMESTAMP("{2}")
)as a
LEFT JOIN
(
SELECT
repo,
CONCAT('https://github.com/', repo) as url,
stars_last_28_days,
stars_last_7_days,
stars_last_1_day,
forks_last_28_days,
forks_last_7_days,
forks_last_1_day
FROM
`{0}.{1}.github_agg`
WHERE _PARTITIONTIME BETWEEN TIMESTAMP("{2}") AND TIMESTAMP("{2}")
) as b
ON a.url = b.url
'''.format(
BQ_PROJECT, BQ_DATASET, "{{ yesterday_ds }}"
),
destination_dataset_table='{0}.{1}.hackernews_github_agg${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
# Task 7: Check if partition data is written successfully
t7 = BigQueryCheckOperator(
task_id='bq_check_hackernews_github_agg',
sql='''
#standardSQL
SELECT
COUNT(*) AS rows_in_partition
FROM `{0}.{1}.hackernews_github_agg`
WHERE _PARTITIONDATE = "{2}"
'''.format(BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds }}'
),
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag)
# Setting up Dependencies
t3.set_upstream(t1)
t4.set_upstream(t3)
t5.set_upstream(t2)
t6.set_upstream(t4)
t6.set_upstream(t5)
t7.set_upstream(t6)
# t1 >> t3
# t3 >> t4
# t2 >> t5
# t6 << [t4, t5]
# # t6 >> t7
import json
from datetime import timedelta, datetime
from airflow import DAG
from airflow.models import Variable
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_check_operator import BigQueryCheckOperator
# Config variables
dag_config = Variable.get("bigquery_github_trends_variables", deserialize_json=True)
BQ_CONN_ID = dag_config["bq_conn_id"]
BQ_PROJECT = dag_config["bq_project"]
BQ_DATASET = dag_config["bq_dataset"]
default_args = {
'owner': 'airflow',
'depends_on_past': True,
'start_date': datetime(2020, 12, 1),
'end_date': datetime(2020, 12, 5),
'email': ['<EMAIL>'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 2,
'retry_delay': timedelta(minutes=5),
}
# Set Schedule: Run pipeline once a day.
# Use cron to define exact time. Eg. 8:15am would be "15 08 * * *"
schedule_interval = "00 21 * * *"
# Define DAG: Set ID and assign default args and schedule interval
dag = DAG(
'bigquery_github_trends',
default_args=default_args,
schedule_interval=schedule_interval
)
## Task 1: check that the github archive data has a dated table created for that date
# To test this task, run this command:
# docker-compose -f docker-compose-gcloud.yml run --rm webserver airflow test bigquery_github_trends bq_check_githubarchive_day 2018-12-01
t1 = BigQueryCheckOperator(
task_id='bq_check_githubarchive_day',
sql='''
#standardSQL
SELECT
table_id
FROM
`githubarchive.day.__TABLES_SUMMARY__`
WHERE
table_id = "{{ yesterday_ds_nodash }}"
''',
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
## Task 2: check that the hacker news table contains data for that date.
t2 = BigQueryCheckOperator(
task_id='bq_check_hackernews_full',
sql='''
#standardSQL
SELECT
FORMAT_TIMESTAMP("%Y%m%d", timestamp ) AS date
FROM
`bigquery-public-data.hacker_news.full`
WHERE
type = 'story'
AND FORMAT_TIMESTAMP("%Y%m%d", timestamp ) = "{{ yesterday_ds_nodash }}"
LIMIT
1
''',
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
## Task 3: create a github daily metrics partition table
t3 = BigQueryOperator(
task_id='bq_write_to_github_daily_metrics',
sql='''
#standardSQL
SELECT
date,
repo,
SUM(IF(type='WatchEvent', 1, NULL)) AS stars,
SUM(IF(type='ForkEvent', 1, NULL)) AS forks
FROM (
SELECT
FORMAT_TIMESTAMP("%Y%m%d", created_at) AS date,
actor.id as actor_id,
repo.name as repo,
type
FROM
`githubarchive.day.{{ yesterday_ds_nodash }}`
WHERE type IN ('WatchEvent','ForkEvent')
)
GROUP BY
date,
repo
''',
destination_dataset_table='{0}.{1}.github_daily_metrics${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
## Task 4: aggregate past github events to daily partition table
t4 = BigQueryOperator(
task_id='bq_write_to_github_agg',
sql='''
#standardSQL
SELECT
"{2}" as date,
repo,
SUM(stars) as stars_last_28_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{4}")
AND TIMESTAMP("{3}") ,
stars, null)) as stars_last_7_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{3}")
AND TIMESTAMP("{3}") ,
stars, null)) as stars_last_1_day,
SUM(forks) as forks_last_28_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{4}")
AND TIMESTAMP("{3}") ,
forks, null)) as forks_last_7_days,
SUM(IF(_PARTITIONTIME BETWEEN TIMESTAMP("{3}")
AND TIMESTAMP("{3}") ,
forks, null)) as forks_last_1_day
FROM
`{0}.{1}.github_daily_metrics`
WHERE _PARTITIONTIME BETWEEN TIMESTAMP("{5}")
AND TIMESTAMP("{3}")
GROUP BY
date,
repo
'''.format(BQ_PROJECT, BQ_DATASET,
"{{ yesterday_ds_nodash }}", "{{ yesterday_ds }}",
"{{ macros.ds_add(ds, -6) }}",
"{{ macros.ds_add(ds, -27) }}"
)
,
destination_dataset_table='{0}.{1}.github_agg${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
# Task 5: aggregate hacker news data to a daily partition table
t5 = BigQueryOperator(
task_id='bq_write_to_hackernews_agg',
sql='''
#standardSQL
SELECT
FORMAT_TIMESTAMP("%Y%m%d", timestamp) AS date,
`by` AS submitter,
id as story_id,
REGEXP_EXTRACT(url, "(https?://github.com/[^/]*/[^/#?]*)") as url,
SUM(score) as score
FROM
`bigquery-public-data.hacker_news.full`
WHERE
type = 'story'
AND timestamp>'{{ yesterday_ds }}'
AND timestamp<'{{ ds }}'
AND url LIKE '%https://github.com%'
AND url NOT LIKE '%github.com/blog/%'
GROUP BY
date,
submitter,
story_id,
url
''',
destination_dataset_table='{0}.{1}.hackernews_agg${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
# Task 6: join the aggregate tables
t6 = BigQueryOperator(
task_id='bq_write_to_hackernews_github_agg',
sql='''
#standardSQL
SELECT
a.date as date,
a.url as github_url,
b.repo as github_repo,
a.score as hn_score,
a.story_id as hn_story_id,
b.stars_last_28_days as stars_last_28_days,
b.stars_last_7_days as stars_last_7_days,
b.stars_last_1_day as stars_last_1_day,
b.forks_last_28_days as forks_last_28_days,
b.forks_last_7_days as forks_last_7_days,
b.forks_last_1_day as forks_last_1_day
FROM
(SELECT
*
FROM
`{0}.{1}.hackernews_agg`
WHERE _PARTITIONTIME BETWEEN TIMESTAMP("{2}") AND TIMESTAMP("{2}")
)as a
LEFT JOIN
(
SELECT
repo,
CONCAT('https://github.com/', repo) as url,
stars_last_28_days,
stars_last_7_days,
stars_last_1_day,
forks_last_28_days,
forks_last_7_days,
forks_last_1_day
FROM
`{0}.{1}.github_agg`
WHERE _PARTITIONTIME BETWEEN TIMESTAMP("{2}") AND TIMESTAMP("{2}")
) as b
ON a.url = b.url
'''.format(
BQ_PROJECT, BQ_DATASET, "{{ yesterday_ds }}"
),
destination_dataset_table='{0}.{1}.hackernews_github_agg${2}'.format(
BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds_nodash }}'
),
write_disposition='WRITE_TRUNCATE',
allow_large_results=True,
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag
)
# Task 7: Check if partition data is written successfully
t7 = BigQueryCheckOperator(
task_id='bq_check_hackernews_github_agg',
sql='''
#standardSQL
SELECT
COUNT(*) AS rows_in_partition
FROM `{0}.{1}.hackernews_github_agg`
WHERE _PARTITIONDATE = "{2}"
'''.format(BQ_PROJECT, BQ_DATASET, '{{ yesterday_ds }}'
),
use_legacy_sql=False,
bigquery_conn_id=BQ_CONN_ID,
dag=dag)
# Setting up Dependencies
t3.set_upstream(t1)
t4.set_upstream(t3)
t5.set_upstream(t2)
t6.set_upstream(t4)
t6.set_upstream(t5)
t7.set_upstream(t6)
# t1 >> t3
# t3 >> t4
# t2 >> t5
# t6 << [t4, t5]
# # t6 >> t7
import hashlib
from typing import Any, Optional, Union, List
from .. import rdltypes
from .. import node
def normalize(value: Any, owner_node: Optional[node.Node]=None) -> str:
    """
    Flatten an RDL value into a unique string that is used for type
    normalization.

    owner_node is only consulted for component/property references, where the
    rendered string is relative to the property's owner.
    """
    # Determine what type is being flattened
    # bool must be tested before int: bool is a subclass of int in Python.
    if isinstance(value, bool):
        return normalize_boolean(value)
    elif isinstance(value, int):
        return normalize_scalar(value)
    elif isinstance(value, str):
        return normalize_string(value)
    elif isinstance(value, list):
        return normalize_array(value)
    elif isinstance(value, (rdltypes.BuiltinEnum, rdltypes.UserEnum)):
        return normalize_enum(value)
    elif isinstance(value, rdltypes.UserStruct):
        return normalize_struct(value)
    elif isinstance(value, node.Node):
        return normalize_component_ref(value, owner_node)
    elif isinstance(value, rdltypes.PropertyReference):
        return normalize_property_ref(value, owner_node)
    # is_user_enum() matches enum *types* (classes), not enum members.
    elif rdltypes.is_user_enum(value):
        return normalize_user_enum_type(value)
    else:
        # Should never get here
        raise RuntimeError(value)
def normalize_scalar(value: int) -> str:
    """
    5.1.1.4 - c.1:
    Scalar values shall be rendered using their hexadecimal representation.
    """
    return format(value, "x")
def normalize_boolean(value: bool) -> str:
    """
    5.1.1.4 - c.2:
    Boolean values shall be rendered using either t for true or f for false.
    """
    return "t" if value else "f"
def normalize_string(value: str) -> str:
    """
    5.1.1.4 - c.3:
    String values shall be rendered using the first eight characters of
    their md5 (Message-Digest Algorithm) checksum.
    """
    return hashlib.md5(value.encode('utf-8')).hexdigest()[:8]
def normalize_enum(value: Union[rdltypes.BuiltinEnum, rdltypes.UserEnum]) -> str:
    """
    5.1.1.4 - c.4:
    Enum values shall be rendered using their enumerator literal.
    """
    # Both enum flavors expose the enumerator literal via their .name attribute.
    return value.name
def normalize_array(value: List[Any]) -> str:
    """
    5.1.1.4 - c.5:
    Arrays are rendered by normalizing every element, joining the results
    with single underscores (_), and taking the first eight characters of
    the md5 checksum of that sequence:
        subsequence( md5( join( normalized_values, '_' ), 0, 8 )
    """
    joined = "_".join(normalize(element) for element in value)
    return hashlib.md5(joined.encode('utf-8')).hexdigest()[:8]
def normalize_struct(value: rdltypes.UserStruct) -> str:
    """
    5.1.1.4 - c.6:
    Structs are rendered by joining each member name with its normalized
    value using a single underscore (_), joining those member strings with
    underscores, and taking the first eight characters of the md5 checksum:
        member_normalization = concat( member_name, '_', normalized_member_value )
        subsequence( md5( join( apply( struct_members, member_normalization ) ), 0, 8)
    """
    members = (
        f"{member_name}_{normalize(member_value)}"
        for member_name, member_value in value._values.items()
    )
    digest_input = "_".join(members)
    return hashlib.md5(digest_input.encode('utf-8')).hexdigest()[:8]
def normalize_component_ref(value: node.Node, owner_node: node.Node) -> str:
    """
    Hash of relative path from owner of the property to the target component
    """
    rel_path = value.get_rel_path(owner_node)
    return hashlib.md5(rel_path.encode('utf-8')).hexdigest()[:8]
def normalize_property_ref(value: rdltypes.PropertyReference, owner_node: node.Node) -> str:
    """
    Hash of relative path from owner of the property to the target component's
    property
    """
    ref_path = f"{value.node.get_rel_path(owner_node)}->{value.name}"
    return hashlib.md5(ref_path.encode('utf-8')).hexdigest()[:8]
def normalize_user_enum_type(value: type) -> str:
    """
    Enum type references shall be rendered using their enumeration type name.
    """
    # The original return line carried a fused dataset-extraction artifact
    # ("| systemrdl/core/value_normalization.py | import hashlib"); the
    # artifact is removed and the next row's leading import restored below.
    return value.__name__
import hashlib
from typing import Any, Optional, Union, List
from .. import rdltypes
from .. import node
# NOTE(review): duplicate extraction of value_normalization (dataset
# "parsed_code" column). Functions below are byte-identical to the first copy
# apart from the removed trailing "| 0.859074 | 0.428293 |" metadata artifact
# on the final line, which made the module a syntax error.
def normalize(value: Any, owner_node: Optional[node.Node]=None) -> str:
    """
    Flatten an RDL value into a unique string that is used for type
    normalization.
    """
    # Determine what type is being flattened
    # bool must be tested before int: bool is a subclass of int in Python.
    if isinstance(value, bool):
        return normalize_boolean(value)
    elif isinstance(value, int):
        return normalize_scalar(value)
    elif isinstance(value, str):
        return normalize_string(value)
    elif isinstance(value, list):
        return normalize_array(value)
    elif isinstance(value, (rdltypes.BuiltinEnum, rdltypes.UserEnum)):
        return normalize_enum(value)
    elif isinstance(value, rdltypes.UserStruct):
        return normalize_struct(value)
    elif isinstance(value, node.Node):
        return normalize_component_ref(value, owner_node)
    elif isinstance(value, rdltypes.PropertyReference):
        return normalize_property_ref(value, owner_node)
    elif rdltypes.is_user_enum(value):
        return normalize_user_enum_type(value)
    else:
        # Should never get here
        raise RuntimeError(value)
def normalize_scalar(value: int) -> str:
    """
    5.1.1.4 - c.1:
    Scalar values shall be rendered using their hexadecimal representation.
    """
    return "%x" % value
def normalize_boolean(value: bool) -> str:
    """
    5.1.1.4 - c.2:
    Boolean values shall be rendered using either t for true or f for false.
    """
    if value:
        return "t"
    else:
        return "f"
def normalize_string(value: str) -> str:
    """
    5.1.1.4 - c.3:
    String values shall be rendered using the first eight characters of
    their md5 (Message-Digest Algorithm) checksum.
    """
    md5 = hashlib.md5(value.encode('utf-8')).hexdigest()
    return md5[:8]
def normalize_enum(value: Union[rdltypes.BuiltinEnum, rdltypes.UserEnum]) -> str:
    """
    5.1.1.4 - c.4:
    Enum values shall be rendered using their enumerator literal.
    """
    return value.name
def normalize_array(value: List[Any]) -> str:
    """
    5.1.1.4 - c.5:
    Arrays shall be rendered by:
        1. generating the normalized values of its elements,
        2. joining these elements with single underscores (_) into a single
           character sequence, and
        3. using the first eight characters of the md5 checksum of this
           character sequence
    ... which can be semi-formalized as:
        subsequence( md5( join( normalized_values, '_' ), 0, 8 )
    """
    norm_elements = []
    for element in value:
        norm_elements.append(normalize(element))
    norm_str = "_".join(norm_elements)
    md5 = hashlib.md5(norm_str.encode('utf-8')).hexdigest()
    return md5[:8]
def normalize_struct(value: rdltypes.UserStruct) -> str:
    """
    5.1.1.4 - c.6:
    Structs shall be rendered by:
        1. generating the normalized value of each member,
        2. joining each member's name with its normalized value, separated by
           a single underscore (_),
        3. joining the member character sequences with single underscores,
        4. using the first eight characters of the md5 checksum of this
           character sequence
    ... which can be semi-formalized as:
        member_normalization = concat( member_name, '_', normalized_member_value )
        subsequence( md5( join( apply( struct_members, member_normalization ) ), 0, 8)
    """
    norm_elements = []
    for member_name, member_value in value._values.items():
        norm_elements.append("%s_%s" % (member_name, normalize(member_value)))
    norm_str = "_".join(norm_elements)
    md5 = hashlib.md5(norm_str.encode('utf-8')).hexdigest()
    return md5[:8]
def normalize_component_ref(value: node.Node, owner_node: node.Node) -> str:
    """
    Hash of relative path from owner of the property to the target component
    """
    path = value.get_rel_path(owner_node)
    md5 = hashlib.md5(path.encode('utf-8')).hexdigest()
    return md5[:8]
def normalize_property_ref(value: rdltypes.PropertyReference, owner_node: node.Node) -> str:
    """
    Hash of relative path from owner of the property to the target component's
    property
    """
    path = "%s->%s" % (value.node.get_rel_path(owner_node), value.name)
    md5 = hashlib.md5(path.encode('utf-8')).hexdigest()
    return md5[:8]
def normalize_user_enum_type(value: type) -> str:
    """
    Enum type references shall be rendered using their enumeration type name.
    """
    return value.__name__
class ResultParser(object):
    """Flatten a structure-recognition result dict into ordered text.

    ``doc`` maps page -> {cluster -> segment dict, "bounding_box": ...};
    each segment dict carries "bounding_box", "element" and "content"
    entries (see parse_content).

    Fix: the final line of this class carried a fused dataset-extraction
    artifact ("| structure_recognition/ResultParser.py | class
    ResultParser(object):") that made the module a syntax error; it has
    been removed.
    """
    # When True, get_text() prefixes every segment with a "[!ELEMENT]" banner.
    MARK_SEGMENTS = True

    def __init__(self, doc):
        self.doc = doc
        self.parsed = False  # becomes True once parse_content() has run

    def parse_content(self):
        """Spatially order clusters and their content; attach a "text" entry
        to every cluster. Mutates and returns ``self.doc``.

        Clusters are ordered by (bounding_box[2], bounding_box[0]) and each
        cluster's content items by rounded (y_0, x_0). "text"/"none"
        elements join their lines with newlines; any other element (tables
        and the like) separates rows with newlines and same-row cells
        with ";".
        """
        for page in self.doc:
            # sort clusters
            # "bounding_box" is a page-level entry, not a cluster: stash it
            # while sorting and re-attach it afterwards.
            bbox = self.doc[page]["bounding_box"]
            del self.doc[page]["bounding_box"]
            page_content = list(self.doc[page].values())
            page_clusters = list(self.doc[page].keys())
            for i, elt in enumerate(page_content):
                elt["cluster"] = page_clusters[i]
            page_content = sorted(page_content,
                                  key=lambda x: [x["bounding_box"][2], x["bounding_box"][0]],
                                  reverse=False)
            page_content = {x["cluster"]: x for x in page_content}
            self.doc[page] = page_content
            # iterate and convert
            for cluster in self.doc[page]:
                contents = self.doc[page][cluster]["content"]
                contents = sorted(contents,
                                  key=lambda x: [round(x["y_0"]), round(x["x_0"])],
                                  reverse=False)
                element = self.doc[page][cluster]["element"]
                if element in ["text", "none"]:
                    self.doc[page][cluster]["text"] = "\n".join(list(map(lambda x: x["text"], contents)))
                else:
                    text = ""
                    prev_y = 0
                    for el in contents:
                        # New rounded y starts a new row; same y appends a cell.
                        text += ("\n" if round(el["y_0"]) != prev_y else ";") + el["text"].strip()
                        prev_y = round(el["y_0"])
                    # text[1:] drops the leading separator.
                    self.doc[page][cluster]["text"] = text[1:]
            self.doc[page]["bounding_box"] = bbox
        self.parsed = True
        return self.doc

    def get_text(self):
        """Concatenate the text of every cluster on every page, optionally
        prefixed with a segment marker; parses the document on demand.
        """
        if not self.parsed:
            self.parse_content()
        text = ""
        for page in self.doc:
            for cluster in self.doc[page]:
                if cluster != "bounding_box":
                    text += self.get_segment_marker(self.doc[page][cluster])
                    text += self.doc[page][cluster]["text"] + "\n"
        return text

    def get_segment_marker(self, segment):
        """Return a "[!ELEMENT]" banner for *segment*, or "" when
        MARK_SEGMENTS is disabled.
        """
        return "\n[!" + segment["element"].upper() + "]\n" if self.MARK_SEGMENTS else ""
MARK_SEGMENTS = True
def __init__(self, doc):
self.doc = doc
self.parsed = False
def parse_content(self):
"""
:return:
"""
for page in self.doc:
# sort clusters
bbox = self.doc[page]["bounding_box"]
del self.doc[page]["bounding_box"]
page_content = list(self.doc[page].values())
page_clusters = list(self.doc[page].keys())
for i, elt in enumerate(page_content):
elt["cluster"] = page_clusters[i]
page_content = sorted(page_content,
key=lambda x: [x["bounding_box"][2], x["bounding_box"][0]],
reverse=False)
page_content = {x["cluster"]: x for x in page_content}
self.doc[page] = page_content
# iterate and convert
for cluster in self.doc[page]:
contents = self.doc[page][cluster]["content"]
contents = sorted(contents,
key=lambda x: [round(x["y_0"]), round(x["x_0"])],
reverse=False)
element = self.doc[page][cluster]["element"]
if element in ["text", "none"]:
self.doc[page][cluster]["text"] = "\n".join(list(map(lambda x: x["text"], contents)))
else:
text = ""
prev_y = 0
for el in contents:
# print(str(round(el["y_0"])) + " " + str(round(el["x_0"])))
text += ("\n" if round(el["y_0"]) != prev_y else ";") + el["text"].strip()
prev_y = round(el["y_0"])
self.doc[page][cluster]["text"] = text[1:]
self.doc[page]["bounding_box"] = bbox
self.parsed = True
return self.doc
def get_text(self):
"""
:return:
"""
if not self.parsed:
self.parse_content()
text = ""
for page in self.doc:
for cluster in self.doc[page]:
if cluster != "bounding_box":
text += self.get_segment_marker(self.doc[page][cluster])
text += self.doc[page][cluster]["text"] + "\n"
return text
def get_segment_marker(self, segment):
"""
:param segment:
:return:
"""
return "\n[!" + segment["element"].upper() + "]\n" if self.MARK_SEGMENTS else "" | 0.427636 | 0.248024 |
from __future__ import print_function
import sys
import os
import errno
from fontTools.ttLib import TTFont
from compressor import Compressor
from cff_lib import CharSet, decompileDict, DictINDEX, FDSelect, INDEX
from StringIO import StringIO
import argparse
from rle_font import RleFont
from cleanup import cleanup
from base_fonter import BaseFonter
from font_info import FontInfo
from base_header import BaseHeaderPrepare
def main(args):
"""Main program to run preprocessing of the font and dump the base parts
Arguments:
font-file
--output= Output folder of the files, default is current folder
--hinting=(False|True) ,default is false
"""
parser = argparse.ArgumentParser(prog='pyprepfnt')
parser.add_argument('fontfile',help='Input font file')
parser.add_argument('--changefont', default=False , action='store_true', help='Font structure has changed, default is True')
parser.add_argument('--changebase', default=False , action='store_true', help='Base structure has changed, default is True')
parser.add_argument('--hinting',default=False, action='store_true', help='Enable hinting if specified, no hinting if not present')
parser.add_argument('--output', default='.' , help='Output folder, default is current folder')
cmd_args = parser.parse_args(args)
fontfile = cmd_args.fontfile
# TODO(bstell) use Logger
print('preprocess {0}'.format(cmd_args.fontfile))
basename = os.path.basename(fontfile)
filename, extension = os.path.splitext(basename)
output_folder = cmd_args.output+'/'+filename
try:
os.makedirs(output_folder)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
cleanfile = output_folder+'/'+filename + '_clean' + extension
is_clean = os.path.isfile(cleanfile)
if not is_clean:
cleanup(fontfile, cmd_args.hinting, cleanfile)
dump_tables(cleanfile, output_folder)
print('done')
def dump_tables(fontfile, output):
font = TTFont(fontfile,lazy=True)
dump_folder = output + '_tables'
print('dump results in {0}'.format(dump_folder))
try:
os.makedirs(dump_folder)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
header_dict = FontInfo.getInformation(fontfile, FontInfo.TAGS.keys())
bin_header = BaseHeaderPrepare.prepare(BaseFonter.BASE_VERSION, header_dict)
print('Base header total size=',len(bin_header))
base_fonter = BaseFonter(fontfile)
base_dump = dump_folder + '/base_dump'
base_fonter.dump_base(base_dump)
# OpenType tables.
dump_file = open(base_dump,'r+b')
tables = font.reader.tables
for name in font.reader.tables:
table = tables[name]
offset = table.offset
length = table.length
#print('{0}: offset={1}, length={2}'.format(name, offset, length))
table_file_name = dump_folder + '/' + name.replace('/', '_')
table_file = open(table_file_name,'w+b')
dump_file.seek(offset);
table_file.write(dump_file.read(length))
table_file.close()
rle_table = RleFont(table_file_name)
rle_table.encode()
rle_table.write(table_file_name)
compressor = Compressor(Compressor.GZIP_INPLACE_CMD)
compressor.compress(table_file_name)
print('{0}: offset={1:9d}\tlen={2:9d}\tcmp_len={3:9d}'.format(name, offset, length,os.path.getsize(table_file_name+'.gz')))
print('TODO(bstell) save and compress the CFF parts.')
if 'CFF ' in font:
dumpCFFTable(font)
font.close()
def dumpCFFTable(font):
cff_reader = font.reader.tables['CFF ']
cff_data = font.reader['CFF ']
cff_file = StringIO(cff_data)
print('cff_reader.offset={0}'.format(cff_reader.offset))
print('cff_reader.length={0}'.format(cff_reader.length))
cff_file.seek(4) # seek past header
nameIndex = INDEX(cff_file)
start, count, offSize, past_end = nameIndex.getInfo()
print('Name INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
nameIndex.showItems('Name INDEX', 0, 3)
topDictIndex = DictINDEX(cff_file)
start, count, offSize, past_end = topDictIndex.getInfo()
print('Top DICT INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
topDictIndex.showItems('Top DICT INDEX', 0, 0, 3)
# There is only one font in a CID font
font_dict = topDictIndex.getDict(0)
stringIndex = INDEX(cff_file)
start, count, offSize, past_end = stringIndex.getInfo()
print('String INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
stringIndex.showItems('String INDEX', 0, 3)
globalSubrIndex = INDEX(cff_file)
start, count, offSize, past_end = globalSubrIndex.getInfo()
print('Global Subr INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
globalSubrIndex.showItems('Global Subr INDEX', 0, 3)
print("CIDFonts do not have an Encodings value")
char_strings_offset = font_dict['CharStrings']
print('CharStrings = {0}'.format(char_strings_offset))
cff_file.seek(char_strings_offset)
charStringsIndex = INDEX(cff_file)
start, count, offSize, past_end = charStringsIndex.getInfo()
print('CharStrings INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
num_glyphs = count
charset_offset = font_dict['charset']
print('charset = {0}'.format(charset_offset))
cff_file.seek(charset_offset)
charset = CharSet(cff_file, num_glyphs)
print('charset: size = {0}'.format(charset.get_size()))
fdselect_offset = font_dict['FDSelect']
print('FDSelect = {0}'.format(fdselect_offset))
cff_file.seek(fdselect_offset)
fdselect = FDSelect(cff_file, num_glyphs)
print('FDSelect: size = {0}'.format(fdselect.get_size()))
fdarray_offset = font_dict['FDArray']
print('FDArray = {0}'.format(fdarray_offset))
cff_file.seek(fdarray_offset)
fdarray = DictINDEX(cff_file)
start, count, offSize, past_end = fdarray.getInfo()
print('Top DICT INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
fdarray.showItems('FDArray', 0, 0, 3)
fdarray.showItems('FDArray', 1, 0, 3)
fdcount = count
subr_len = 0
for i in range(fdcount):
private_dict = fdarray.getDict(i)
length, offset = private_dict['Private']
#print('private dict {0}: offset={1}, end={2}, length={3}'.format(
# i, offset, offset+length, length))
cff_file.seek(offset)
data = cff_file.read(length)
dict = decompileDict(data)
if 'Subrs' in dict:
subrs_offset = dict['Subrs']
cff_file.seek(offset + subrs_offset)
subrsIndex = INDEX(cff_file)
start, count, offSize, past_end = subrsIndex.getInfo()
length = past_end - start
subr_len += length
#print(' subrs: start={0}, count={1}, end={2}'.format(
# start, count, past_end))
print('total subr length = {0}'.format(subr_len))
def console_msg(msg):
pass
if __name__ == '__main__':
main(sys.argv[1:]) | build_time/src/dump_base_parts.py | from __future__ import print_function
import sys
import os
import errno
from fontTools.ttLib import TTFont
from compressor import Compressor
from cff_lib import CharSet, decompileDict, DictINDEX, FDSelect, INDEX
from StringIO import StringIO
import argparse
from rle_font import RleFont
from cleanup import cleanup
from base_fonter import BaseFonter
from font_info import FontInfo
from base_header import BaseHeaderPrepare
def main(args):
"""Main program to run preprocessing of the font and dump the base parts
Arguments:
font-file
--output= Output folder of the files, default is current folder
--hinting=(False|True) ,default is false
"""
parser = argparse.ArgumentParser(prog='pyprepfnt')
parser.add_argument('fontfile',help='Input font file')
parser.add_argument('--changefont', default=False , action='store_true', help='Font structure has changed, default is True')
parser.add_argument('--changebase', default=False , action='store_true', help='Base structure has changed, default is True')
parser.add_argument('--hinting',default=False, action='store_true', help='Enable hinting if specified, no hinting if not present')
parser.add_argument('--output', default='.' , help='Output folder, default is current folder')
cmd_args = parser.parse_args(args)
fontfile = cmd_args.fontfile
# TODO(bstell) use Logger
print('preprocess {0}'.format(cmd_args.fontfile))
basename = os.path.basename(fontfile)
filename, extension = os.path.splitext(basename)
output_folder = cmd_args.output+'/'+filename
try:
os.makedirs(output_folder)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
cleanfile = output_folder+'/'+filename + '_clean' + extension
is_clean = os.path.isfile(cleanfile)
if not is_clean:
cleanup(fontfile, cmd_args.hinting, cleanfile)
dump_tables(cleanfile, output_folder)
print('done')
def dump_tables(fontfile, output):
font = TTFont(fontfile,lazy=True)
dump_folder = output + '_tables'
print('dump results in {0}'.format(dump_folder))
try:
os.makedirs(dump_folder)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
header_dict = FontInfo.getInformation(fontfile, FontInfo.TAGS.keys())
bin_header = BaseHeaderPrepare.prepare(BaseFonter.BASE_VERSION, header_dict)
print('Base header total size=',len(bin_header))
base_fonter = BaseFonter(fontfile)
base_dump = dump_folder + '/base_dump'
base_fonter.dump_base(base_dump)
# OpenType tables.
dump_file = open(base_dump,'r+b')
tables = font.reader.tables
for name in font.reader.tables:
table = tables[name]
offset = table.offset
length = table.length
#print('{0}: offset={1}, length={2}'.format(name, offset, length))
table_file_name = dump_folder + '/' + name.replace('/', '_')
table_file = open(table_file_name,'w+b')
dump_file.seek(offset);
table_file.write(dump_file.read(length))
table_file.close()
rle_table = RleFont(table_file_name)
rle_table.encode()
rle_table.write(table_file_name)
compressor = Compressor(Compressor.GZIP_INPLACE_CMD)
compressor.compress(table_file_name)
print('{0}: offset={1:9d}\tlen={2:9d}\tcmp_len={3:9d}'.format(name, offset, length,os.path.getsize(table_file_name+'.gz')))
print('TODO(bstell) save and compress the CFF parts.')
if 'CFF ' in font:
dumpCFFTable(font)
font.close()
def dumpCFFTable(font):
cff_reader = font.reader.tables['CFF ']
cff_data = font.reader['CFF ']
cff_file = StringIO(cff_data)
print('cff_reader.offset={0}'.format(cff_reader.offset))
print('cff_reader.length={0}'.format(cff_reader.length))
cff_file.seek(4) # seek past header
nameIndex = INDEX(cff_file)
start, count, offSize, past_end = nameIndex.getInfo()
print('Name INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
nameIndex.showItems('Name INDEX', 0, 3)
topDictIndex = DictINDEX(cff_file)
start, count, offSize, past_end = topDictIndex.getInfo()
print('Top DICT INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
topDictIndex.showItems('Top DICT INDEX', 0, 0, 3)
# There is only one font in a CID font
font_dict = topDictIndex.getDict(0)
stringIndex = INDEX(cff_file)
start, count, offSize, past_end = stringIndex.getInfo()
print('String INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
stringIndex.showItems('String INDEX', 0, 3)
globalSubrIndex = INDEX(cff_file)
start, count, offSize, past_end = globalSubrIndex.getInfo()
print('Global Subr INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
globalSubrIndex.showItems('Global Subr INDEX', 0, 3)
print("CIDFonts do not have an Encodings value")
char_strings_offset = font_dict['CharStrings']
print('CharStrings = {0}'.format(char_strings_offset))
cff_file.seek(char_strings_offset)
charStringsIndex = INDEX(cff_file)
start, count, offSize, past_end = charStringsIndex.getInfo()
print('CharStrings INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
num_glyphs = count
charset_offset = font_dict['charset']
print('charset = {0}'.format(charset_offset))
cff_file.seek(charset_offset)
charset = CharSet(cff_file, num_glyphs)
print('charset: size = {0}'.format(charset.get_size()))
fdselect_offset = font_dict['FDSelect']
print('FDSelect = {0}'.format(fdselect_offset))
cff_file.seek(fdselect_offset)
fdselect = FDSelect(cff_file, num_glyphs)
print('FDSelect: size = {0}'.format(fdselect.get_size()))
fdarray_offset = font_dict['FDArray']
print('FDArray = {0}'.format(fdarray_offset))
cff_file.seek(fdarray_offset)
fdarray = DictINDEX(cff_file)
start, count, offSize, past_end = fdarray.getInfo()
print('Top DICT INDEX: start={0}, count={1}, end={2}'.format(start, count, past_end))
fdarray.showItems('FDArray', 0, 0, 3)
fdarray.showItems('FDArray', 1, 0, 3)
fdcount = count
subr_len = 0
for i in range(fdcount):
private_dict = fdarray.getDict(i)
length, offset = private_dict['Private']
#print('private dict {0}: offset={1}, end={2}, length={3}'.format(
# i, offset, offset+length, length))
cff_file.seek(offset)
data = cff_file.read(length)
dict = decompileDict(data)
if 'Subrs' in dict:
subrs_offset = dict['Subrs']
cff_file.seek(offset + subrs_offset)
subrsIndex = INDEX(cff_file)
start, count, offSize, past_end = subrsIndex.getInfo()
length = past_end - start
subr_len += length
#print(' subrs: start={0}, count={1}, end={2}'.format(
# start, count, past_end))
print('total subr length = {0}'.format(subr_len))
def console_msg(msg):
pass
if __name__ == '__main__':
main(sys.argv[1:]) | 0.138958 | 0.089494 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("interactions", "0015_auto_20210312_0507"),
]
operations = [
migrations.CreateModel(
name="ExcludeFromNotifcation",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created_on",
models.DateTimeField(auto_now_add=True, db_index=True, null=True),
),
("modified_on", models.DateTimeField(auto_now=True, null=True)),
("exclude_email", models.EmailField(max_length=254)),
(
"created_by",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
(
"excluded_user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
(
"modified_by",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"abstract": False,
},
),
] | api/interactions/migrations/0016_excludefromnotifcation.py |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("interactions", "0015_auto_20210312_0507"),
]
operations = [
migrations.CreateModel(
name="ExcludeFromNotifcation",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created_on",
models.DateTimeField(auto_now_add=True, db_index=True, null=True),
),
("modified_on", models.DateTimeField(auto_now=True, null=True)),
("exclude_email", models.EmailField(max_length=254)),
(
"created_by",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
(
"excluded_user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
(
"modified_by",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"abstract": False,
},
),
] | 0.481698 | 0.128143 |
import matplotlib.pyplot as plt
import numpy as np
from ..util.constants import *
"""
MPL 2.0 License
Copyright (c) 2022, <NAME>
All rights reserved.
"""
def plot_intensity(self, I, square_root = False, figsize=(7, 6),
xlim=None, ylim=None, grid = False, text = None, units = mm,
slice_y_pos = None, slice_x_pos = None):
"""visualize the diffraction pattern intesity with matplotlib"""
from ..util.backend_functions import backend as bd
plt.style.use("dark_background")
if square_root == False:
if bd != np:
I = I.get()
else:
I = I
else:
if bd != np:
I = np.sqrt(I.get())
else:
I = np.sqrt(I)
fig = plt.figure(figsize=figsize)
if (slice_y_pos == None) and (slice_x_pos == None):
ax = fig.add_subplot(1, 1, 1)
else:
ax = fig.add_subplot(1, 2, 1)
if grid == True:
ax.grid(alpha =0.2)
if xlim != None:
ax.set_xlim(np.array(xlim)/units)
if ylim != None:
ax.set_ylim(np.array(ylim)/units)
if units == mm:
ax.set_xlabel("[mm]")
ax.set_ylabel("[mm]")
elif units == um:
ax.set_xlabel("[um]")
ax.set_ylabel("[um]")
elif units == cm:
ax.set_xlabel("[cm]")
ax.set_ylabel("[cm]")
elif units == nm:
ax.set_xlabel("[nm]")
ax.set_ylabel("[nm]")
elif units == m:
ax.set_xlabel("[m]")
ax.set_ylabel("[m]")
if text == None:
ax.set_title("Screen distance = " + str(self.z * 100) + " cm")
else:
ax.set_title(text)
im = ax.imshow(
I, cmap= 'inferno',
extent=[
float(self.x[0]) / units,
float(self.x[-1] + self.dx) / units,
float(self.y[0] )/ units,
float(self.y[-1] + self.dy) / units,
],
interpolation="spline36", origin = "lower"
)
cb = fig.colorbar(im, orientation = 'vertical')
if square_root == False:
cb.set_label(r'Intensity $\left[W / m^2 \right]$', fontsize=10, labelpad = 10 )
else:
cb.set_label(r'Square Root Intensity $\left[ \sqrt{W / m^2 } \right]$', fontsize=10, labelpad = 10 )
ax.set_aspect('equal')
if slice_y_pos != None:
ax_slice = fig.add_subplot(1, 2, 2)
plt.subplots_adjust(wspace=0.3)
ax_slice.set_title("X slice")
#plt.subplots_adjust(right=2)
if bd != np:
x = self.x.get()
y = self.y.get()
else:
x = self.x
y = self.y
ax_slice.plot(x/units, I[np.argmin(abs(y-slice_y_pos)),:]**2)
ax_slice.set_ylabel(r'Intensity $\left[W / m^2 \right]$')
if grid == True:
ax_slice.grid(alpha =0.2)
if xlim != None:
ax_slice.set_xlim(np.array(xlim)/units)
if units == mm:
ax_slice.set_xlabel("[mm]")
elif units == um:
ax_slice.set_xlabel("[um]")
elif units == cm:
ax_slice.set_xlabel("[cm]")
elif units == nm:
ax_slice.set_xlabel("[nm]")
elif units == m:
ax_slice.set_xlabel("[m]")
if slice_x_pos != None:
ax_slice = fig.add_subplot(1, 2, 2)
plt.subplots_adjust(wspace=0.3)
ax_slice.set_title("Y slice")
#plt.subplots_adjust(right=2)
if bd != np:
x = self.x.get()
y = self.y.get()
else:
x = self.x
y = self.y
ax_slice.plot(y/units, I[:, np.argmin(abs(x-slice_x_pos))]**2)
ax_slice.set_ylabel(r'Intensity $\left[W / m^2 \right]$')
if grid == True:
ax_slice.grid(alpha =0.2)
if xlim != None:
ax_slice.set_xlim(np.array(ylim)/units)
if units == mm:
ax_slice.set_xlabel("[mm]")
elif units == um:
ax_slice.set_xlabel("[um]")
elif units == cm:
ax_slice.set_xlabel("[cm]")
elif units == nm:
ax_slice.set_xlabel("[nm]")
elif units == m:
ax_slice.set_xlabel("[m]")
plt.show() | diffractsim/visualization/plot_intensity.py | import matplotlib.pyplot as plt
import numpy as np
from ..util.constants import *
"""
MPL 2.0 License
Copyright (c) 2022, <NAME>
All rights reserved.
"""
def plot_intensity(self, I, square_root = False, figsize=(7, 6),
xlim=None, ylim=None, grid = False, text = None, units = mm,
slice_y_pos = None, slice_x_pos = None):
"""visualize the diffraction pattern intesity with matplotlib"""
from ..util.backend_functions import backend as bd
plt.style.use("dark_background")
if square_root == False:
if bd != np:
I = I.get()
else:
I = I
else:
if bd != np:
I = np.sqrt(I.get())
else:
I = np.sqrt(I)
fig = plt.figure(figsize=figsize)
if (slice_y_pos == None) and (slice_x_pos == None):
ax = fig.add_subplot(1, 1, 1)
else:
ax = fig.add_subplot(1, 2, 1)
if grid == True:
ax.grid(alpha =0.2)
if xlim != None:
ax.set_xlim(np.array(xlim)/units)
if ylim != None:
ax.set_ylim(np.array(ylim)/units)
if units == mm:
ax.set_xlabel("[mm]")
ax.set_ylabel("[mm]")
elif units == um:
ax.set_xlabel("[um]")
ax.set_ylabel("[um]")
elif units == cm:
ax.set_xlabel("[cm]")
ax.set_ylabel("[cm]")
elif units == nm:
ax.set_xlabel("[nm]")
ax.set_ylabel("[nm]")
elif units == m:
ax.set_xlabel("[m]")
ax.set_ylabel("[m]")
if text == None:
ax.set_title("Screen distance = " + str(self.z * 100) + " cm")
else:
ax.set_title(text)
im = ax.imshow(
I, cmap= 'inferno',
extent=[
float(self.x[0]) / units,
float(self.x[-1] + self.dx) / units,
float(self.y[0] )/ units,
float(self.y[-1] + self.dy) / units,
],
interpolation="spline36", origin = "lower"
)
cb = fig.colorbar(im, orientation = 'vertical')
if square_root == False:
cb.set_label(r'Intensity $\left[W / m^2 \right]$', fontsize=10, labelpad = 10 )
else:
cb.set_label(r'Square Root Intensity $\left[ \sqrt{W / m^2 } \right]$', fontsize=10, labelpad = 10 )
ax.set_aspect('equal')
if slice_y_pos != None:
ax_slice = fig.add_subplot(1, 2, 2)
plt.subplots_adjust(wspace=0.3)
ax_slice.set_title("X slice")
#plt.subplots_adjust(right=2)
if bd != np:
x = self.x.get()
y = self.y.get()
else:
x = self.x
y = self.y
ax_slice.plot(x/units, I[np.argmin(abs(y-slice_y_pos)),:]**2)
ax_slice.set_ylabel(r'Intensity $\left[W / m^2 \right]$')
if grid == True:
ax_slice.grid(alpha =0.2)
if xlim != None:
ax_slice.set_xlim(np.array(xlim)/units)
if units == mm:
ax_slice.set_xlabel("[mm]")
elif units == um:
ax_slice.set_xlabel("[um]")
elif units == cm:
ax_slice.set_xlabel("[cm]")
elif units == nm:
ax_slice.set_xlabel("[nm]")
elif units == m:
ax_slice.set_xlabel("[m]")
if slice_x_pos != None:
ax_slice = fig.add_subplot(1, 2, 2)
plt.subplots_adjust(wspace=0.3)
ax_slice.set_title("Y slice")
#plt.subplots_adjust(right=2)
if bd != np:
x = self.x.get()
y = self.y.get()
else:
x = self.x
y = self.y
ax_slice.plot(y/units, I[:, np.argmin(abs(x-slice_x_pos))]**2)
ax_slice.set_ylabel(r'Intensity $\left[W / m^2 \right]$')
if grid == True:
ax_slice.grid(alpha =0.2)
if xlim != None:
ax_slice.set_xlim(np.array(ylim)/units)
if units == mm:
ax_slice.set_xlabel("[mm]")
elif units == um:
ax_slice.set_xlabel("[um]")
elif units == cm:
ax_slice.set_xlabel("[cm]")
elif units == nm:
ax_slice.set_xlabel("[nm]")
elif units == m:
ax_slice.set_xlabel("[m]")
plt.show() | 0.444565 | 0.740022 |
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import machine_resources
from google.protobuf import timestamp_pb2 # type: ignore
# Registers every message class defined in this module with proto-plus under
# the given proto package name; ``manifest`` must list each message exactly
# once (``__all__`` at the bottom of the file is derived from it).
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1",
    manifest={
        "IndexEndpoint",
        "DeployedIndex",
        "DeployedIndexAuthConfig",
        "IndexPrivateEndpoints",
    },
)
class IndexEndpoint(proto.Message):
    r"""Indexes are deployed into it. An IndexEndpoint can have
    multiple DeployedIndexes.
    Attributes:
        name (str):
            Output only. The resource name of the
            IndexEndpoint.
        display_name (str):
            Required. The display name of the
            IndexEndpoint. The name can be up to 128
            characters long and can consist of any UTF-8
            characters.
        description (str):
            The description of the IndexEndpoint.
        deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndex]):
            Output only. The indexes deployed in this
            endpoint.
        etag (str):
            Used to perform consistent read-modify-write
            updates. If not set, a blind "overwrite" update
            happens.
        labels (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint.LabelsEntry]):
            The labels with user-defined metadata to
            organize your IndexEndpoints.
            Label keys and values can be no longer than 64
            characters (Unicode codepoints), can only
            contain lowercase letters, numeric characters,
            underscores and dashes. International characters
            are allowed.
            See https://goo.gl/xmQnxf for more information
            and examples of labels.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this
            IndexEndpoint was created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this
            IndexEndpoint was last updated. This timestamp
            is not updated when the endpoint's
            DeployedIndexes are updated, e.g. due to updates
            of the original Indexes they are the deployments
            of.
        network (str):
            Required. Immutable. The full name of the Google Compute
            Engine
            `network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__
            to which the IndexEndpoint should be peered.
            Private services access must already be configured for the
            network. If left unspecified, the Endpoint is not peered
            with any network.
            `Format <https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert>`__:
            projects/{project}/global/networks/{network}. Where
            {project} is a project number, as in '12345', and {network}
            is network name.
    """
    # Wire-format bindings (generated code): the ``number=`` values must stay
    # in sync with the IndexEndpoint message in the service's .proto schema.
    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    description = proto.Field(proto.STRING, number=3,)
    # "DeployedIndex" is a string forward reference; proto-plus resolves it
    # to the class defined later in this module.
    deployed_indexes = proto.RepeatedField(
        proto.MESSAGE, number=4, message="DeployedIndex",
    )
    etag = proto.Field(proto.STRING, number=5,)
    labels = proto.MapField(proto.STRING, proto.STRING, number=6,)
    create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
    network = proto.Field(proto.STRING, number=9,)
class DeployedIndex(proto.Message):
    r"""A deployment of an Index. IndexEndpoints contain one or more
    DeployedIndexes.
    Attributes:
        id (str):
            Required. The user specified ID of the
            DeployedIndex. The ID can be up to 128
            characters long and must start with a letter and
            only contain letters, numbers, and underscores.
            The ID must be unique within the project it is
            created in.
        index (str):
            Required. The name of the Index this is the
            deployment of. We may refer to this Index as the
            DeployedIndex's "original" Index.
        display_name (str):
            The display name of the DeployedIndex. If not provided upon
            creation, the Index's display_name is used.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when the DeployedIndex
            was created.
        private_endpoints (google.cloud.aiplatform_v1beta1.types.IndexPrivateEndpoints):
            Output only. Provides paths for users to send requests
            directly to the deployed index services running on Cloud via
            private services access. This field is populated if
            [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network]
            is configured.
        index_sync_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The DeployedIndex may depend on various data on
            its original Index. Additionally when certain changes to the
            original Index are being done (e.g. when what the Index
            contains is being changed) the DeployedIndex may be
            asynchronously updated in the background to reflect this
            changes. If this timestamp's value is at least the
            [Index.update_time][google.cloud.aiplatform.v1beta1.Index.update_time]
            of the original Index, it means that this DeployedIndex and
            the original Index are in sync. If this timestamp is older,
            then to see which updates this DeployedIndex already
            contains (and which not), one must
            [list][Operations.ListOperations] [Operations][Operation]
            [working][Operation.name] on the original Index. Only the
            successfully completed Operations with
            [Operations.metadata.generic_metadata.update_time]
            [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time]
            equal or before this sync time are contained in this
            DeployedIndex.
        automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources):
            Optional. A description of resources that the DeployedIndex
            uses, which to large degree are decided by Vertex AI, and
            optionally allows only a modest additional configuration. If
            min_replica_count is not set, the default value is 1. If
            max_replica_count is not set, the default value is
            min_replica_count. The max allowed replica count is 1000.
            The user is billed for the resources (at least their minimal
            amount) even if the DeployedIndex receives no traffic.
        enable_access_logging (bool):
            Optional. If true, private endpoint's access
            logs are sent to StackDriver Logging.
            These logs are like standard server access logs,
            containing information like timestamp and
            latency for each MatchRequest.
            Note that Stackdriver logs may incur a cost,
            especially if the deployed index receives a high
            queries per second rate (QPS). Estimate your
            costs before enabling this option.
        deployed_index_auth_config (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig):
            Optional. If set, the authentication is
            enabled for the private endpoint.
        reserved_ip_ranges (Sequence[str]):
            Optional. A list of reserved ip ranges under
            the VPC network that can be used for this
            DeployedIndex.
            If set, we will deploy the index within the
            provided ip ranges. Otherwise, the index might
            be deployed to any ip ranges under the provided
            VPC network.
            The value should be the name of the address
            (https://cloud.google.com/compute/docs/reference/rest/v1/addresses)
            Example: 'vertex-ai-ip-range'.
        deployment_group (str):
            Optional. The deployment group can be no longer than 64
            characters (eg: 'test', 'prod'). If not set, we will use the
            'default' deployment group.
            Creating ``deployment_groups`` with ``reserved_ip_ranges``
            is a recommended practice when the peered network has
            multiple peering ranges. This creates your deployments from
            predictable IP spaces for easier traffic administration.
            Also, one deployment_group (except 'default') can only be
            used with the same reserved_ip_ranges which means if the
            deployment_group has been used with reserved_ip_ranges: [a,
            b, c], using it with [a, b] or [d, e] is disallowed.
            Note: we only support up to 5 deployment groups(not
            including 'default').
    """
    # Wire-format bindings (generated code): the ``number=`` values must stay
    # in sync with the DeployedIndex message in the service's .proto schema.
    id = proto.Field(proto.STRING, number=1,)
    index = proto.Field(proto.STRING, number=2,)
    display_name = proto.Field(proto.STRING, number=3,)
    create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
    # String message references ("IndexPrivateEndpoints",
    # "DeployedIndexAuthConfig") are forward references resolved by
    # proto-plus against classes defined later in this module.
    private_endpoints = proto.Field(
        proto.MESSAGE, number=5, message="IndexPrivateEndpoints",
    )
    index_sync_time = proto.Field(
        proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,
    )
    automatic_resources = proto.Field(
        proto.MESSAGE, number=7, message=machine_resources.AutomaticResources,
    )
    enable_access_logging = proto.Field(proto.BOOL, number=8,)
    deployed_index_auth_config = proto.Field(
        proto.MESSAGE, number=9, message="DeployedIndexAuthConfig",
    )
    reserved_ip_ranges = proto.RepeatedField(proto.STRING, number=10,)
    deployment_group = proto.Field(proto.STRING, number=11,)
class DeployedIndexAuthConfig(proto.Message):
    r"""Used to set up the auth on the DeployedIndex's private
    endpoint.
    Attributes:
        auth_provider (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig.AuthProvider):
            Defines the authentication provider that the
            DeployedIndex uses.
    """
    # Nested message type; proto-plus registers it as
    # DeployedIndexAuthConfig.AuthProvider on the wire.
    class AuthProvider(proto.Message):
        r"""Configuration for an authentication provider, including support for
        `JSON Web Token
        (JWT) <https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32>`__.
        Attributes:
            audiences (Sequence[str]):
                The list of JWT
                `audiences <https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3>`__.
                that are allowed to access. A JWT containing any of these
                audiences will be accepted.
            allowed_issuers (Sequence[str]):
                A list of allowed JWT issuers. Each entry must be a valid
                Google service account, in the following format:
                ``<EMAIL>``
        """
        audiences = proto.RepeatedField(proto.STRING, number=1,)
        allowed_issuers = proto.RepeatedField(proto.STRING, number=2,)
    # AuthProvider can be referenced directly (not as a string) because the
    # nested class is already defined at this point in the class body.
    auth_provider = proto.Field(proto.MESSAGE, number=1, message=AuthProvider,)
class IndexPrivateEndpoints(proto.Message):
    r"""IndexPrivateEndpoints proto is used to provide paths for
    users to send requests via private services access.

    Attributes:
        match_grpc_address (str):
            Output only. The ip address used to send
            match gRPC requests.
    """

    # Field number 1 on the wire; never renumber.
    match_grpc_address = proto.Field(proto.STRING, number=1,)
# Export exactly the message names declared in the proto module manifest.
__all__ = tuple(sorted(__protobuf__.manifest))

import proto  # type: ignore
from google.cloud.aiplatform_v1beta1.types import machine_resources
from google.protobuf import timestamp_pb2 # type: ignore
# Proto module descriptor for this file.  ``manifest`` enumerates the message
# classes defined below; ``__all__`` at the bottom of the file is derived
# from it.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1",
    manifest={
        "IndexEndpoint",
        "DeployedIndex",
        "DeployedIndexAuthConfig",
        "IndexPrivateEndpoints",
    },
)
class IndexEndpoint(proto.Message):
    r"""Indexes are deployed into it. An IndexEndpoint can have
    multiple DeployedIndexes.

    Attributes:
        name (str):
            Output only. The resource name of the
            IndexEndpoint.
        display_name (str):
            Required. The display name of the
            IndexEndpoint. The name can be up to 128
            characters long and can consist of any UTF-8
            characters.
        description (str):
            The description of the IndexEndpoint.
        deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndex]):
            Output only. The indexes deployed in this
            endpoint.
        etag (str):
            Used to perform consistent read-modify-write
            updates. If not set, a blind "overwrite" update
            happens.
        labels (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint.LabelsEntry]):
            The labels with user-defined metadata to
            organize your IndexEndpoints.
            Label keys and values can be no longer than 64
            characters (Unicode codepoints), can only
            contain lowercase letters, numeric characters,
            underscores and dashes. International characters
            are allowed.
            See https://goo.gl/xmQnxf for more information
            and examples of labels.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this
            IndexEndpoint was created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when this
            IndexEndpoint was last updated. This timestamp
            is not updated when the endpoint's
            DeployedIndexes are updated, e.g. due to updates
            of the original Indexes they are the deployments
            of.
        network (str):
            Required. Immutable. The full name of the Google Compute
            Engine
            `network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__
            to which the IndexEndpoint should be peered.
            Private services access must already be configured for the
            network. If left unspecified, the Endpoint is not peered
            with any network.
            `Format <https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert>`__:
            projects/{project}/global/networks/{network}. Where
            {project} is a project number, as in '12345', and {network}
            is network name.
    """

    # Wire-format field numbers; never renumber existing fields.
    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    description = proto.Field(proto.STRING, number=3,)
    deployed_indexes = proto.RepeatedField(
        proto.MESSAGE, number=4, message="DeployedIndex",
    )
    etag = proto.Field(proto.STRING, number=5,)
    labels = proto.MapField(proto.STRING, proto.STRING, number=6,)
    create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
    network = proto.Field(proto.STRING, number=9,)
class DeployedIndex(proto.Message):
    r"""A deployment of an Index. IndexEndpoints contain one or more
    DeployedIndexes.

    Attributes:
        id (str):
            Required. The user specified ID of the
            DeployedIndex. The ID can be up to 128
            characters long and must start with a letter and
            only contain letters, numbers, and underscores.
            The ID must be unique within the project it is
            created in.
        index (str):
            Required. The name of the Index this is the
            deployment of. We may refer to this Index as the
            DeployedIndex's "original" Index.
        display_name (str):
            The display name of the DeployedIndex. If not provided upon
            creation, the Index's display_name is used.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Timestamp when the DeployedIndex
            was created.
        private_endpoints (google.cloud.aiplatform_v1beta1.types.IndexPrivateEndpoints):
            Output only. Provides paths for users to send requests
            directly to the deployed index services running on Cloud via
            private services access. This field is populated if
            [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network]
            is configured.
        index_sync_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The DeployedIndex may depend on various data on
            its original Index. Additionally when certain changes to the
            original Index are being done (e.g. when what the Index
            contains is being changed) the DeployedIndex may be
            asynchronously updated in the background to reflect this
            changes. If this timestamp's value is at least the
            [Index.update_time][google.cloud.aiplatform.v1beta1.Index.update_time]
            of the original Index, it means that this DeployedIndex and
            the original Index are in sync. If this timestamp is older,
            then to see which updates this DeployedIndex already
            contains (and which not), one must
            [list][Operations.ListOperations] [Operations][Operation]
            [working][Operation.name] on the original Index. Only the
            successfully completed Operations with
            [Operations.metadata.generic_metadata.update_time]
            [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time]
            equal or before this sync time are contained in this
            DeployedIndex.
        automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources):
            Optional. A description of resources that the DeployedIndex
            uses, which to large degree are decided by Vertex AI, and
            optionally allows only a modest additional configuration. If
            min_replica_count is not set, the default value is 1. If
            max_replica_count is not set, the default value is
            min_replica_count. The max allowed replica count is 1000.
            The user is billed for the resources (at least their minimal
            amount) even if the DeployedIndex receives no traffic.
        enable_access_logging (bool):
            Optional. If true, private endpoint's access
            logs are sent to StackDriver Logging.
            These logs are like standard server access logs,
            containing information like timestamp and
            latency for each MatchRequest.
            Note that Stackdriver logs may incur a cost,
            especially if the deployed index receives a high
            queries per second rate (QPS). Estimate your
            costs before enabling this option.
        deployed_index_auth_config (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig):
            Optional. If set, the authentication is
            enabled for the private endpoint.
        reserved_ip_ranges (Sequence[str]):
            Optional. A list of reserved ip ranges under
            the VPC network that can be used for this
            DeployedIndex.
            If set, we will deploy the index within the
            provided ip ranges. Otherwise, the index might
            be deployed to any ip ranges under the provided
            VPC network.
            The value should be the name of the address
            (https://cloud.google.com/compute/docs/reference/rest/v1/addresses)
            Example: 'vertex-ai-ip-range'.
        deployment_group (str):
            Optional. The deployment group can be no longer than 64
            characters (eg: 'test', 'prod'). If not set, we will use the
            'default' deployment group.
            Creating ``deployment_groups`` with ``reserved_ip_ranges``
            is a recommended practice when the peered network has
            multiple peering ranges. This creates your deployments from
            predictable IP spaces for easier traffic administration.
            Also, one deployment_group (except 'default') can only be
            used with the same reserved_ip_ranges which means if the
            deployment_group has been used with reserved_ip_ranges: [a,
            b, c], using it with [a, b] or [d, e] is disallowed.
            Note: we only support up to 5 deployment groups(not
            including 'default').
    """

    # Wire-format field numbers; never renumber existing fields.
    id = proto.Field(proto.STRING, number=1,)
    index = proto.Field(proto.STRING, number=2,)
    display_name = proto.Field(proto.STRING, number=3,)
    create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
    private_endpoints = proto.Field(
        proto.MESSAGE, number=5, message="IndexPrivateEndpoints",
    )
    index_sync_time = proto.Field(
        proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,
    )
    automatic_resources = proto.Field(
        proto.MESSAGE, number=7, message=machine_resources.AutomaticResources,
    )
    enable_access_logging = proto.Field(proto.BOOL, number=8,)
    deployed_index_auth_config = proto.Field(
        proto.MESSAGE, number=9, message="DeployedIndexAuthConfig",
    )
    reserved_ip_ranges = proto.RepeatedField(proto.STRING, number=10,)
    deployment_group = proto.Field(proto.STRING, number=11,)
class DeployedIndexAuthConfig(proto.Message):
    r"""Used to set up the auth on the DeployedIndex's private
    endpoint.

    Attributes:
        auth_provider (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig.AuthProvider):
            Defines the authentication provider that the
            DeployedIndex uses.
    """

    class AuthProvider(proto.Message):
        r"""Configuration for an authentication provider, including support for
        `JSON Web Token
        (JWT) <https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32>`__.

        Attributes:
            audiences (Sequence[str]):
                The list of JWT
                `audiences <https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3>`__.
                that are allowed to access. A JWT containing any of these
                audiences will be accepted.
            allowed_issuers (Sequence[str]):
                A list of allowed JWT issuers. Each entry must be a valid
                Google service account, in the following format:
                ``<EMAIL>``
        """

        # Field numbers are part of the wire format; never renumber them.
        audiences = proto.RepeatedField(proto.STRING, number=1,)
        allowed_issuers = proto.RepeatedField(proto.STRING, number=2,)

    auth_provider = proto.Field(proto.MESSAGE, number=1, message=AuthProvider,)
class IndexPrivateEndpoints(proto.Message):
    r"""IndexPrivateEndpoints proto is used to provide paths for
    users to send requests via private services access.

    Attributes:
        match_grpc_address (str):
            Output only. The ip address used to send
            match gRPC requests.
    """

    # Field number 1 on the wire; never renumber.
    match_grpc_address = proto.Field(proto.STRING, number=1,)
# Export exactly the message names declared in the proto module manifest.
# (The stray "| ... |" table residue fused onto this line made it raise at
# import time; only the real statement is kept.)
__all__ = tuple(sorted(__protobuf__.manifest))
import sys

# Make the parent directory importable so the student implementation module
# (myImpl) is found when this file is run from inside search/.
sys.path.insert(1, '..')

import myImpl
import util
class SearchProblem:
    """
    Abstract description of a search problem.

    Concrete problems subclass this and implement every method below;
    invoking any method on the base class itself aborts via
    util.raiseNotDefined().  Nothing in this class should ever change.
    """

    def getStartState(self):
        """Return the initial state of the search problem."""
        util.raiseNotDefined()

    def isGoalState(self, state):
        """Return True exactly when ``state`` is a valid goal state."""
        util.raiseNotDefined()

    def expand(self, state):
        """
        Return a list of triples (child, action, stepCost) for ``state``.

        ``child`` is a successor state, ``action`` the move that reaches
        it, and ``stepCost`` the incremental cost of that move.
        """
        util.raiseNotDefined()

    def getActions(self, state):
        """Return the list of legal actions available from ``state``."""
        util.raiseNotDefined()

    def getActionCost(self, state, action, next_state):
        """Return the cost of the (state, action, next_state) transition."""
        util.raiseNotDefined()

    def getNextState(self, state, action):
        """Return the state reached by taking ``action`` from ``state``."""
        util.raiseNotDefined()

    def getCostOfActionSequence(self, actions):
        """
        Return the total cost of ``actions``, a sequence of legal moves.
        """
        util.raiseNotDefined()
class myProblem:
    """Adapter exposing a SearchProblem through the interface expected by
    the student's myImpl algorithms (children without action labels)."""

    def __init__(self, problem):
        # The wrapped pacman SearchProblem instance.
        self.__wrapped = problem

    def getStartState(self):
        """Return the wrapped problem's start state."""
        return self.__wrapped.getStartState()

    def isGoalState(self, state):
        """Return True iff ``state`` is a goal of the wrapped problem."""
        return self.__wrapped.isGoalState(state)

    def getChildren(self, state):
        """Return (child_state, step_cost) pairs, dropping the action."""
        return [
            (child, cost)
            for child, _action, cost in self.__wrapped.expand(state)
        ]
def tinyMazeSearch(problem):
    """
    Return the hard-coded move sequence that solves tinyMaze.

    The sequence is wrong for every other maze, so only use it there.
    """
    from game import Directions
    south, west = Directions.SOUTH, Directions.WEST
    return [south, south, west, south, west, west, south, west]
def statesToActions(problem, states):
    """
    Translate a path of states into the action sequence that produces it.

    For each consecutive pair of states on the path, pick the first legal
    action whose successor matches the next state.
    """
    actions = []
    for current, nxt in zip(states, states[1:]):
        for action in problem.getActions(current):
            if problem.getNextState(current, action) == nxt:
                actions.append(action)
                break
    return actions
def depthFirstSearch(problem):
    """
    Graph-search DFS: expand the deepest nodes in the search tree first.

    The search itself is delegated to myImpl.myDepthFirstSearch; the
    state path it returns is converted back into a list of game actions.
    """
    path = myImpl.myDepthFirstSearch(myProblem(problem))
    return statesToActions(problem, path)
def breadthFirstSearch(problem):
    """Graph-search BFS: expand the shallowest nodes first."""
    path = myImpl.myBreadthFirstSearch(myProblem(problem))
    return statesToActions(problem, path)
def nullHeuristic(state, problem=None):
    """
    Trivial admissible heuristic: estimate every remaining cost as zero.

    With this heuristic A* degenerates to uniform-cost search.
    """
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """A*: expand the node minimizing path cost plus heuristic estimate."""
    def wrapped(state):
        # Bind the problem so myImpl sees a single-argument heuristic.
        return heuristic(state, problem)
    path = myImpl.myAStarSearch(myProblem(problem), wrapped)
    return statesToActions(problem, path)
# Abbreviations used by the pacman autograder / command-line options.
bfs = breadthFirstSearch
dfs = depthFirstSearch
# Abbreviation used by the pacman autograder / command-line options.
# (The stray "| ... |" table residue fused onto this line was a syntax
# error; both real statements are kept.)
astar = aStarSearch

import sys
sys.path.insert(1, '..')
import myImpl
import util
class SearchProblem:
    """
    Abstract description of a search problem.

    Concrete problems subclass this and implement every method below;
    invoking any method on the base class itself aborts via
    util.raiseNotDefined().  Nothing in this class should ever change.
    """

    def getStartState(self):
        """Return the initial state of the search problem."""
        util.raiseNotDefined()

    def isGoalState(self, state):
        """Return True exactly when ``state`` is a valid goal state."""
        util.raiseNotDefined()

    def expand(self, state):
        """
        Return a list of triples (child, action, stepCost) for ``state``.

        ``child`` is a successor state, ``action`` the move that reaches
        it, and ``stepCost`` the incremental cost of that move.
        """
        util.raiseNotDefined()

    def getActions(self, state):
        """Return the list of legal actions available from ``state``."""
        util.raiseNotDefined()

    def getActionCost(self, state, action, next_state):
        """Return the cost of the (state, action, next_state) transition."""
        util.raiseNotDefined()

    def getNextState(self, state, action):
        """Return the state reached by taking ``action`` from ``state``."""
        util.raiseNotDefined()

    def getCostOfActionSequence(self, actions):
        """
        Return the total cost of ``actions``, a sequence of legal moves.
        """
        util.raiseNotDefined()
class myProblem:
    """Adapter exposing a SearchProblem through the interface expected by
    the student's myImpl algorithms (children without action labels)."""

    def __init__(self, problem):
        # The wrapped pacman SearchProblem instance.
        self.__wrapped = problem

    def getStartState(self):
        """Return the wrapped problem's start state."""
        return self.__wrapped.getStartState()

    def isGoalState(self, state):
        """Return True iff ``state`` is a goal of the wrapped problem."""
        return self.__wrapped.isGoalState(state)

    def getChildren(self, state):
        """Return (child_state, step_cost) pairs, dropping the action."""
        return [
            (child, cost)
            for child, _action, cost in self.__wrapped.expand(state)
        ]
def tinyMazeSearch(problem):
    """
    Return the hard-coded move sequence that solves tinyMaze.

    The sequence is wrong for every other maze, so only use it there.
    """
    from game import Directions
    south, west = Directions.SOUTH, Directions.WEST
    return [south, south, west, south, west, west, south, west]
def statesToActions(problem, states):
    """
    Translate a path of states into the action sequence that produces it.

    For each consecutive pair of states on the path, pick the first legal
    action whose successor matches the next state.
    """
    actions = []
    for current, nxt in zip(states, states[1:]):
        for action in problem.getActions(current):
            if problem.getNextState(current, action) == nxt:
                actions.append(action)
                break
    return actions
def depthFirstSearch(problem):
    """
    Graph-search DFS: expand the deepest nodes in the search tree first.

    The search itself is delegated to myImpl.myDepthFirstSearch; the
    state path it returns is converted back into a list of game actions.
    """
    path = myImpl.myDepthFirstSearch(myProblem(problem))
    return statesToActions(problem, path)
def breadthFirstSearch(problem):
    """Graph-search BFS: expand the shallowest nodes first."""
    path = myImpl.myBreadthFirstSearch(myProblem(problem))
    return statesToActions(problem, path)
def nullHeuristic(state, problem=None):
    """
    Trivial admissible heuristic: estimate every remaining cost as zero.

    With this heuristic A* degenerates to uniform-cost search.
    """
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """A*: expand the node minimizing path cost plus heuristic estimate."""
    def wrapped(state):
        # Bind the problem so myImpl sees a single-argument heuristic.
        return heuristic(state, problem)
    path = myImpl.myAStarSearch(myProblem(problem), wrapped)
    return statesToActions(problem, path)
# Abbreviations used by the pacman autograder / command-line options.
bfs = breadthFirstSearch
dfs = depthFirstSearch
# Abbreviation used by the pacman autograder / command-line options.
# (The stray "| ... |" numeric residue fused onto this line would raise a
# TypeError at import time; only the real statement is kept.)
astar = aStarSearch
import calendar
import cgi
import contextlib
import datetime
import functools
import html
import operator
import os
import random
import textwrap
import urllib.parse

import cherrypy
import importlib_resources
import inflect
import jinja2.loaders
import pytz
from py31compat.functools import lru_cache

import pmxbot.core
import pmxbot.logging
import pmxbot.util
# Template environment backed by the templates bundled with pmxbot.web.
jenv = jinja2.Environment(loader=jinja2.loaders.PackageLoader('pmxbot.web'))
TIMEOUT = 10.0

# Pool of hex / named CSS colors used to colorize nicks on day pages.
colors = [
    "06F", "900", "093", "F0C", "C30", "0C9", "666", "C90", "C36",
    "F60", "639", "630", "966", "69C", "039", '7e1e9c', '15b01a', '0343df',
    'ff81c0', '653700', 'e50000', '029386', 'f97306', 'c20078', '75bbfd']
# Shuffle once at import time so each process gets a different nick->color map.
random.shuffle(colors)
def get_context():
    """Build the base template context shared by every page."""
    conf = pmxbot.config
    context = dict(
        request=cherrypy.request,
        name=conf.bot_nickname,
        config=conf,
        base=conf.web_base,
        logo=conf.logo,
    )
    # The byline is optional; only surface it when configured.
    if 'web byline' in conf:
        context['byline'] = conf['web byline']
    return context
def make_anchor(line):
    """Build an HTML anchor id "HH.MM.SS.nick" from a (time, nick) pair."""
    timestamp, nick = line
    return "{}.{}".format(str(timestamp).replace(':', '.'), nick)
def pmon(month):
    """
    Render a YYYY-MM string as "MonthName, YYYY".

    >>> print(pmon('2012-08'))
    August, 2012
    """
    year, month_num = month.split('-')
    return '{}, {}'.format(calendar.month_name[int(month_num)], year)
def pday(dayfmt):
    """
    Render a YYYY-MM-DD string as e.g. "Friday the 24th".

    >>> print(pday('2012-08-24'))
    Friday the 24th
    """
    year, month, day = (int(part) for part in dayfmt.split('-'))
    weekday_name = calendar.day_name[calendar.weekday(year, month, day)]
    return '{} the {}'.format(weekday_name, inflect.engine().ordinal(day))
class ChannelPage:
    """Renders the per-channel index of logged days, grouped by month."""

    # Map month names ("January", ...) to their 1-based ordinals.
    month_ordinal = {
        calendar.month_name[m_ord]: m_ord
        for m_ord in range(1, 13)
    }

    @cherrypy.expose
    def default(self, channel):
        template = jenv.get_template('channel.html')
        store = pmxbot.logging.Logger.store
        context = get_context()
        months = {}
        # Day keys look like "YYYY-MM-DD"; bucket them per "YYYY-MM".
        for day_key in sorted(store.get_channel_days(channel), reverse=True):
            month_key, _day = day_key.rsplit('-', 1)
            months.setdefault(pmon(month_key), []).append((pday(day_key), day_key))
        context['months'] = sorted(months.items(), key=self.by_date, reverse=True)
        context['channel'] = channel
        return template.render(**context).encode('utf-8')

    @classmethod
    def by_date(cls, month_item):
        """Sort key for one (month_string, days) item of the months dict."""
        month_string, _days = month_item
        return cls.date_key(month_string)

    @classmethod
    def date_key(cls, month_string):
        """
        Return a (year, month_ordinal) key suitable for sorting by month.

        >>> k1 = ChannelPage.date_key('September, 2012')
        >>> k2 = ChannelPage.date_key('August, 2013')
        >>> k2 > k1
        True
        """
        month, year = month_string.split(',')
        return year, cls.month_ordinal[month]
class DayPage:
    """Renders a single channel/day of logs with per-nick colors."""

    @cherrypy.expose
    def default(self, channel, day):
        page = jenv.get_template('day.html')
        db = pmxbot.logging.Logger.store
        context = get_context()
        day_logs = db.get_day_logs(channel, day)
        # cgi.escape was deprecated and removed in Python 3.8;
        # html.escape(..., quote=False) produces identical output.
        data = [
            (t, n, make_anchor((t, n)), html.escape(m, quote=False))
            for (t, n, m) in day_logs
        ]
        usernames = [x[1] for x in data]
        # Assign each nick a distinct color until the pool is exhausted,
        # then fall back to black.
        color_map = {}
        clrs = colors[:]
        for u in usernames:
            if u not in color_map:
                try:
                    color = clrs.pop(0)
                except IndexError:
                    color = "000"
                color_map[u] = color
        context['color_map'] = color_map
        context['history'] = data
        context['channel'] = channel
        context['pdate'] = "{pday} of {days}".format(
            pday=pday(day),
            days=pmon(day.rsplit('-', 1)[0]),
        )
        return page.render(**context).encode('utf-8')
class KarmaPage:
    """Renders karma rankings and an optional term lookup."""

    @cherrypy.expose
    def default(self, term=""):
        template = jenv.get_template('karma.html')
        context = get_context()
        # NOTE(review): relies on pmxbot.karma being imported as a side
        # effect of plugin loading — this module never imports it directly.
        karma = pmxbot.karma.Karma.store
        term = term.strip()
        if term:
            matches = self.karma_comma(karma.search(term))
            context['lookup'] = matches or [('NO RESULTS FOUND', '')]
        context['top100'] = self.karma_comma(karma.list(select=100))
        context['bottom100'] = self.karma_comma(karma.list(select=-100))
        return template.render(**context).encode('utf-8')

    @staticmethod
    def karma_comma(karma_results):
        """
        (say that 5 times fast)

        Join each result's key collection into one comma-separated string,
        keeping the associated value unchanged.
        """
        return [(', '.join(keys), value) for keys, value in karma_results]
class SearchPage:
    """Full-text search over the channel logs."""

    @cherrypy.expose
    def default(self, term=''):
        template = jenv.get_template('search.html')
        context = get_context()
        store = pmxbot.logging.Logger.store
        # A hack: let the storage layer build anchors while it assembles
        # search results.
        store.make_anchor = make_anchor
        if not term:
            raise cherrypy.HTTPRedirect(cherrypy.request.base)
        terms = term.strip().split()
        results = sorted(store.search(*terms), key=lambda hit: hit[1], reverse=True)
        context['search_results'] = results
        context['num_results'] = len(results)
        context['term'] = term
        return template.render(**context).encode('utf-8')
class HelpPage:
    """Lists every registered command and contains-handler."""

    @cherrypy.expose
    def default(self):
        template = jenv.get_template('help.html')
        return template.render(**self.get_context()).encode('utf-8')

    @staticmethod
    @lru_cache()
    def get_context():
        # Cached: the handler registry is fixed once the bot has started.
        context = get_context()
        commands = []
        contains = []
        handlers = sorted(
            pmxbot.core.Handler._registry,
            key=operator.attrgetter('name'),
        )
        for handler in handlers:
            # Exact-type check: CommandHandler subclasses are deliberately
            # excluded from the plain command list (matches the original
            # ``type(...) is`` behavior).
            if type(handler) is pmxbot.core.CommandHandler:
                commands.append(handler)
            elif isinstance(handler, pmxbot.core.ContainsHandler):
                contains.append(handler)
        context['commands'] = commands
        context['contains'] = contains
        return context
class LegacyPage():
    """
    Forwards legacy /day/{channel}/{date}#{time}.{nick} in local time to
    the proper page at /day (in UTC).
    """

    # Historical pages linked times in US/Pacific; current pages use UTC.
    timezone = pytz.timezone('US/Pacific')

    @cherrypy.expose
    def default(self, channel, date_s):
        """
        Return a web page that will get the fragment out and pass it to
        us so we can parse it.

        The URL fragment (#...) is never sent to the server, so this small
        piece of JavaScript rewrites the path to embed it and reloads,
        landing on ``forward`` below.
        """
        return textwrap.dedent("""
            <html>
            <head>
            <script type="text/javascript">
            window.onload = function() {
            fragment = parent.location.hash;
            window.location.pathname=window.location.pathname.replace(
            'legacy', 'legacy/forward') + "/" + window.location.hash.slice(1);
            };
            </script>
            </head>
            <body></body>
            </html>
            """).lstrip()

    @cherrypy.expose
    def forward(self, channel, date_s, fragment):
        """
        Given an HREF in the legacy timezone, redirect to an href for UTC.
        """
        # fragment looks like "HH.MM.SS.nick"; split at the last dot.
        # NOTE(review): assumes the nick itself contains no dots — confirm.
        time_s, sep, nick = fragment.rpartition('.')
        time = datetime.datetime.strptime(time_s, '%H.%M.%S')
        date = datetime.datetime.strptime(date_s, '%Y-%m-%d')
        dt = datetime.datetime.combine(date, time.time())
        # Interpret the naive timestamp as legacy local time, then convert.
        loc_dt = self.timezone.localize(dt)
        utc_dt = loc_dt.astimezone(pytz.utc)
        url_tmpl = '/day/{channel}/{target_date}#{target_time}.{nick}'
        # **locals() supplies the {channel} and {nick} template fields.
        url = url_tmpl.format(
            target_date=utc_dt.date().isoformat(),
            target_time=utc_dt.time().strftime('%H.%M.%S'),
            **locals()
        )
        raise cherrypy.HTTPRedirect(url, 301)
class PmxbotPages:
    """CherryPy page tree: /channel, /day, /karma, /search, /help, /legacy,
    and the channel overview rendered at the root."""

    channel = ChannelPage()
    day = DayPage()
    karma = KarmaPage()
    search = SearchPage()
    help = HelpPage()
    legacy = LegacyPage()

    @cherrypy.expose
    def default(self):
        page = jenv.get_template('index.html')
        db = pmxbot.logging.Logger.store
        context = get_context()
        chans = []
        for chan in sorted(db.list_channels(), key=str.lower):
            last = db.last_message(chan)
            summary = [
                chan,
                last['datetime'].strftime("%Y-%m-%d %H:%M"),
                last['datetime'].date(),
                last['datetime'].time(),
                last['nick'],
                # cgi.escape was removed in Python 3.8;
                # html.escape(..., quote=False) produces identical output.
                html.escape(last['message'][:75], quote=False),
                make_anchor([last['datetime'].time(), last['nick']]),
            ]
            chans.append(summary)
        context['chans'] = chans
        return page.render(**context).encode('utf-8')
def patch_compat(config):
    """
    Rename legacy config keys (web_host/web_port) to their modern names
    (host/port), mutating ``config`` in place.
    """
    renames = {'web_host': 'host', 'web_port': 'port'}
    for old, new in renames.items():
        if old in config:
            config[new] = config.pop(old)
def _setup_logging():
    # Keep cherrypy's loggers from double-reporting through the root logger,
    # then let pmxbot configure logging in its usual way.
    cherrypy.log.error_log.propagate = False
    cherrypy.log.access_log.propagate = False
    pmxbot.core._setup_logging()
def init_config(config=None):
    """
    Apply web-specific defaults to ``config`` and normalize web_base/logo.

    ``config`` previously defaulted to a shared mutable dict (``{}``), so
    defaults set during one call leaked into every subsequent call; default
    to None and create a fresh dict per call instead.

    Returns the fully-initialized pmxbot config object.
    """
    if config is None:
        config = {}
    config.setdefault('web_base', '/')
    config.setdefault('host', '::0')
    config.setdefault('port', int(os.environ.get('PORT', 8080)))
    config = pmxbot.core.init_config(config)
    # Normalize web_base to "/prefix" form with no trailing slash.
    if not config.web_base.startswith('/'):
        config['web_base'] = '/' + config.web_base
    if config.web_base.endswith('/'):
        config['web_base'] = config.web_base.rstrip('/')
    if 'logo' not in config:
        web_base = config.web_base or '/'
        config['logo'] = urllib.parse.urljoin(web_base, 'pmxbot.png')
    return config
def resolve_file(mgr, filename):
    """
    Resolve ``filename`` from pmxbot.web.templates to a filesystem path.

    The path remains valid until ``mgr`` (an ExitStack) exits; see
    https://importlib-resources.readthedocs.io/en/latest/migration.html#pkg-resources-resource-filename
    for background.
    """
    resource = importlib_resources.path('pmxbot.web.templates', filename)
    return str(mgr.enter_context(resource))
def startup(config):
    """Configure and run the CherryPy web app until server shutdown."""
    patch_compat(config)
    config = init_config(config)
    _setup_logging()
    pmxbot.core._load_library_extensions()
    # Keep static resources extracted for the whole server lifetime.
    file_manager = contextlib.ExitStack()
    static = functools.partial(resolve_file, file_manager)
    # Cherrypy configuration here
    app_conf = {
        'global': {
            'server.socket_port': config.port,
            'server.socket_host': config.host,
            'server.environment': 'production',
            'engine.autoreload.on': False,
            # 'tools.encode.on': True,
            'tools.encode.encoding': 'utf-8',
        },
        '/pmxbot.png': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': static('pmxbot.png'),
        },
        '/Autolinker.js': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': static('Autolinker.js'),
        },
    }
    # quickstart blocks until shutdown; the ExitStack then cleans up the
    # extracted static files.
    with file_manager:
        cherrypy.quickstart(PmxbotPages(), config.web_base, config=app_conf)
def run():
    """Entry point: start the web viewer with the command-line config."""
    # (The stray "| ... |" table residue fused onto this line was a syntax
    # error; the real statements are kept.)
    startup(pmxbot.core.get_args().config)

import os
import random
import calendar
import datetime
import textwrap
import cgi
import urllib.parse
import operator
import contextlib
import functools
from py31compat.functools import lru_cache
import cherrypy
import importlib_resources
import jinja2.loaders
import pytz
import inflect
import pmxbot.core
import pmxbot.logging
import pmxbot.util
# Template environment backed by the templates bundled with pmxbot.web.
jenv = jinja2.Environment(loader=jinja2.loaders.PackageLoader('pmxbot.web'))
TIMEOUT = 10.0

# Pool of hex / named CSS colors used to colorize nicks on day pages.
colors = [
    "06F", "900", "093", "F0C", "C30", "0C9", "666", "C90", "C36",
    "F60", "639", "630", "966", "69C", "039", '7e1e9c', '15b01a', '0343df',
    'ff81c0', '653700', 'e50000', '029386', 'f97306', 'c20078', '75bbfd']
# Shuffle once at import time so each process gets a different nick->color map.
random.shuffle(colors)
def get_context():
    """Build the base template context shared by every page."""
    conf = pmxbot.config
    context = dict(
        request=cherrypy.request,
        name=conf.bot_nickname,
        config=conf,
        base=conf.web_base,
        logo=conf.logo,
    )
    # The byline is optional; only surface it when configured.
    if 'web byline' in conf:
        context['byline'] = conf['web byline']
    return context
def make_anchor(line):
    """Build an HTML anchor id "HH.MM.SS.nick" from a (time, nick) pair."""
    timestamp, nick = line
    return "{}.{}".format(str(timestamp).replace(':', '.'), nick)
def pmon(month):
    """
    Render a YYYY-MM string as "MonthName, YYYY".

    >>> print(pmon('2012-08'))
    August, 2012
    """
    year, month_num = month.split('-')
    return '{}, {}'.format(calendar.month_name[int(month_num)], year)
def pday(dayfmt):
    """
    Render a YYYY-MM-DD string as e.g. "Friday the 24th".

    >>> print(pday('2012-08-24'))
    Friday the 24th
    """
    year, month, day = (int(part) for part in dayfmt.split('-'))
    weekday_name = calendar.day_name[calendar.weekday(year, month, day)]
    return '{} the {}'.format(weekday_name, inflect.engine().ordinal(day))
class ChannelPage:
    """Renders the per-channel index of logged days, grouped by month."""

    # Map month names ("January", ...) to their 1-based ordinals.
    month_ordinal = {
        calendar.month_name[m_ord]: m_ord
        for m_ord in range(1, 13)
    }

    @cherrypy.expose
    def default(self, channel):
        template = jenv.get_template('channel.html')
        store = pmxbot.logging.Logger.store
        context = get_context()
        months = {}
        # Day keys look like "YYYY-MM-DD"; bucket them per "YYYY-MM".
        for day_key in sorted(store.get_channel_days(channel), reverse=True):
            month_key, _day = day_key.rsplit('-', 1)
            months.setdefault(pmon(month_key), []).append((pday(day_key), day_key))
        context['months'] = sorted(months.items(), key=self.by_date, reverse=True)
        context['channel'] = channel
        return template.render(**context).encode('utf-8')

    @classmethod
    def by_date(cls, month_item):
        """Sort key for one (month_string, days) item of the months dict."""
        month_string, _days = month_item
        return cls.date_key(month_string)

    @classmethod
    def date_key(cls, month_string):
        """
        Return a (year, month_ordinal) key suitable for sorting by month.

        >>> k1 = ChannelPage.date_key('September, 2012')
        >>> k2 = ChannelPage.date_key('August, 2013')
        >>> k2 > k1
        True
        """
        month, year = month_string.split(',')
        return year, cls.month_ordinal[month]
class DayPage:
    """Renders a single channel/day of logs with per-nick colors."""

    @cherrypy.expose
    def default(self, channel, day):
        page = jenv.get_template('day.html')
        db = pmxbot.logging.Logger.store
        context = get_context()
        day_logs = db.get_day_logs(channel, day)
        # cgi.escape was deprecated and removed in Python 3.8;
        # html.escape(..., quote=False) produces identical output.
        data = [
            (t, n, make_anchor((t, n)), html.escape(m, quote=False))
            for (t, n, m) in day_logs
        ]
        usernames = [x[1] for x in data]
        # Assign each nick a distinct color until the pool is exhausted,
        # then fall back to black.
        color_map = {}
        clrs = colors[:]
        for u in usernames:
            if u not in color_map:
                try:
                    color = clrs.pop(0)
                except IndexError:
                    color = "000"
                color_map[u] = color
        context['color_map'] = color_map
        context['history'] = data
        context['channel'] = channel
        context['pdate'] = "{pday} of {days}".format(
            pday=pday(day),
            days=pmon(day.rsplit('-', 1)[0]),
        )
        return page.render(**context).encode('utf-8')
class KarmaPage:
    """Renders karma rankings and an optional term lookup."""

    @cherrypy.expose
    def default(self, term=""):
        template = jenv.get_template('karma.html')
        context = get_context()
        # NOTE(review): relies on pmxbot.karma being imported as a side
        # effect of plugin loading — this module never imports it directly.
        karma = pmxbot.karma.Karma.store
        term = term.strip()
        if term:
            matches = self.karma_comma(karma.search(term))
            context['lookup'] = matches or [('NO RESULTS FOUND', '')]
        context['top100'] = self.karma_comma(karma.list(select=100))
        context['bottom100'] = self.karma_comma(karma.list(select=-100))
        return template.render(**context).encode('utf-8')

    @staticmethod
    def karma_comma(karma_results):
        """
        (say that 5 times fast)

        Join each result's key collection into one comma-separated string,
        keeping the associated value unchanged.
        """
        return [(', '.join(keys), value) for keys, value in karma_results]
class SearchPage:
    """Full-text search over the channel logs."""

    @cherrypy.expose
    def default(self, term=''):
        template = jenv.get_template('search.html')
        context = get_context()
        store = pmxbot.logging.Logger.store
        # A hack: let the storage layer build anchors while it assembles
        # search results.
        store.make_anchor = make_anchor
        if not term:
            raise cherrypy.HTTPRedirect(cherrypy.request.base)
        terms = term.strip().split()
        results = sorted(store.search(*terms), key=lambda hit: hit[1], reverse=True)
        context['search_results'] = results
        context['num_results'] = len(results)
        context['term'] = term
        return template.render(**context).encode('utf-8')
class HelpPage:
    """Lists every registered command and contains-handler."""

    @cherrypy.expose
    def default(self):
        template = jenv.get_template('help.html')
        return template.render(**self.get_context()).encode('utf-8')

    @staticmethod
    @lru_cache()
    def get_context():
        # Cached: the handler registry is fixed once the bot has started.
        context = get_context()
        commands = []
        contains = []
        handlers = sorted(
            pmxbot.core.Handler._registry,
            key=operator.attrgetter('name'),
        )
        for handler in handlers:
            # Exact-type check: CommandHandler subclasses are deliberately
            # excluded from the plain command list (matches the original
            # ``type(...) is`` behavior).
            if type(handler) is pmxbot.core.CommandHandler:
                commands.append(handler)
            elif isinstance(handler, pmxbot.core.ContainsHandler):
                contains.append(handler)
        context['commands'] = commands
        context['contains'] = contains
        return context
class LegacyPage():
    """
    Forwards legacy /day/{channel}/{date}#{time}.{nick} in local time to
    the proper page at /day (in UTC).
    """

    # Historical pages linked times in US/Pacific; current pages use UTC.
    timezone = pytz.timezone('US/Pacific')

    @cherrypy.expose
    def default(self, channel, date_s):
        """
        Return a web page that will get the fragment out and pass it to
        us so we can parse it.

        The URL fragment (#...) is never sent to the server, so this small
        piece of JavaScript rewrites the path to embed it and reloads,
        landing on ``forward`` below.
        """
        return textwrap.dedent("""
            <html>
            <head>
            <script type="text/javascript">
            window.onload = function() {
            fragment = parent.location.hash;
            window.location.pathname=window.location.pathname.replace(
            'legacy', 'legacy/forward') + "/" + window.location.hash.slice(1);
            };
            </script>
            </head>
            <body></body>
            </html>
            """).lstrip()

    @cherrypy.expose
    def forward(self, channel, date_s, fragment):
        """
        Given an HREF in the legacy timezone, redirect to an href for UTC.
        """
        # fragment looks like "HH.MM.SS.nick"; split at the last dot.
        # NOTE(review): assumes the nick itself contains no dots — confirm.
        time_s, sep, nick = fragment.rpartition('.')
        time = datetime.datetime.strptime(time_s, '%H.%M.%S')
        date = datetime.datetime.strptime(date_s, '%Y-%m-%d')
        dt = datetime.datetime.combine(date, time.time())
        # Interpret the naive timestamp as legacy local time, then convert.
        loc_dt = self.timezone.localize(dt)
        utc_dt = loc_dt.astimezone(pytz.utc)
        url_tmpl = '/day/{channel}/{target_date}#{target_time}.{nick}'
        # **locals() supplies the {channel} and {nick} template fields.
        url = url_tmpl.format(
            target_date=utc_dt.date().isoformat(),
            target_time=utc_dt.time().strftime('%H.%M.%S'),
            **locals()
        )
        raise cherrypy.HTTPRedirect(url, 301)
class PmxbotPages:
    channel = ChannelPage()
    day = DayPage()
    karma = KarmaPage()
    search = SearchPage()
    help = HelpPage()
    legacy = LegacyPage()

    @cherrypy.expose
    def default(self):
        """Render the index page: one summary row per logged channel."""
        # cgi.escape was deprecated since 3.2 and removed in Python 3.13;
        # html.escape(quote=False) is the drop-in replacement (escapes & < >).
        import html
        page = jenv.get_template('index.html')
        db = pmxbot.logging.Logger.store
        context = get_context()
        chans = []
        for chan in sorted(db.list_channels(), key=str.lower):
            last = db.last_message(chan)
            summary = [
                chan,
                last['datetime'].strftime("%Y-%m-%d %H:%M"),
                last['datetime'].date(),
                last['datetime'].time(),
                last['nick'],
                # Truncate the preview and escape it for the template.
                html.escape(last['message'][:75], quote=False),
                make_anchor([last['datetime'].time(), last['nick']]),
            ]
            chans.append(summary)
        context['chans'] = chans
        return page.render(**context).encode('utf-8')
def patch_compat(config):
    """
    Support older config values.

    Rename legacy ``web_host``/``web_port`` keys to ``host``/``port``,
    mutating *config* in place.
    """
    renames = {'web_host': 'host', 'web_port': 'port'}
    for old_key, new_key in renames.items():
        if old_key in config:
            config[new_key] = config.pop(old_key)
def _setup_logging():
    """Route all log output through pmxbot's logging configuration."""
    # Stop CherryPy's loggers from propagating to the root logger so
    # pmxbot's handlers are the single source of output.
    cherrypy.log.error_log.propagate = False
    cherrypy.log.access_log.propagate = False
    pmxbot.core._setup_logging()
def init_config(config=None):
    """
    Fill in defaults and normalize the pmxbot web configuration.

    Ensures ``web_base`` starts with ``/`` and has no trailing ``/``, and
    derives a default ``logo`` URL from it.

    :param config: optional mapping of config overrides.
    :return: the config object produced by ``pmxbot.core.init_config``.
    """
    # Fix: the original used a mutable default argument (config={}); the
    # setdefault calls below would mutate the shared default across calls.
    if config is None:
        config = {}
    config.setdefault('web_base', '/')
    config.setdefault('host', '::0')
    config.setdefault('port', int(os.environ.get('PORT', 8080)))
    config = pmxbot.core.init_config(config)
    if not config.web_base.startswith('/'):
        config['web_base'] = '/' + config.web_base
    if config.web_base.endswith('/'):
        config['web_base'] = config.web_base.rstrip('/')
    if 'logo' not in config:
        web_base = config.web_base or '/'
        config['logo'] = urllib.parse.urljoin(web_base, 'pmxbot.png')
    return config
def resolve_file(mgr, filename):
    """
    Given a file manager (ExitStack), load the filename
    and set the exit stack to clean up. See
    https://importlib-resources.readthedocs.io/en/latest/migration.html#pkg-resources-resource-filename
    for more details.
    """
    resource = importlib_resources.path('pmxbot.web.templates', filename)
    materialized = mgr.enter_context(resource)
    return str(materialized)
def startup(config):
    """Configure and run the pmxbot web server (blocks until shutdown)."""
    patch_compat(config)
    config = init_config(config)
    _setup_logging()
    pmxbot.core._load_library_extensions()
    # Static template resources are materialized on disk for the lifetime
    # of this ExitStack (see resolve_file).
    file_manager = contextlib.ExitStack()
    static = functools.partial(resolve_file, file_manager)
    # Cherrypy configuration here
    app_conf = {
        'global': {
            'server.socket_port': config.port,
            'server.socket_host': config.host,
            'server.environment': 'production',
            'engine.autoreload.on': False,
            # 'tools.encode.on': True,
            'tools.encode.encoding': 'utf-8',
        },
        '/pmxbot.png': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': static('pmxbot.png'),
        },
        '/Autolinker.js': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': static('Autolinker.js'),
        },
    }
    # Serve until CherryPy exits, then clean up the materialized files.
    with file_manager:
        cherrypy.quickstart(PmxbotPages(), config.web_base, config=app_conf)
def run():
    """Console entry point: start the server with config from the CLI."""
    config = pmxbot.core.get_args().config
    startup(config)
"""LicenseManagerAgentCharm."""
import logging
from pathlib import Path
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus, BlockedStatus
from interface_prolog_epilog import PrologEpilog
from license_manager_agent_ops import LicenseManagerAgentOps
from charms.fluentbit.v0.fluentbit import FluentbitClient
logger = logging.getLogger()
class LicenseManagerAgentCharm(CharmBase):
    """Facilitate License Manager Agent lifecycle."""

    # Per-unit state persisted across hook invocations.
    _stored = StoredState()

    def __init__(self, *args):
        """Initialize and observe."""
        super().__init__(*args)
        # installed: set after a successful install hook.
        # init_started: set once the start hook has run.
        self._stored.set_default(
            installed=False,
            init_started=False,
        )
        self._prolog_epilog = PrologEpilog(self, 'prolog-epilog')
        self._license_manager_agent_ops = LicenseManagerAgentOps(self)
        self._fluentbit = FluentbitClient(self, "fluentbit")
        # Bind each lifecycle/relation event to its handler, then register.
        event_handler_bindings = {
            self.on.install: self._on_install,
            self.on.start: self._on_start,
            self.on.config_changed: self._on_config_changed,
            self.on.remove: self._on_remove,
            self.on.upgrade_to_latest_action: self._upgrade_to_latest,
            self.on["fluentbit"].relation_created: self._on_fluentbit_relation_created,
        }
        for event, handler in event_handler_bindings.items():
            self.framework.observe(event, handler)

    def _on_install(self, event):
        """Install license-manager-agent."""
        try:
            self._license_manager_agent_ops.install()
        except Exception as e:
            # Block the unit, defer for a retry, and re-raise so the
            # failure is visible in the juju debug log.
            logger.error(f"Error installing agent: {e}")
            self.unit.status = BlockedStatus("Installation error")
            event.defer()
            raise
        # "version" is a file shipped alongside the charm code.
        self.unit.set_workload_version(Path("version").read_text().strip())
        # Log and set status
        logger.debug("license-manager agent installed")
        self.unit.status = ActiveStatus("license-manager-agent installed")
        self._stored.installed = True

    def _on_start(self, event):
        """Start the license-manager-agent service."""
        if self._stored.installed:
            self._license_manager_agent_ops.license_manager_agent_systemctl("start")
            self.unit.status = ActiveStatus("license-manager-agent started")
            self._stored.init_started = True

    def _on_config_changed(self, event):
        """Configure license-manager-agent with charm config."""
        # Write out the /etc/default/license-manager-agent config
        self._license_manager_agent_ops.configure_etc_default()
        self._license_manager_agent_ops.setup_systemd_service()
        # Make sure the start hook has run before we restart the service
        if self._stored.init_started:
            self._license_manager_agent_ops.restart_license_manager_agent()

    def _on_remove(self, event):
        """Remove directories and files created by license-manager-agent charm."""
        self._license_manager_agent_ops.license_manager_agent_systemctl("stop")
        self._license_manager_agent_ops.remove_license_manager_agent()

    def _upgrade_to_latest(self, event):
        """Action handler: upgrade the agent to the requested version."""
        version = event.params["version"]
        self._license_manager_agent_ops.upgrade(version)

    def _on_fluentbit_relation_created(self, event):
        """Set up Fluentbit log forwarding."""
        cfg = list()
        cfg.extend(self._license_manager_agent_ops.fluentbit_config_lm_log)
        self._fluentbit.configure(cfg)

    @property
    def prolog_path(self) -> str:
        """Return the path to the prolog script."""
        return self._license_manager_agent_ops.PROLOG_PATH.as_posix()

    @property
    def epilog_path(self) -> str:
        """Return the path to the epilog script."""
        return self._license_manager_agent_ops.EPILOG_PATH.as_posix()
# Charm entry point: hand control to the ops framework dispatcher.
if __name__ == "__main__":
    main(LicenseManagerAgentCharm)
import logging
from pathlib import Path
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus, BlockedStatus
from interface_prolog_epilog import PrologEpilog
from license_manager_agent_ops import LicenseManagerAgentOps
from charms.fluentbit.v0.fluentbit import FluentbitClient
logger = logging.getLogger()
class LicenseManagerAgentCharm(CharmBase):
    """Facilitate License Manager Agent lifecycle."""

    # Per-unit state persisted across hook invocations.
    _stored = StoredState()

    def __init__(self, *args):
        """Initialize and observe."""
        super().__init__(*args)
        # installed: set after a successful install hook.
        # init_started: set once the start hook has run.
        self._stored.set_default(
            installed=False,
            init_started=False,
        )
        self._prolog_epilog = PrologEpilog(self, 'prolog-epilog')
        self._license_manager_agent_ops = LicenseManagerAgentOps(self)
        self._fluentbit = FluentbitClient(self, "fluentbit")
        # Bind each lifecycle/relation event to its handler, then register.
        event_handler_bindings = {
            self.on.install: self._on_install,
            self.on.start: self._on_start,
            self.on.config_changed: self._on_config_changed,
            self.on.remove: self._on_remove,
            self.on.upgrade_to_latest_action: self._upgrade_to_latest,
            self.on["fluentbit"].relation_created: self._on_fluentbit_relation_created,
        }
        for event, handler in event_handler_bindings.items():
            self.framework.observe(event, handler)

    def _on_install(self, event):
        """Install license-manager-agent."""
        try:
            self._license_manager_agent_ops.install()
        except Exception as e:
            # Block the unit, defer for a retry, and re-raise so the
            # failure is visible in the juju debug log.
            logger.error(f"Error installing agent: {e}")
            self.unit.status = BlockedStatus("Installation error")
            event.defer()
            raise
        # "version" is a file shipped alongside the charm code.
        self.unit.set_workload_version(Path("version").read_text().strip())
        # Log and set status
        logger.debug("license-manager agent installed")
        self.unit.status = ActiveStatus("license-manager-agent installed")
        self._stored.installed = True

    def _on_start(self, event):
        """Start the license-manager-agent service."""
        if self._stored.installed:
            self._license_manager_agent_ops.license_manager_agent_systemctl("start")
            self.unit.status = ActiveStatus("license-manager-agent started")
            self._stored.init_started = True

    def _on_config_changed(self, event):
        """Configure license-manager-agent with charm config."""
        # Write out the /etc/default/license-manager-agent config
        self._license_manager_agent_ops.configure_etc_default()
        self._license_manager_agent_ops.setup_systemd_service()
        # Make sure the start hook has run before we restart the service
        if self._stored.init_started:
            self._license_manager_agent_ops.restart_license_manager_agent()

    def _on_remove(self, event):
        """Remove directories and files created by license-manager-agent charm."""
        self._license_manager_agent_ops.license_manager_agent_systemctl("stop")
        self._license_manager_agent_ops.remove_license_manager_agent()

    def _upgrade_to_latest(self, event):
        """Action handler: upgrade the agent to the requested version."""
        version = event.params["version"]
        self._license_manager_agent_ops.upgrade(version)

    def _on_fluentbit_relation_created(self, event):
        """Set up Fluentbit log forwarding."""
        cfg = list()
        cfg.extend(self._license_manager_agent_ops.fluentbit_config_lm_log)
        self._fluentbit.configure(cfg)

    @property
    def prolog_path(self) -> str:
        """Return the path to the prolog script."""
        return self._license_manager_agent_ops.PROLOG_PATH.as_posix()

    @property
    def epilog_path(self) -> str:
        """Return the path to the epilog script."""
        return self._license_manager_agent_ops.EPILOG_PATH.as_posix()
# Charm entry point: hand control to the ops framework dispatcher.
if __name__ == "__main__":
    main(LicenseManagerAgentCharm)
import os
import torch
import utils
import random
import numpy as np
from transformers import BertTokenizer
class DataLoader(object):
    """Load and batch CoNLL-style sentence/tag files for BERT token tagging."""

    def __init__(self, data_dir, bert_class, params, token_pad_idx=0, tag_pad_idx=-1):
        """Build tag vocab from <data_dir>/tags.txt and load the tokenizer.

        Args:
            data_dir: root directory holding tags.txt and train/val/test dirs.
            bert_class: pretrained model name/path for BertTokenizer.
            params: config object; reads batch_size/max_len/device/seed and
                gets tag2idx/idx2tag written back onto it.
            token_pad_idx: padding id for subword-token ids.
            tag_pad_idx: padding id for tag ids (default -1, ignored by loss).
        """
        self.data_dir = data_dir
        self.batch_size = params.batch_size
        self.max_len = params.max_len
        self.device = params.device
        self.seed = params.seed
        self.token_pad_idx = token_pad_idx
        self.tag_pad_idx = tag_pad_idx

        tags = self.load_tags()
        self.tag2idx = {tag: idx for idx, tag in enumerate(tags)}
        self.idx2tag = {idx: tag for idx, tag in enumerate(tags)}
        # Expose the mappings on params for the model/evaluation code.
        params.tag2idx = self.tag2idx
        params.idx2tag = self.idx2tag

        self.tokenizer = BertTokenizer.from_pretrained(bert_class, do_lower_case=False)

    def load_tags(self):
        """Return the tag vocabulary from <data_dir>/tags.txt, one per line."""
        tags = []
        file_path = os.path.join(self.data_dir, 'tags.txt')
        with open(file_path, 'r') as file:
            for tag in file:
                tags.append(tag.strip())
        return tags

    def load_sentences_tags(self, sentences_file, tags_file, d):
        """Loads sentences and tags from their corresponding files.

        Maps tokens and tags to their indices and stores them in the provided
        dict d under 'data', 'size' and (when tags_file is given) 'tags'.
        """
        sentences = []
        tags = []

        with open(sentences_file, 'r') as file:
            for line in file:
                # replace each token by its index
                tokens = line.strip().split(' ')
                subwords = list(map(self.tokenizer.tokenize, tokens))
                subword_lengths = list(map(len, subwords))
                # FIX: the BERT classification token is '[CLS]'; the bare
                # string 'CLS' is not in the vocabulary and was silently
                # converted to the [UNK] id by convert_tokens_to_ids.
                subwords = ['[CLS]'] + [item for indices in subwords for item in indices]
                # Index of the first subword of each original token,
                # offset by 1 for the leading [CLS].
                token_start_idxs = 1 + np.cumsum([0] + subword_lengths[:-1])
                sentences.append((self.tokenizer.convert_tokens_to_ids(subwords), token_start_idxs))

        if tags_file is not None:
            with open(tags_file, 'r') as file:
                for line in file:
                    # replace each tag by its index
                    tag_seq = [self.tag2idx.get(tag) for tag in line.strip().split(' ')]
                    tags.append(tag_seq)
            # checks to ensure there is a tag for each token
            assert len(sentences) == len(tags)
            for i in range(len(sentences)):
                assert len(tags[i]) == len(sentences[i][-1])
            d['tags'] = tags

        # storing sentences and tags in dict d
        d['data'] = sentences
        d['size'] = len(sentences)

    def load_data(self, data_type):
        """Loads the data for each type in types from data_dir.

        Args:
            data_type: (str) one of 'train', 'val', 'test' or 'interactive'
                ('interactive' loads sentences without gold tags).

        Returns:
            data: (dict) contains the data with tags for each type in types.
        """
        data = {}
        if data_type in ['train', 'val', 'test']:
            print('Loading ' + data_type)
            sentences_file = os.path.join(self.data_dir, data_type, 'sentences.txt')
            tags_path = os.path.join(self.data_dir, data_type, 'tags.txt')
            self.load_sentences_tags(sentences_file, tags_path, data)
        elif data_type == 'interactive':
            sentences_file = os.path.join(self.data_dir, data_type, 'sentences.txt')
            self.load_sentences_tags(sentences_file, tags_file=None, d=data)
        else:
            raise ValueError("data type not in ['train', 'val', 'test']")
        return data

    def data_iterator(self, data, shuffle=False):
        """Returns a generator that yields batches data with tags.

        Args:
            data: (dict) contains data which has keys 'data', 'tags' and 'size'
            shuffle: (bool) whether the data should be shuffled

        Yields:
            batch_data: (tensor) shape: (batch_size, max_subwords_len)
            batch_token_starts: (tensor) same shape; 1 marks a token's first subword
            batch_tags: (tensor) shape: (batch_size, max_token_len);
                only yielded when gold tags were loaded
        """
        # A permutation of indices avoids explicit shuffling of the data.
        order = list(range(data['size']))
        if shuffle:
            random.seed(self.seed)
            random.shuffle(order)

        # Interactive mode: no gold tags available.
        interMode = 'tags' not in data

        if data['size'] % self.batch_size == 0:
            BATCH_NUM = data['size'] // self.batch_size
        else:
            BATCH_NUM = data['size'] // self.batch_size + 1

        # one pass over data
        for i in range(BATCH_NUM):
            # fetch sentences and tags; the final batch may be short
            if i * self.batch_size < data['size'] < (i + 1) * self.batch_size:
                sentences = [data['data'][idx] for idx in order[i * self.batch_size:]]
                if not interMode:
                    tags = [data['tags'][idx] for idx in order[i * self.batch_size:]]
            else:
                sentences = [data['data'][idx] for idx in order[i * self.batch_size:(i + 1) * self.batch_size]]
                if not interMode:
                    tags = [data['tags'][idx] for idx in order[i * self.batch_size:(i + 1) * self.batch_size]]

            # batch length
            batch_len = len(sentences)

            # compute length of longest sentence in batch, capped at max_len
            batch_max_subwords_len = max([len(s[0]) for s in sentences])
            max_subwords_len = min(batch_max_subwords_len, self.max_len)
            max_token_len = 0

            # prepare a numpy array with the data, initialising with pad_idx
            batch_data = self.token_pad_idx * np.ones((batch_len, max_subwords_len))
            batch_token_starts = []

            # copy the data to the numpy array, truncating long sentences
            for j in range(batch_len):
                cur_subwords_len = len(sentences[j][0])
                if cur_subwords_len <= max_subwords_len:
                    batch_data[j][:cur_subwords_len] = sentences[j][0]
                else:
                    batch_data[j] = sentences[j][0][:max_subwords_len]
                # binary mask marking the first subword of each token
                token_start_idx = sentences[j][-1]
                token_starts = np.zeros(max_subwords_len)
                token_starts[[idx for idx in token_start_idx if idx < max_subwords_len]] = 1
                batch_token_starts.append(token_starts)
                max_token_len = max(int(sum(token_starts)), max_token_len)

            if not interMode:
                batch_tags = self.tag_pad_idx * np.ones((batch_len, max_token_len))
                for j in range(batch_len):
                    cur_tags_len = len(tags[j])
                    if cur_tags_len <= max_token_len:
                        batch_tags[j][:cur_tags_len] = tags[j]
                    else:
                        batch_tags[j] = tags[j][:max_token_len]

            # since all data are indices, we convert them to torch LongTensors
            batch_data = torch.tensor(batch_data, dtype=torch.long)
            batch_token_starts = torch.tensor(batch_token_starts, dtype=torch.long)
            if not interMode:
                batch_tags = torch.tensor(batch_tags, dtype=torch.long)

            # shift tensors to GPU if available
            batch_data, batch_token_starts = batch_data.to(self.device), batch_token_starts.to(self.device)
            if not interMode:
                batch_tags = batch_tags.to(self.device)
                yield batch_data, batch_token_starts, batch_tags
            else:
                yield batch_data, batch_token_starts
import torch
import utils
import random
import numpy as np
from transformers import BertTokenizer
class DataLoader(object):
    # Loads and batches CoNLL-style sentence/tag files for BERT token tagging.

    def __init__(self, data_dir, bert_class, params, token_pad_idx=0, tag_pad_idx=-1):
        """Build the tag vocabulary and tokenizer; cache batching params."""
        self.data_dir = data_dir
        self.batch_size = params.batch_size
        self.max_len = params.max_len
        self.device = params.device
        self.seed = params.seed
        self.token_pad_idx = token_pad_idx
        self.tag_pad_idx = tag_pad_idx

        tags = self.load_tags()
        self.tag2idx = {tag: idx for idx, tag in enumerate(tags)}
        self.idx2tag = {idx: tag for idx, tag in enumerate(tags)}
        # Expose the mappings on params for the model/evaluation code.
        params.tag2idx = self.tag2idx
        params.idx2tag = self.idx2tag

        self.tokenizer = BertTokenizer.from_pretrained(bert_class, do_lower_case=False)

    def load_tags(self):
        """Return the tag vocabulary from <data_dir>/tags.txt, one per line."""
        tags = []
        file_path = os.path.join(self.data_dir, 'tags.txt')
        with open(file_path, 'r') as file:
            for tag in file:
                tags.append(tag.strip())
        return tags

    def load_sentences_tags(self, sentences_file, tags_file, d):
        """Loads sentences and tags from their corresponding files.
        Maps tokens and tags to their indices and stores them in the provided dict d.
        """
        sentences = []
        tags = []

        with open(sentences_file, 'r') as file:
            for line in file:
                # replace each token by its index
                tokens = line.strip().split(' ')
                subwords = list(map(self.tokenizer.tokenize, tokens))
                subword_lengths = list(map(len, subwords))
                # NOTE(review): 'CLS' is not in the BERT vocabulary (the
                # special token is '[CLS]'); convert_tokens_to_ids likely
                # maps it to [UNK] — confirm intent.
                subwords = ['CLS'] + [item for indices in subwords for item in indices]
                # Index of the first subword of each token (+1 for prefix).
                token_start_idxs = 1 + np.cumsum([0] + subword_lengths[:-1])
                sentences.append((self.tokenizer.convert_tokens_to_ids(subwords),token_start_idxs))

        if tags_file != None:
            with open(tags_file, 'r') as file:
                for line in file:
                    # replace each tag by its index
                    tag_seq = [self.tag2idx.get(tag) for tag in line.strip().split(' ')]
                    tags.append(tag_seq)
            # checks to ensure there is a tag for each token
            assert len(sentences) == len(tags)
            for i in range(len(sentences)):
                assert len(tags[i]) == len(sentences[i][-1])
            d['tags'] = tags

        # storing sentences and tags in dict d
        d['data'] = sentences
        d['size'] = len(sentences)

    def load_data(self, data_type):
        """Loads the data for each type in types from data_dir.

        Args:
            data_type: (str) has one of 'train', 'val', 'test' depending on
                which data is required ('interactive' loads sentences only).

        Returns:
            data: (dict) contains the data with tags for each type in types.
        """
        data = {}
        if data_type in ['train', 'val', 'test']:
            print('Loading ' + data_type)
            sentences_file = os.path.join(self.data_dir, data_type, 'sentences.txt')
            tags_path = os.path.join(self.data_dir, data_type, 'tags.txt')
            self.load_sentences_tags(sentences_file, tags_path, data)
        elif data_type == 'interactive':
            sentences_file = os.path.join(self.data_dir, data_type, 'sentences.txt')
            self.load_sentences_tags(sentences_file, tags_file=None, d=data)
        else:
            raise ValueError("data type not in ['train', 'val', 'test']")
        return data

    def data_iterator(self, data, shuffle=False):
        """Returns a generator that yields batches data with tags.

        Args:
            data: (dict) contains data which has keys 'data', 'tags' and 'size'
            shuffle: (bool) whether the data should be shuffled

        Yields:
            batch_data: (tensor) shape: (batch_size, max_len)
            batch_tags: (tensor) shape: (batch_size, max_len)
        """
        # make a list that decides the order in which we go over the data-
        # this avoids explicit shuffling of data
        order = list(range(data['size']))
        if shuffle:
            random.seed(self.seed)
            random.shuffle(order)

        # Interactive mode: no gold tags available.
        interMode = False if 'tags' in data else True

        if data['size'] % self.batch_size == 0:
            BATCH_NUM = data['size']//self.batch_size
        else:
            BATCH_NUM = data['size']//self.batch_size + 1

        # one pass over data
        for i in range(BATCH_NUM):
            # fetch sentences and tags; the final batch may be short
            if i * self.batch_size < data['size'] < (i+1) * self.batch_size:
                sentences = [data['data'][idx] for idx in order[i*self.batch_size:]]
                if not interMode:
                    tags = [data['tags'][idx] for idx in order[i*self.batch_size:]]
            else:
                sentences = [data['data'][idx] for idx in order[i*self.batch_size:(i+1)*self.batch_size]]
                if not interMode:
                    tags = [data['tags'][idx] for idx in order[i*self.batch_size:(i+1)*self.batch_size]]

            # batch length
            batch_len = len(sentences)

            # compute length of longest sentence in batch, capped at max_len
            batch_max_subwords_len = max([len(s[0]) for s in sentences])
            max_subwords_len = min(batch_max_subwords_len, self.max_len)
            max_token_len = 0

            # prepare a numpy array with the data, initialising the data with pad_idx
            batch_data = self.token_pad_idx * np.ones((batch_len, max_subwords_len))
            batch_token_starts = []

            # copy the data to the numpy array, truncating long sentences
            for j in range(batch_len):
                cur_subwords_len = len(sentences[j][0])
                if cur_subwords_len <= max_subwords_len:
                    batch_data[j][:cur_subwords_len] = sentences[j][0]
                else:
                    batch_data[j] = sentences[j][0][:max_subwords_len]
                # binary mask marking the first subword of each token
                token_start_idx = sentences[j][-1]
                token_starts = np.zeros(max_subwords_len)
                token_starts[[idx for idx in token_start_idx if idx < max_subwords_len]] = 1
                batch_token_starts.append(token_starts)
                max_token_len = max(int(sum(token_starts)), max_token_len)

            if not interMode:
                batch_tags = self.tag_pad_idx * np.ones((batch_len, max_token_len))
                for j in range(batch_len):
                    cur_tags_len = len(tags[j])
                    if cur_tags_len <= max_token_len:
                        batch_tags[j][:cur_tags_len] = tags[j]
                    else:
                        batch_tags[j] = tags[j][:max_token_len]

            # since all data are indices, we convert them to torch LongTensors
            batch_data = torch.tensor(batch_data, dtype=torch.long)
            batch_token_starts = torch.tensor(batch_token_starts, dtype=torch.long)
            if not interMode:
                batch_tags = torch.tensor(batch_tags, dtype=torch.long)

            # shift tensors to GPU if available
            batch_data, batch_token_starts = batch_data.to(self.device), batch_token_starts.to(self.device)
            if not interMode:
                batch_tags = batch_tags.to(self.device)
                yield batch_data, batch_token_starts, batch_tags
            else:
                yield batch_data, batch_token_starts
import json
import struct
from collections import namedtuple
from OrderAPI.pyT4.pyT4 import T4
# Account-type code letter -> market name (Chinese):
# S = equities, F = futures, H = Hong Kong stocks.
ACCOUNT_TYPE = {
    'S': '股票',
    'F': '期貨',
    'H': '港股'
}
class Account(object):
    """One brokerage account parsed from a T4 account-listing line.

    Lines look like::

        S9A95 - 9809315 - 楊伯謙
        FF002000 - 9114728 - 楊伯謙

    i.e. ``<type letter><branch> - <account number> - <holder name>``.
    """

    def __init__(self, acc):
        # Split once instead of re-splitting the line for every field.
        parts = [part.strip() for part in acc.split('-')]
        self.type = parts[0][0]      # market code letter, see ACCOUNT_TYPE
        self.branch = parts[0][1:]   # branch office code
        self.account = parts[1]      # account number
        self.name = parts[2]         # holder name

    def __str__(self):
        return '{}{}{}{}'.format(self.name, self.type, self.branch, self.account)
class OrderAPI(object):
    """Thin wrapper around the pyT4 trading API for order management."""

    @staticmethod
    def _side(qty):
        """Map a signed quantity to a buy/sell flag: 'B' (>0), 'S' (<0), ' ' (0)."""
        if qty > 0:
            return 'B'
        if qty < 0:
            return 'S'
        return ' '

    @staticmethod
    def make_stock_orders(stock, qty, price):
        """Build a stock order dict; the sign of *qty* selects buy/sell."""
        return {
            'code_id': stock,
            'price': str(price),
            'price_type': ' ',
            'qty': str(abs(qty)),
            'ord_type': '00',
            'bs': OrderAPI._side(qty),
        }

    @staticmethod
    def make_future_orders(future_id, qty, price):
        """Build a futures order dict (limit price, ROD); sign of *qty* selects buy/sell."""
        return {
            'code_id': future_id,
            'price': str(price),
            'price_type': 'LMT',
            'ord_type': 'ROD',
            'oct_type': ' ',
            'bs': OrderAPI._side(qty),
            'qty': str(abs(qty)),
        }

    @staticmethod
    def accounts():
        """Yield an Account for every non-empty line T4 reports."""
        accounts_raw = T4.show_list2()
        lines = [acc for acc in accounts_raw.split('\n') if len(acc)]
        for acc in lines:
            yield Account(acc)

    def __init__(self, config_file='OrderAPI.json'):
        """Log in to T4 using *config_file* and register CA certs."""
        self._init_t4(config_file)
        self._init_ca()

    def _init_ca(self):
        """Register each CA eKey on every account; group accounts by market type."""
        # NOTE: this instance attribute shadows the accounts() staticmethod
        # on instances (matches the original interface).
        self.accounts = {'S': [], 'F': [], 'H': []}
        for acc in OrderAPI.accounts():
            for ekey in self.UserInfo['CA']:
                T4.add_acc_ca(acc.branch, acc.account, ekey['ID'], ekey['eKey'], ekey['eKeyPassword'])
            self.accounts[acc.type].append(acc)

    def _init_t4(self, config_file):
        """Load credentials from *config_file* and initialize the T4 session."""
        with open(config_file) as fd_json:
            self.UserInfo = json.load(fd_json)
        msg = T4.init_t4(self.UserInfo['UserId'], self.UserInfo['Password'], '')
        self._status = msg
        T4.do_register(1)

    @property
    def status(self):
        """Login status message returned by T4.init_t4."""
        return self._status

    @property
    def server_ip(self):
        """Return the connected T4 server as 'ip:port'."""
        ip_port = T4.show_ip()
        ip = ip_port.split('\n')[0].split(':')[1].strip()
        port = ip_port.split('\n')[1].split(':')[1].strip()
        return '{}:{}'.format(ip, port)

    @staticmethod
    def placing_order(acc, dt_orders):
        """Dispatch an order dict to the stock or futures API by account type."""
        if acc.type == 'S':
            return OrderAPI.placing_stock_order(acc, dt_orders)
        elif acc.type == 'F':
            return OrderAPI.placing_future_order(acc, dt_orders)

    @staticmethod
    def placing_stock_order(acc, dt_orders):
        """Submit a stock order built by make_stock_orders."""
        order_args = [
            dt_orders['bs'],
            acc.branch,
            acc.account,
            dt_orders['code_id'],
            dt_orders['ord_type'],
            dt_orders['price'],
            dt_orders['qty'],
            dt_orders['price_type'],
        ]
        return T4.stock_order(*order_args)

    @staticmethod
    def placing_future_order(acc, dt_orders):
        """Submit a futures order built by make_future_orders."""
        order_args = [
            dt_orders['bs'],
            acc.branch,
            acc.account,
            dt_orders['code_id'],
            dt_orders['price'],
            dt_orders['qty'],
            dt_orders['price_type'],
            dt_orders['ord_type'],
            dt_orders['oct_type'],
        ]
        return T4.future_order(*order_args)

    @staticmethod
    def placing_cancel_order(dt_cancel):
        """Cancel an order; argument layout differs per market_id ('S'/'F')."""
        if dt_cancel['market_id'] == 'S':
            cancel_args = [
                dt_cancel['bs'],
                dt_cancel['branch'],
                dt_cancel['account'],
                dt_cancel['code_id'],
                dt_cancel['ord_type'],
                dt_cancel['ord_seq'],
                dt_cancel['ord_no'],
                dt_cancel['pre_order'],
            ]
            return T4.stock_cancel(*cancel_args)
        if dt_cancel['market_id'] == 'F':
            cancel_args = [
                dt_cancel['branch'],
                dt_cancel['account'],
                dt_cancel['code_id'],
                dt_cancel['ord_seq'],
                dt_cancel['ord_no'],
                dt_cancel['oct_type'],
                dt_cancel['pre_order'],
            ]
            return T4.future_cancel(*cancel_args)
import struct
from collections import namedtuple
from OrderAPI.pyT4.pyT4 import T4
# Account-type code letter -> market name (Chinese):
# S = equities, F = futures, H = Hong Kong stocks.
ACCOUNT_TYPE = {
    'S': '股票',
    'F': '期貨',
    'H': '港股'
}
class Account(object):
    """One brokerage account parsed from a T4 listing line, e.g.

    S9A95 - 9809315 - 楊伯謙
    FF002000 - 9114728 - 楊伯謙

    i.e. ``<type letter><branch> - <account number> - <holder name>``.
    """

    def __init__(self, acc):
        self.type = acc.split('-')[0].strip()[0]      # market code letter
        self.branch = acc.split('-')[0].strip()[1:]   # branch office code
        self.account = acc.split('-')[1].strip()      # account number
        self.name = acc.split('-')[2].strip()         # holder name

    def __str__(self):
        return '{}{}{}{}'.format(self.name, self.type, self.branch, self.account)
class OrderAPI(object):
    """Thin wrapper around the pyT4 trading API for order management."""

    @staticmethod
    def make_stock_orders(stock, qty, price):
        """Build a stock order dict; the sign of *qty* selects buy/sell."""
        res = {
            'code_id': stock,
            'price': str(price),
            'price_type': ' ',
            'qty': str(abs(qty)),
            'ord_type': '00',
            'bs': ' '
        }
        # Positive qty = buy, negative = sell, zero leaves the flag blank.
        bs = ' '
        if qty > 0:
            bs = 'B'
        elif qty < 0:
            bs = 'S'
        res['bs'] = bs
        return res

    @staticmethod
    def make_future_orders(future_id, qty, price):
        """Build a futures order dict (limit price, ROD); sign of *qty* selects buy/sell."""
        res = {
            'code_id': future_id,
            'price': str(price),
            'price_type': 'LMT',
            'ord_type': 'ROD',
            'oct_type': ' ',
            'bs': ' ',
            'qty': str(abs(qty))
        }
        # Positive qty = buy, negative = sell, zero leaves the flag blank.
        bs = ' '
        if qty > 0:
            bs = 'B'
        elif qty < 0:
            bs = 'S'
        res['bs'] = bs
        return res

    @staticmethod
    def accounts():
        """Yield an Account for every non-empty line T4 reports."""
        accounts_raw = T4.show_list2()
        accounts_raw = [acc for acc in accounts_raw.split('\n') if len(acc)]
        for acc in accounts_raw:
            yield Account(acc)

    def __init__(self, config_file='OrderAPI.json'):
        """Log in to T4 using *config_file* and register CA certificates."""
        self._init_t4(config_file)
        self._init_ca()

    def _init_ca(self):
        """Register each CA eKey on every account; group accounts by type."""
        # NOTE: this instance attribute shadows the accounts() staticmethod
        # on instances.
        self.accounts = {'S': [], 'F': [], 'H': []}
        for acc in OrderAPI.accounts():
            for ekey in self.UserInfo['CA']:
                T4.add_acc_ca(acc.branch, acc.account, ekey['ID'], ekey['eKey'], ekey['eKeyPassword'])
            self.accounts[acc.type].append(acc)

    def _init_t4(self, config_file):
        """Load credentials from *config_file* and initialize the T4 session."""
        with open(config_file) as fd_json:
            self.UserInfo = json.load(fd_json)
        msg = T4.init_t4(self.UserInfo['UserId'], self.UserInfo['Password'], '')
        self._status = msg
        T4.do_register(1)

    @property
    def status(self):
        """Login status message returned by T4.init_t4."""
        return self._status

    @property
    def server_ip(self):
        """Return the connected T4 server as 'ip:port'."""
        ip_port = T4.show_ip()
        ip = ip_port.split('\n')[0].split(':')[1].strip()
        port = ip_port.split('\n')[1].split(':')[1].strip()
        return '{}:{}'.format(ip, port)

    @staticmethod
    def placing_order(acc, dt_orders):
        """Dispatch an order dict to the stock or futures API by account type."""
        if acc.type == 'S':
            return OrderAPI.placing_stock_order(acc, dt_orders)
        elif acc.type == 'F':
            return OrderAPI.placing_future_order(acc, dt_orders)

    @staticmethod
    def placing_stock_order(acc, dt_orders):
        """Submit a stock order built by make_stock_orders."""
        order_args = list()
        order_args.append(dt_orders['bs'])
        order_args.append(acc.branch)
        order_args.append(acc.account)
        order_args.append(dt_orders['code_id'])
        order_args.append(dt_orders['ord_type'])
        order_args.append(dt_orders['price'])
        order_args.append(dt_orders['qty'])
        order_args.append(dt_orders['price_type'])
        return T4.stock_order(*order_args)

    @staticmethod
    def placing_future_order(acc, dt_orders):
        """Submit a futures order built by make_future_orders."""
        order_args = list()
        order_args.append(dt_orders['bs'])
        order_args.append(acc.branch)
        order_args.append(acc.account)
        order_args.append(dt_orders['code_id'])
        order_args.append(dt_orders['price'])
        order_args.append(dt_orders['qty'])
        order_args.append(dt_orders['price_type'])
        order_args.append(dt_orders['ord_type'])
        order_args.append(dt_orders['oct_type'])
        return T4.future_order(*order_args)

    @staticmethod
    def placing_cancel_order(dt_cancel):
        """Cancel an order; argument layout differs per market_id ('S'/'F')."""
        lst_cancel_items = list()
        if dt_cancel['market_id'] == 'S':
            lst_cancel_items.append(dt_cancel['bs'])
            lst_cancel_items.append(dt_cancel['branch'])
            lst_cancel_items.append(dt_cancel['account'])
            lst_cancel_items.append(dt_cancel['code_id'])
            lst_cancel_items.append(dt_cancel['ord_type'])
            lst_cancel_items.append(dt_cancel['ord_seq'])
            lst_cancel_items.append(dt_cancel['ord_no'])
            lst_cancel_items.append(dt_cancel['pre_order'])
            return T4.stock_cancel(*lst_cancel_items)
        if dt_cancel['market_id'] == 'F':
            lst_cancel_items.append(dt_cancel['branch'])
            lst_cancel_items.append(dt_cancel['account'])
            lst_cancel_items.append(dt_cancel['code_id'])
            lst_cancel_items.append(dt_cancel['ord_seq'])
            lst_cancel_items.append(dt_cancel['ord_no'])
            lst_cancel_items.append(dt_cancel['oct_type'])
            lst_cancel_items.append(dt_cancel['pre_order'])
            return T4.future_cancel(*lst_cancel_items)
import os
import argparse
import numpy as np
import mindspore.nn as nn
import mindspore.ops.operations as P
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.train.serialization import export, load_checkpoint, load_param_into_net
class LeNet(nn.Cell):
    """LeNet-5-style CNN: two conv+pool stages, then three dense layers (10 outputs)."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.relu = P.ReLU()
        # construct() flattens with this fixed batch size; inputs are
        # assumed to match it — TODO confirm callers' batch dimension.
        self.batch_size = 32
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0, has_bias=False, pad_mode='valid')
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0, has_bias=False, pad_mode='valid')
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.reshape = P.Reshape()
        # 400 = flattened feature size after the conv/pool stack.
        self.fc1 = nn.Dense(400, 120)
        self.fc2 = nn.Dense(120, 84)
        self.fc3 = nn.Dense(84, 10)

    def construct(self, input_x):
        """Forward pass: conv1-relu-pool, conv2-relu-pool, flatten, fc1-fc2-fc3."""
        output = self.conv1(input_x)
        output = self.relu(output)
        output = self.pool(output)
        output = self.conv2(output)
        output = self.relu(output)
        output = self.pool(output)
        # Flatten to (batch_size, features) for the dense layers.
        output = self.reshape(output, (self.batch_size, -1))
        output = self.fc1(output)
        output = self.relu(output)
        output = self.fc2(output)
        output = self.relu(output)
        output = self.fc3(output)
        return output
# CLI: --path selects where the exported model file is written.
parser = argparse.ArgumentParser(description='MindSpore Model Save')
parser.add_argument('--path', default='./lenet_model.ms', type=str, help='model save path')
if __name__ == '__main__':
    # Graph-mode execution on Ascend for the export.
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    context.set_context(enable_task_sink=True)
    print("test lenet predict start")

    # Seeded random input so the dumped .bin is reproducible.
    seed = 0
    np.random.seed(seed)
    batch = 1
    channel = 1
    input_h = 32
    input_w = 32
    origin_data = np.random.uniform(low=0, high=255, size=(batch, channel, input_h, input_w)).astype(np.float32)
    # Persist the raw input alongside the exported model for later replay.
    origin_data.tofile("lenet_input_data.bin")
    input_data = Tensor(origin_data)
    print(input_data.asnumpy())

    net = LeNet()
    ckpt_file_path = "./tests/ut/python/predict/checkpoint_lenet.ckpt"
    predict_args = parser.parse_args()
    model_path_name = predict_args.path

    # Only export when the pretrained checkpoint is present.
    is_ckpt_exist = os.path.exists(ckpt_file_path)
    if is_ckpt_exist:
        param_dict = load_checkpoint(ckpoint_file_name=ckpt_file_path)
        load_param_into_net(net, param_dict)
        export(net, input_data, file_name=model_path_name, file_format='LITE')
        print("test lenet predict success.")
    else:
        print("checkpoint file is not exist.")
import argparse
import numpy as np
import mindspore.nn as nn
import mindspore.ops.operations as P
import mindspore.context as context
from mindspore.common.tensor import Tensor
from mindspore.train.serialization import export, load_checkpoint, load_param_into_net
class LeNet(nn.Cell):
    """LeNet-5 style CNN: two conv/pool stages followed by three dense layers."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.relu = P.ReLU()
        # construct() flattens activations using this fixed leading dimension.
        self.batch_size = 32
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0,
                               has_bias=False, pad_mode='valid')
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0,
                               has_bias=False, pad_mode='valid')
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.reshape = P.Reshape()
        self.fc1 = nn.Dense(400, 120)
        self.fc2 = nn.Dense(120, 84)
        self.fc3 = nn.Dense(84, 10)

    def construct(self, input_x):
        """Forward pass: conv-relu-pool twice, flatten, then fc1-fc3."""
        x = self.pool(self.relu(self.conv1(input_x)))
        x = self.pool(self.relu(self.conv2(x)))
        # NOTE(review): leading dim is pinned to batch_size=32 -- confirm.
        x = self.reshape(x, (self.batch_size, -1))
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)
parser = argparse.ArgumentParser(description='MindSpore Model Save')
parser.add_argument('--path', default='./lenet_model.ms', type=str, help='model save path')
if __name__ == '__main__':
    # Export a checkpoint-initialized LeNet to the LITE format for prediction tests.
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    context.set_context(enable_task_sink=True)
    print("test lenet predict start")
    # Fixed seed: the dumped input file must be reproducible.
    seed = 0
    np.random.seed(seed)
    batch = 1
    channel = 1
    input_h = 32
    input_w = 32
    origin_data = np.random.uniform(low=0, high=255, size=(batch, channel, input_h, input_w)).astype(np.float32)
    origin_data.tofile("lenet_input_data.bin")
    input_data = Tensor(origin_data)
    print(input_data.asnumpy())
    net = LeNet()
    ckpt_file_path = "./tests/ut/python/predict/checkpoint_lenet.ckpt"
    predict_args = parser.parse_args()
    model_path_name = predict_args.path
    is_ckpt_exist = os.path.exists(ckpt_file_path)
    if is_ckpt_exist:
        param_dict = load_checkpoint(ckpoint_file_name=ckpt_file_path)
        load_param_into_net(net, param_dict)
        export(net, input_data, file_name=model_path_name, file_format='LITE')
        print("test lenet predict success.")
    else:
        print("checkpoint file is not exist.")
from __future__ import absolute_import, unicode_literals
import os
import mock
import pkg_resources
import pytest
from mopidy import config, exceptions, ext
from tests import IsA, any_unicode
class DummyExtension(ext.Extension):
    """Concrete Extension stub used as a test double throughout this module."""

    dist_name = 'Mopidy-Foobar'
    ext_name = 'foobar'
    version = '1.2.3'

    def get_default_config(self):
        """Return a minimal ini-style config that just enables the extension."""
        return '\n'.join(['[foobar]', 'enabled = true'])
# Matcher that compares equal to any DummyExtension instance (used in
# the expected ExtensionData tuples below).
any_testextension = IsA(DummyExtension)
class TestExtension(object):
    """Tests for the defaults of the mopidy.ext.Extension base class."""

    @pytest.fixture
    def extension(self):
        # A fresh, bare base-class instance per test.
        return ext.Extension()

    def test_dist_name_is_none(self, extension):
        assert extension.dist_name is None

    def test_ext_name_is_none(self, extension):
        assert extension.ext_name is None

    def test_version_is_none(self, extension):
        assert extension.version is None

    def test_get_default_config_raises_not_implemented(self, extension):
        # Subclasses must override get_default_config().
        with pytest.raises(NotImplementedError):
            extension.get_default_config()

    def test_get_config_schema_returns_extension_schema(self, extension):
        schema = extension.get_config_schema()
        # The base schema always carries an 'enabled' boolean switch.
        assert isinstance(schema['enabled'], config.Boolean)

    def test_validate_environment_does_nothing_by_default(self, extension):
        assert extension.validate_environment() is None

    def test_setup_raises_not_implemented(self, extension):
        # Subclasses must override setup().
        with pytest.raises(NotImplementedError):
            extension.setup(None)

    def test_get_cache_dir_raises_assertion_error(self, extension):
        config = {'core': {'cache_dir': '/tmp'}}
        with pytest.raises(AssertionError):  # ext_name not set
            ext.Extension.get_cache_dir(config)

    def test_get_config_dir_raises_assertion_error(self, extension):
        config = {'core': {'config_dir': '/tmp'}}
        with pytest.raises(AssertionError):  # ext_name not set
            ext.Extension.get_config_dir(config)

    def test_get_data_dir_raises_assertion_error(self, extension):
        config = {'core': {'data_dir': '/tmp'}}
        with pytest.raises(AssertionError):  # ext_name not set
            ext.Extension.get_data_dir(config)
class TestLoadExtensions(object):
    """Tests for ext.load_extensions() entry-point discovery and filtering."""

    @pytest.yield_fixture
    def iter_entry_points_mock(self, request):
        # Patch pkg_resources so no really-installed extensions are seen.
        patcher = mock.patch('pkg_resources.iter_entry_points')
        iter_entry_points = patcher.start()
        iter_entry_points.return_value = []
        yield iter_entry_points
        patcher.stop()

    def test_no_extensions(self, iter_entry_points_mock):
        iter_entry_points_mock.return_value = []
        assert ext.load_extensions() == []

    def test_load_extensions(self, iter_entry_points_mock):
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = DummyExtension
        iter_entry_points_mock.return_value = [mock_entry_point]
        # A valid extension class yields one fully populated ExtensionData.
        expected = ext.ExtensionData(
            any_testextension, mock_entry_point, IsA(config.ConfigSchema),
            any_unicode, None)
        assert ext.load_extensions() == [expected]

    def test_gets_wrong_class(self, iter_entry_points_mock):
        # Entry points resolving to a non-Extension class are skipped.
        class WrongClass(object):
            pass
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = WrongClass
        iter_entry_points_mock.return_value = [mock_entry_point]
        assert ext.load_extensions() == []

    def test_gets_instance(self, iter_entry_points_mock):
        # Entry points must resolve to a class, not an instance.
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = DummyExtension()
        iter_entry_points_mock.return_value = [mock_entry_point]
        assert ext.load_extensions() == []

    def test_creating_instance_fails(self, iter_entry_points_mock):
        # A constructor that raises must not abort loading; the ext is dropped.
        mock_extension = mock.Mock(spec=ext.Extension)
        mock_extension.side_effect = Exception
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = mock_extension
        iter_entry_points_mock.return_value = [mock_entry_point]
        assert ext.load_extensions() == []

    def test_get_config_schema_fails(self, iter_entry_points_mock):
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = DummyExtension
        iter_entry_points_mock.return_value = [mock_entry_point]
        with mock.patch.object(DummyExtension, 'get_config_schema') as get:
            get.side_effect = Exception
            assert ext.load_extensions() == []
            get.assert_called_once_with()

    def test_get_default_config_fails(self, iter_entry_points_mock):
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = DummyExtension
        iter_entry_points_mock.return_value = [mock_entry_point]
        with mock.patch.object(DummyExtension, 'get_default_config') as get:
            get.side_effect = Exception
            assert ext.load_extensions() == []
            get.assert_called_once_with()

    def test_get_command_fails(self, iter_entry_points_mock):
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = DummyExtension
        iter_entry_points_mock.return_value = [mock_entry_point]
        with mock.patch.object(DummyExtension, 'get_command') as get:
            get.side_effect = Exception
            assert ext.load_extensions() == []
            get.assert_called_once_with()
class TestValidateExtensionData(object):
    """Tests for ext.validate_extension_data() rejection rules and dir helpers."""

    @pytest.fixture
    def ext_data(self):
        # A fully valid ExtensionData tuple; each test breaks one field.
        extension = DummyExtension()
        entry_point = mock.Mock()
        entry_point.name = extension.ext_name
        schema = extension.get_config_schema()
        defaults = extension.get_default_config()
        command = extension.get_command()
        return ext.ExtensionData(
            extension, entry_point, schema, defaults, command)

    def test_name_mismatch(self, ext_data):
        # entry_point.name must match the extension's ext_name.
        ext_data.entry_point.name = 'barfoo'
        assert not ext.validate_extension_data(ext_data)

    def test_distribution_not_found(self, ext_data):
        error = pkg_resources.DistributionNotFound
        ext_data.entry_point.require.side_effect = error
        assert not ext.validate_extension_data(ext_data)

    def test_version_conflict(self, ext_data):
        error = pkg_resources.VersionConflict
        ext_data.entry_point.require.side_effect = error
        assert not ext.validate_extension_data(ext_data)

    def test_entry_point_require_exception(self, ext_data):
        ext_data.entry_point.require.side_effect = Exception
        # Hope that entry points are well behaved, so exception will bubble.
        with pytest.raises(Exception):
            assert not ext.validate_extension_data(ext_data)

    def test_extenions_validate_environment_error(self, ext_data):
        # ExtensionError from validate_environment() is handled gracefully.
        extension = ext_data.extension
        with mock.patch.object(extension, 'validate_environment') as validate:
            validate.side_effect = exceptions.ExtensionError('error')
            assert not ext.validate_extension_data(ext_data)
            validate.assert_called_once_with()

    def test_extenions_validate_environment_exception(self, ext_data):
        extension = ext_data.extension
        with mock.patch.object(extension, 'validate_environment') as validate:
            validate.side_effect = Exception
            assert not ext.validate_extension_data(ext_data)
            validate.assert_called_once_with()

    def test_missing_schema(self, ext_data):
        ext_data = ext_data._replace(config_schema=None)
        assert not ext.validate_extension_data(ext_data)

    def test_schema_that_is_missing_enabled(self, ext_data):
        # Every extension schema must keep the 'enabled' key.
        del ext_data.config_schema['enabled']
        ext_data.config_schema['baz'] = config.String()
        assert not ext.validate_extension_data(ext_data)

    def test_schema_with_wrong_types(self, ext_data):
        ext_data.config_schema['enabled'] = 123
        assert not ext.validate_extension_data(ext_data)

    def test_schema_with_invalid_type(self, ext_data):
        ext_data.config_schema['baz'] = 123
        assert not ext.validate_extension_data(ext_data)

    def test_no_default_config(self, ext_data):
        ext_data = ext_data._replace(config_defaults=None)
        assert not ext.validate_extension_data(ext_data)

    def test_get_cache_dir(self, ext_data):
        # get_or_create_dir is patched away so no directory is created.
        core_cache_dir = '/tmp'
        config = {'core': {'cache_dir': core_cache_dir}}
        extension = ext_data.extension
        with mock.patch.object(ext.path, 'get_or_create_dir'):
            cache_dir = extension.get_cache_dir(config)
        expected = os.path.join(core_cache_dir, extension.ext_name)
        assert cache_dir == expected

    def test_get_config_dir(self, ext_data):
        core_config_dir = '/tmp'
        config = {'core': {'config_dir': core_config_dir}}
        extension = ext_data.extension
        with mock.patch.object(ext.path, 'get_or_create_dir'):
            config_dir = extension.get_config_dir(config)
        expected = os.path.join(core_config_dir, extension.ext_name)
        assert config_dir == expected

    def test_get_data_dir(self, ext_data):
        core_data_dir = '/tmp'
        config = {'core': {'data_dir': core_data_dir}}
        extension = ext_data.extension
        with mock.patch.object(ext.path, 'get_or_create_dir'):
            data_dir = extension.get_data_dir(config)
        expected = os.path.join(core_data_dir, extension.ext_name)
        assert data_dir == expected
from __future__ import absolute_import, unicode_literals
import os
import mock
import pkg_resources
import pytest
from mopidy import config, exceptions, ext
from tests import IsA, any_unicode
class DummyExtension(ext.Extension):
    """Concrete Extension stub used as a test double throughout this module."""
    dist_name = 'Mopidy-Foobar'
    ext_name = 'foobar'
    version = '1.2.3'

    def get_default_config(self):
        # Minimal ini-style config that just enables the extension.
        return '[foobar]\nenabled = true'
# Matcher that compares equal to any DummyExtension instance.
any_testextension = IsA(DummyExtension)
class TestExtension(object):
    """Tests for the defaults of the mopidy.ext.Extension base class."""

    @pytest.fixture
    def extension(self):
        return ext.Extension()

    def test_dist_name_is_none(self, extension):
        assert extension.dist_name is None

    def test_ext_name_is_none(self, extension):
        assert extension.ext_name is None

    def test_version_is_none(self, extension):
        assert extension.version is None

    def test_get_default_config_raises_not_implemented(self, extension):
        with pytest.raises(NotImplementedError):
            extension.get_default_config()

    def test_get_config_schema_returns_extension_schema(self, extension):
        schema = extension.get_config_schema()
        assert isinstance(schema['enabled'], config.Boolean)

    def test_validate_environment_does_nothing_by_default(self, extension):
        assert extension.validate_environment() is None

    def test_setup_raises_not_implemented(self, extension):
        with pytest.raises(NotImplementedError):
            extension.setup(None)

    def test_get_cache_dir_raises_assertion_error(self, extension):
        config = {'core': {'cache_dir': '/tmp'}}
        with pytest.raises(AssertionError):  # ext_name not set
            ext.Extension.get_cache_dir(config)

    def test_get_config_dir_raises_assertion_error(self, extension):
        config = {'core': {'config_dir': '/tmp'}}
        with pytest.raises(AssertionError):  # ext_name not set
            ext.Extension.get_config_dir(config)

    def test_get_data_dir_raises_assertion_error(self, extension):
        config = {'core': {'data_dir': '/tmp'}}
        with pytest.raises(AssertionError):  # ext_name not set
            ext.Extension.get_data_dir(config)
class TestLoadExtensions(object):
    """Tests for ext.load_extensions() entry-point discovery and filtering."""

    @pytest.yield_fixture
    def iter_entry_points_mock(self, request):
        # Patch pkg_resources so no really-installed extensions are seen.
        patcher = mock.patch('pkg_resources.iter_entry_points')
        iter_entry_points = patcher.start()
        iter_entry_points.return_value = []
        yield iter_entry_points
        patcher.stop()

    def test_no_extensions(self, iter_entry_points_mock):
        iter_entry_points_mock.return_value = []
        assert ext.load_extensions() == []

    def test_load_extensions(self, iter_entry_points_mock):
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = DummyExtension
        iter_entry_points_mock.return_value = [mock_entry_point]
        expected = ext.ExtensionData(
            any_testextension, mock_entry_point, IsA(config.ConfigSchema),
            any_unicode, None)
        assert ext.load_extensions() == [expected]

    def test_gets_wrong_class(self, iter_entry_points_mock):
        class WrongClass(object):
            pass
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = WrongClass
        iter_entry_points_mock.return_value = [mock_entry_point]
        assert ext.load_extensions() == []

    def test_gets_instance(self, iter_entry_points_mock):
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = DummyExtension()
        iter_entry_points_mock.return_value = [mock_entry_point]
        assert ext.load_extensions() == []

    def test_creating_instance_fails(self, iter_entry_points_mock):
        mock_extension = mock.Mock(spec=ext.Extension)
        mock_extension.side_effect = Exception
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = mock_extension
        iter_entry_points_mock.return_value = [mock_entry_point]
        assert ext.load_extensions() == []

    def test_get_config_schema_fails(self, iter_entry_points_mock):
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = DummyExtension
        iter_entry_points_mock.return_value = [mock_entry_point]
        with mock.patch.object(DummyExtension, 'get_config_schema') as get:
            get.side_effect = Exception
            assert ext.load_extensions() == []
            get.assert_called_once_with()

    def test_get_default_config_fails(self, iter_entry_points_mock):
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = DummyExtension
        iter_entry_points_mock.return_value = [mock_entry_point]
        with mock.patch.object(DummyExtension, 'get_default_config') as get:
            get.side_effect = Exception
            assert ext.load_extensions() == []
            get.assert_called_once_with()

    def test_get_command_fails(self, iter_entry_points_mock):
        mock_entry_point = mock.Mock()
        mock_entry_point.resolve.return_value = DummyExtension
        iter_entry_points_mock.return_value = [mock_entry_point]
        with mock.patch.object(DummyExtension, 'get_command') as get:
            get.side_effect = Exception
            assert ext.load_extensions() == []
            get.assert_called_once_with()
class TestValidateExtensionData(object):
    """Tests for ext.validate_extension_data() rejection rules and dir helpers."""

    @pytest.fixture
    def ext_data(self):
        # A fully valid ExtensionData tuple; each test breaks one field.
        extension = DummyExtension()
        entry_point = mock.Mock()
        entry_point.name = extension.ext_name
        schema = extension.get_config_schema()
        defaults = extension.get_default_config()
        command = extension.get_command()
        return ext.ExtensionData(
            extension, entry_point, schema, defaults, command)

    def test_name_mismatch(self, ext_data):
        ext_data.entry_point.name = 'barfoo'
        assert not ext.validate_extension_data(ext_data)

    def test_distribution_not_found(self, ext_data):
        error = pkg_resources.DistributionNotFound
        ext_data.entry_point.require.side_effect = error
        assert not ext.validate_extension_data(ext_data)

    def test_version_conflict(self, ext_data):
        error = pkg_resources.VersionConflict
        ext_data.entry_point.require.side_effect = error
        assert not ext.validate_extension_data(ext_data)

    def test_entry_point_require_exception(self, ext_data):
        ext_data.entry_point.require.side_effect = Exception
        # Hope that entry points are well behaved, so exception will bubble.
        with pytest.raises(Exception):
            assert not ext.validate_extension_data(ext_data)

    def test_extenions_validate_environment_error(self, ext_data):
        extension = ext_data.extension
        with mock.patch.object(extension, 'validate_environment') as validate:
            validate.side_effect = exceptions.ExtensionError('error')
            assert not ext.validate_extension_data(ext_data)
            validate.assert_called_once_with()

    def test_extenions_validate_environment_exception(self, ext_data):
        extension = ext_data.extension
        with mock.patch.object(extension, 'validate_environment') as validate:
            validate.side_effect = Exception
            assert not ext.validate_extension_data(ext_data)
            validate.assert_called_once_with()

    def test_missing_schema(self, ext_data):
        ext_data = ext_data._replace(config_schema=None)
        assert not ext.validate_extension_data(ext_data)

    def test_schema_that_is_missing_enabled(self, ext_data):
        del ext_data.config_schema['enabled']
        ext_data.config_schema['baz'] = config.String()
        assert not ext.validate_extension_data(ext_data)

    def test_schema_with_wrong_types(self, ext_data):
        ext_data.config_schema['enabled'] = 123
        assert not ext.validate_extension_data(ext_data)

    def test_schema_with_invalid_type(self, ext_data):
        ext_data.config_schema['baz'] = 123
        assert not ext.validate_extension_data(ext_data)

    def test_no_default_config(self, ext_data):
        ext_data = ext_data._replace(config_defaults=None)
        assert not ext.validate_extension_data(ext_data)

    def test_get_cache_dir(self, ext_data):
        core_cache_dir = '/tmp'
        config = {'core': {'cache_dir': core_cache_dir}}
        extension = ext_data.extension
        with mock.patch.object(ext.path, 'get_or_create_dir'):
            cache_dir = extension.get_cache_dir(config)
        expected = os.path.join(core_cache_dir, extension.ext_name)
        assert cache_dir == expected

    def test_get_config_dir(self, ext_data):
        core_config_dir = '/tmp'
        config = {'core': {'config_dir': core_config_dir}}
        extension = ext_data.extension
        with mock.patch.object(ext.path, 'get_or_create_dir'):
            config_dir = extension.get_config_dir(config)
        expected = os.path.join(core_config_dir, extension.ext_name)
        assert config_dir == expected

    def test_get_data_dir(self, ext_data):
        core_data_dir = '/tmp'
        config = {'core': {'data_dir': core_data_dir}}
        extension = ext_data.extension
        with mock.patch.object(ext.path, 'get_or_create_dir'):
            data_dir = extension.get_data_dir(config)
        expected = os.path.join(core_data_dir, extension.ext_name)
        assert data_dir == expected
import os
class ChemevoModel:
    """Write a flexCE config file and drive a flexCE chemical-evolution run.

    Parameters are recorded by ``initialize_model``, serialized to
    ``./config/<filename>`` by ``write_config``, and consumed by ``flexce.py``
    via ``run``.
    """

    def __init__(self, filename, init_param=None):
        """Set up a model.

        filename: name of the config file to create under ./config/.
        init_param: optional dict of keyword overrides for initialize_model.
        """
        self.filename = filename
        # Bug fix: the original expanded ``**init_param`` unconditionally,
        # raising TypeError whenever the default init_param=None was used.
        self.initialize_model(**(init_param or {}))

    def initialize_model(self, radius=10., time_tot=12000., dt=30.,
                         imf='kroupa', mbins_low=0.1, mbins_high=100.,
                         dm_low=0.1, dm_high=1., dtd_func='exponential',
                         dtd_min_time=150., dtd_time=1500.,
                         dtd_snia_frac=0.135, inflow='exp',
                         m_init=2e10, M1=4e11, b1=6000.,
                         inflow_ab_pattern='bbns', inflow_met=1.0,
                         outflow_source='ism', outflow=2.5, warmgas='False',
                         sf_func='constant', sf_nu1=1e-9, sf_f1=0., sf_tau1=0.,
                         sf_tau2=0., N_kslaw=1.):
        """Record all simulation parameters; defaults give the fiducial run.

        NOTE(review): units (times presumably Myr, masses in solar masses)
        are not stated here -- confirm against the flexCE documentation.
        """
        # Basic run setup
        self.radius = radius
        self.time_tot = time_tot
        self.dt = dt
        self.imf = imf
        # Stellar mass bins
        self.mbins_low = mbins_low
        self.mbins_high = mbins_high
        self.dm_low = dm_low
        self.dm_high = dm_high
        # SNIa delay-time distribution
        self.dtd_func = dtd_func
        self.dtd_min_time = dtd_min_time
        self.dtd_time = dtd_time
        self.dtd_snia_frac = dtd_snia_frac
        # Gas inflow
        self.inflow = inflow
        self.m_init = m_init
        self.M1 = M1
        self.b1 = b1
        self.inflow_ab_pattern = inflow_ab_pattern
        self.inflow_met = inflow_met
        # Outflow / warm-gas reservoir
        self.outflow_source = outflow_source
        self.outflow = outflow
        self.warmgas = warmgas  # NOTE: string flag ('False'/'True'), not a bool
        # Star-formation law
        self.sf_func = sf_func
        self.sf_nu1 = sf_nu1
        self.sf_f1 = sf_f1
        self.sf_tau1 = sf_tau1
        self.sf_tau2 = sf_tau2
        self.N_kslaw = N_kslaw

    def write_config(self):
        """Serialize the current parameters to ./config/<filename>.

        Bug fix: the original never closed the file, so the config could
        still be unflushed when run() launches flexce.py; ``with`` guarantees
        flush-and-close before this method returns.
        """
        with open('./config/' + self.filename, 'w') as outfile:
            print('# Simulation', file=outfile)
            print('# Fiducial', file=outfile)
            print('', file=outfile)
            # Yield tables are fixed for every run.
            print('# Yields', file=outfile)
            print('yields_snii_dir = limongi06/iso_yields/', file=outfile)
            print('yields_agb_dir = karakas10/iso_yields/', file=outfile)
            print('yields_snia_dir = iwamoto99/', file=outfile)
            print('yields_rprocess_dir = cescutti06/', file=outfile)
            print('yields_sprocess_dir = busso01/', file=outfile)
            print('yields_snia_model = w70', file=outfile)
            print('yields_r_elements = Ba, Eu', file=outfile)
            print('yields_s_elements = Ba,', file=outfile)
            print('', file=outfile)
            print('# Basic parameters', file=outfile)
            print('initialize_radius = {}'.format(self.radius), file=outfile)
            print('initialize_time_tot = {}'.format(self.time_tot), file=outfile)
            print('initialize_dt = {}'.format(self.dt), file=outfile)
            print('initialize_imf = {}'.format(self.imf), file=outfile)
            print('', file=outfile)
            print('# Mass bins', file=outfile)
            print('mass_bins_low = {}'.format(self.mbins_low), file=outfile)
            print('mass_bins_high = {}'.format(self.mbins_high), file=outfile)
            print('mass_bins_dm_low = {}'.format(self.dm_low), file=outfile)
            print('mass_bins_dm_high = {}'.format(self.dm_high), file=outfile)
            print('', file=outfile)
            print('# SNIa DTD', file=outfile)
            print('snia_dtd_func = {}'.format(self.dtd_func), file=outfile)
            print('snia_dtd_min_snia_time = {}'.format(
                self.dtd_min_time), file=outfile)
            print('snia_dtd_timescale = {}'.format(self.dtd_time), file=outfile)
            print('snia_dtd_snia_fraction = {}'.format(
                self.dtd_snia_frac), file=outfile)
            print('', file=outfile)
            print('# Inflow', file=outfile)
            print('inflows_func = {}'.format(self.inflow), file=outfile)
            print('inflows_mgas_init = {}'.format(self.m_init), file=outfile)
            print('inflows_M1 = {}'.format(self.M1), file=outfile)
            print('inflows_b1 = {}'.format(self.b1), file=outfile)
            print('inflows_inflow_ab_pattern = {}'.format(
                self.inflow_ab_pattern), file=outfile)
            print('inflows_inflow_metallicity = {}'.format(
                self.inflow_met), file=outfile)
            print('', file=outfile)
            print('# Outflow', file=outfile)
            print('outflows_outflow_source = {}'.format(
                self.outflow_source), file=outfile)
            print('outflows_eta_outflow = {}'.format(self.outflow), file=outfile)
            print('', file=outfile)
            print('# Warm ISM', file=outfile)
            print('warmgasres_warmgas = {}'.format(self.warmgas), file=outfile)
            print('', file=outfile)
            print('# Star Formation Law', file=outfile)
            print('sf_func = {}'.format(self.sf_func), file=outfile)
            print('sf_nu1 = {}'.format(self.sf_nu1), file=outfile)
            # Extra parameters only meaningful for the Gaussian SF law.
            if self.sf_func == 'sf_gauss':
                print('sf_f1 = {}'.format(self.sf_f1), file=outfile)
                print('sf_tau1 = {}'.format(self.sf_tau1), file=outfile)
                print('sf_tau2 = {}'.format(self.sf_tau2), file=outfile)
            print('sf_N_kslaw = {}'.format(self.N_kslaw), file=outfile)
            print('', file=outfile)

    def run(self):
        """Write the config, then execute flexce.py on it (blocking).

        flexce.py is run from inside the flexCE/ directory, so the working
        directory is switched for the duration of the call.
        """
        self.write_config()
        os.chdir('./flexCE/')
        os.system('python flexce.py {}'.format('../config/' + self.filename))
        os.chdir('../')
class DwarfModel(ChemevoModel):
    '''Preset ChemevoModel parameterization for a dwarf-galaxy run.

    Constructing an instance configures and immediately runs the model.

    Implementation:
    import flexce_batch as fb
    fb.DwarfModel('batch_dwarf.txt')
    '''
    def __init__(self, filename, time_tot=13000., inflow='te-t', m_init=3e9,
                 M1=6e10, b1=2500., outflow=10, sf_func='constant',
                 sf_nu1=1e-11, sf_f1=0., sf_tau1=0., sf_tau2=0.):
        # NOTE: deliberately bypasses ChemevoModel.__init__ and forwards the
        # dwarf-specific defaults straight to initialize_model.
        self.filename = filename
        self.initialize_model(time_tot=time_tot, inflow=inflow, m_init=m_init,
                              M1=M1, b1=b1, outflow=outflow, sf_func=sf_func,
                              sf_nu1=sf_nu1, sf_f1=sf_f1, sf_tau1=sf_tau1,
                              sf_tau2=sf_tau2)
        self.run()
import os
class ChemevoModel:
    """Write a flexCE config file and drive a flexCE chemical-evolution run.

    Parameters are recorded by ``initialize_model``, serialized to
    ``./config/<filename>`` by ``write_config``, and consumed by ``flexce.py``
    via ``run``.
    """

    def __init__(self, filename, init_param=None):
        """Set up a model.

        filename: name of the config file to create under ./config/.
        init_param: optional dict of keyword overrides for initialize_model.
        """
        self.filename = filename
        # Bug fix: the original expanded ``**init_param`` unconditionally,
        # raising TypeError whenever the default init_param=None was used.
        self.initialize_model(**(init_param or {}))

    def initialize_model(self, radius=10., time_tot=12000., dt=30.,
                         imf='kroupa', mbins_low=0.1, mbins_high=100.,
                         dm_low=0.1, dm_high=1., dtd_func='exponential',
                         dtd_min_time=150., dtd_time=1500.,
                         dtd_snia_frac=0.135, inflow='exp',
                         m_init=2e10, M1=4e11, b1=6000.,
                         inflow_ab_pattern='bbns', inflow_met=1.0,
                         outflow_source='ism', outflow=2.5, warmgas='False',
                         sf_func='constant', sf_nu1=1e-9, sf_f1=0., sf_tau1=0.,
                         sf_tau2=0., N_kslaw=1.):
        """Record all simulation parameters; defaults give the fiducial run."""
        # Basic run setup
        self.radius = radius
        self.time_tot = time_tot
        self.dt = dt
        self.imf = imf
        # Stellar mass bins
        self.mbins_low = mbins_low
        self.mbins_high = mbins_high
        self.dm_low = dm_low
        self.dm_high = dm_high
        # SNIa delay-time distribution
        self.dtd_func = dtd_func
        self.dtd_min_time = dtd_min_time
        self.dtd_time = dtd_time
        self.dtd_snia_frac = dtd_snia_frac
        # Gas inflow
        self.inflow = inflow
        self.m_init = m_init
        self.M1 = M1
        self.b1 = b1
        self.inflow_ab_pattern = inflow_ab_pattern
        self.inflow_met = inflow_met
        # Outflow / warm-gas reservoir
        self.outflow_source = outflow_source
        self.outflow = outflow
        self.warmgas = warmgas  # NOTE: string flag ('False'/'True'), not a bool
        # Star-formation law
        self.sf_func = sf_func
        self.sf_nu1 = sf_nu1
        self.sf_f1 = sf_f1
        self.sf_tau1 = sf_tau1
        self.sf_tau2 = sf_tau2
        self.N_kslaw = N_kslaw

    def write_config(self):
        """Serialize the current parameters to ./config/<filename>.

        Bug fix: the original never closed the file; ``with`` guarantees the
        config is flushed before run() hands it to flexce.py.
        """
        with open('./config/' + self.filename, 'w') as outfile:
            print('# Simulation', file=outfile)
            print('# Fiducial', file=outfile)
            print('', file=outfile)
            print('# Yields', file=outfile)
            print('yields_snii_dir = limongi06/iso_yields/', file=outfile)
            print('yields_agb_dir = karakas10/iso_yields/', file=outfile)
            print('yields_snia_dir = iwamoto99/', file=outfile)
            print('yields_rprocess_dir = cescutti06/', file=outfile)
            print('yields_sprocess_dir = busso01/', file=outfile)
            print('yields_snia_model = w70', file=outfile)
            print('yields_r_elements = Ba, Eu', file=outfile)
            print('yields_s_elements = Ba,', file=outfile)
            print('', file=outfile)
            print('# Basic parameters', file=outfile)
            print('initialize_radius = {}'.format(self.radius), file=outfile)
            print('initialize_time_tot = {}'.format(self.time_tot), file=outfile)
            print('initialize_dt = {}'.format(self.dt), file=outfile)
            print('initialize_imf = {}'.format(self.imf), file=outfile)
            print('', file=outfile)
            print('# Mass bins', file=outfile)
            print('mass_bins_low = {}'.format(self.mbins_low), file=outfile)
            print('mass_bins_high = {}'.format(self.mbins_high), file=outfile)
            print('mass_bins_dm_low = {}'.format(self.dm_low), file=outfile)
            print('mass_bins_dm_high = {}'.format(self.dm_high), file=outfile)
            print('', file=outfile)
            print('# SNIa DTD', file=outfile)
            print('snia_dtd_func = {}'.format(self.dtd_func), file=outfile)
            print('snia_dtd_min_snia_time = {}'.format(
                self.dtd_min_time), file=outfile)
            print('snia_dtd_timescale = {}'.format(self.dtd_time), file=outfile)
            print('snia_dtd_snia_fraction = {}'.format(
                self.dtd_snia_frac), file=outfile)
            print('', file=outfile)
            print('# Inflow', file=outfile)
            print('inflows_func = {}'.format(self.inflow), file=outfile)
            print('inflows_mgas_init = {}'.format(self.m_init), file=outfile)
            print('inflows_M1 = {}'.format(self.M1), file=outfile)
            print('inflows_b1 = {}'.format(self.b1), file=outfile)
            print('inflows_inflow_ab_pattern = {}'.format(
                self.inflow_ab_pattern), file=outfile)
            print('inflows_inflow_metallicity = {}'.format(
                self.inflow_met), file=outfile)
            print('', file=outfile)
            print('# Outflow', file=outfile)
            print('outflows_outflow_source = {}'.format(
                self.outflow_source), file=outfile)
            print('outflows_eta_outflow = {}'.format(self.outflow), file=outfile)
            print('', file=outfile)
            print('# Warm ISM', file=outfile)
            print('warmgasres_warmgas = {}'.format(self.warmgas), file=outfile)
            print('', file=outfile)
            print('# Star Formation Law', file=outfile)
            print('sf_func = {}'.format(self.sf_func), file=outfile)
            print('sf_nu1 = {}'.format(self.sf_nu1), file=outfile)
            # Extra parameters only meaningful for the Gaussian SF law.
            if self.sf_func == 'sf_gauss':
                print('sf_f1 = {}'.format(self.sf_f1), file=outfile)
                print('sf_tau1 = {}'.format(self.sf_tau1), file=outfile)
                print('sf_tau2 = {}'.format(self.sf_tau2), file=outfile)
            print('sf_N_kslaw = {}'.format(self.N_kslaw), file=outfile)
            print('', file=outfile)

    def run(self):
        """Write the config, then execute flexce.py on it (blocking)."""
        self.write_config()
        # flexce.py expects to be run from inside the flexCE/ directory.
        os.chdir('./flexCE/')
        os.system('python flexce.py {}'.format('../config/' + self.filename))
        os.chdir('../')
class DwarfModel(ChemevoModel):
    '''Preset ChemevoModel parameterization for a dwarf-galaxy run.

    Constructing an instance configures and immediately runs the model.

    Implementation:
    import flexce_batch as fb
    fb.DwarfModel('batch_dwarf.txt')
    '''
    def __init__(self, filename, time_tot=13000., inflow='te-t', m_init=3e9,
                 M1=6e10, b1=2500., outflow=10, sf_func='constant',
                 sf_nu1=1e-11, sf_f1=0., sf_tau1=0., sf_tau2=0.):
        # NOTE: deliberately bypasses ChemevoModel.__init__ and forwards the
        # dwarf-specific defaults straight to initialize_model.
        self.filename = filename
        self.initialize_model(time_tot=time_tot, inflow=inflow, m_init=m_init,
                              M1=M1, b1=b1, outflow=outflow, sf_func=sf_func,
                              sf_nu1=sf_nu1, sf_f1=sf_f1, sf_tau1=sf_tau1,
                              sf_tau2=sf_tau2)
        self.run()
import imp
import logging
import os
import sys
import traceback
import warnings
# Handle to the optional SQLAlchemy package; stays None when unavailable.
_sqlalchemy = None
try:
    # Search sys.path[1:] -- presumably to skip the script's own directory so
    # a same-named local module cannot shadow the real package; confirm.
    f, pathname, desc = imp.find_module("sqlalchemy", sys.path[1:])
    _ = imp.load_module("sqlalchemy", f, pathname, desc)
    # Only accept modules that look like genuine SQLAlchemy (expose 'dialects').
    if hasattr(_, "dialects"):
        _sqlalchemy = _
        warnings.simplefilter(action="ignore", category=_sqlalchemy.exc.SAWarning)
except ImportError:
    pass
try:
    import MySQLdb  # used by SQLAlchemy in case of MySQL
    # Promote MySQLdb warnings to errors so they surface as exceptions.
    warnings.filterwarnings("error", category=MySQLdb.Warning)
except ImportError:
    pass
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapMissingDependence
from plugins.generic.connector import Connector as GenericConnector
class SQLAlchemy(GenericConnector):
    """Direct database connector implemented on top of SQLAlchemy."""

    def __init__(self, dialect=None):
        GenericConnector.__init__(self)
        # Optional dialect name used to rewrite the DBMS part of conf.direct.
        self.dialect = dialect

    def connect(self):
        """Create a SQLAlchemy engine from conf.direct and open a connection.

        Raises:
            SqlmapMissingDependence: SQLAlchemy could not be imported.
            SqlmapFilePathException: file-based DB path does not exist.
            SqlmapConnectionException: any connection-level failure.
        """
        if _sqlalchemy:
            self.initConnection()
            try:
                # File-based databases: rewrite conf.direct to an absolute path.
                if not self.port and self.db:
                    if not os.path.exists(self.db):
                        raise SqlmapFilePathException("the provided database file '%s' does not exist" % self.db)
                    _ = conf.direct.split("//", 1)
                    conf.direct = "%s////%s" % (_[0], os.path.abspath(self.db))
                if self.dialect:
                    conf.direct = conf.direct.replace(conf.dbms, self.dialect, 1)
                if self.dialect == "sqlite":
                    # SQLite connections are single-thread-bound by default.
                    engine = _sqlalchemy.create_engine(conf.direct, connect_args={"check_same_thread": False})
                elif self.dialect == "oracle":
                    engine = _sqlalchemy.create_engine(conf.direct)
                else:
                    engine = _sqlalchemy.create_engine(conf.direct, connect_args={})
                self.connector = engine.connect()
            except (TypeError, ValueError):
                # Translate known driver quirks into friendlier errors.
                if "_get_server_version_info" in traceback.format_exc():
                    try:
                        import pymssql
                        if int(pymssql.__version__[0]) < 2:
                            raise SqlmapConnectionException("SQLAlchemy connection issue (obsolete version of pymssql ('%s') is causing problems)" % pymssql.__version__)
                    except ImportError:
                        pass
                elif "invalid literal for int() with base 10: '0b" in traceback.format_exc():
                    raise SqlmapConnectionException("SQLAlchemy connection issue ('https://bitbucket.org/zzzeek/sqlalchemy/issues/3975')")
                raise
            except SqlmapFilePathException:
                raise
            except Exception as ex:
                # Bug fix: ex[0] raises TypeError on Python 3 (exceptions are
                # not subscriptable); use ex.args instead.
                raise SqlmapConnectionException("SQLAlchemy connection issue ('%s')" % (ex.args[0] if ex.args else ex))
            self.printConnected()
        else:
            raise SqlmapMissingDependence("SQLAlchemy not available")

    def fetchall(self):
        """Return all rows of the last query as tuples, or None on error."""
        try:
            return [tuple(row) for row in self.cursor.fetchall()]
        except _sqlalchemy.exc.ProgrammingError as ex:
            # Bug fix: parenthesize the conditional; previously the "(remote) "
            # prefix was dropped whenever ex had no 'message' attribute.
            logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % (ex.message if hasattr(ex, "message") else ex))
            return None

    def execute(self, query):
        """Execute *query*, storing the result proxy on self.cursor."""
        try:
            self.cursor = self.connector.execute(query)
        except (_sqlalchemy.exc.OperationalError, _sqlalchemy.exc.ProgrammingError) as ex:
            logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % (ex.message if hasattr(ex, "message") else ex))
        except _sqlalchemy.exc.InternalError as ex:
            # Bug fix: ex[1] is invalid on Python 3; fall back to args/str.
            raise SqlmapConnectionException(ex.args[1] if len(ex.args) > 1 else str(ex))

    def select(self, query):
        """Run *query* and fetch its rows."""
        self.execute(query)
        # NOTE(review): the closing "return self.fetchall()" statement sits on
        # the following source line, which was garbled during extraction.
        return self.fetchall()
import imp
import logging
import os
import sys
import traceback
import warnings
# Handle to the optional SQLAlchemy package; stays None when unavailable.
_sqlalchemy = None
try:
    # Search sys.path[1:] -- presumably to skip the script's own directory so
    # a same-named local module cannot shadow the real package; confirm.
    f, pathname, desc = imp.find_module("sqlalchemy", sys.path[1:])
    _ = imp.load_module("sqlalchemy", f, pathname, desc)
    # Only accept modules that look like genuine SQLAlchemy (expose 'dialects').
    if hasattr(_, "dialects"):
        _sqlalchemy = _
        warnings.simplefilter(action="ignore", category=_sqlalchemy.exc.SAWarning)
except ImportError:
    pass
try:
    import MySQLdb  # used by SQLAlchemy in case of MySQL
    warnings.filterwarnings("error", category=MySQLdb.Warning)
except ImportError:
    pass
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapMissingDependence
from plugins.generic.connector import Connector as GenericConnector
class SQLAlchemy(GenericConnector):
def __init__(self, dialect=None):
    # dialect: optional SQLAlchemy dialect name substituted into conf.direct.
    GenericConnector.__init__(self)
    self.dialect = dialect
def connect(self):
if _sqlalchemy:
self.initConnection()
try:
if not self.port and self.db:
if not os.path.exists(self.db):
raise SqlmapFilePathException("the provided database file '%s' does not exist" % self.db)
_ = conf.direct.split("//", 1)
conf.direct = "%s////%s" % (_[0], os.path.abspath(self.db))
if self.dialect:
conf.direct = conf.direct.replace(conf.dbms, self.dialect, 1)
if self.dialect == "sqlite":
engine = _sqlalchemy.create_engine(conf.direct, connect_args={"check_same_thread": False})
elif self.dialect == "oracle":
engine = _sqlalchemy.create_engine(conf.direct)
else:
engine = _sqlalchemy.create_engine(conf.direct, connect_args={})
self.connector = engine.connect()
except (TypeError, ValueError):
if "_get_server_version_info" in traceback.format_exc():
try:
import pymssql
if int(pymssql.__version__[0]) < 2:
raise SqlmapConnectionException("SQLAlchemy connection issue (obsolete version of pymssql ('%s') is causing problems)" % pymssql.__version__)
except ImportError:
pass
elif "invalid literal for int() with base 10: '0b" in traceback.format_exc():
raise SqlmapConnectionException("SQLAlchemy connection issue ('https://bitbucket.org/zzzeek/sqlalchemy/issues/3975')")
raise
except SqlmapFilePathException:
raise
except Exception as ex:
raise SqlmapConnectionException("SQLAlchemy connection issue ('%s')" % ex[0])
self.printConnected()
else:
raise SqlmapMissingDependence("SQLAlchemy not available")
def fetchall(self):
try:
retVal = []
for row in self.cursor.fetchall():
retVal.append(tuple(row))
return retVal
except _sqlalchemy.exc.ProgrammingError as ex:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % ex.message if hasattr(ex, "message") else ex)
return None
def execute(self, query):
try:
self.cursor = self.connector.execute(query)
except (_sqlalchemy.exc.OperationalError, _sqlalchemy.exc.ProgrammingError) as ex:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % ex.message if hasattr(ex, "message") else ex)
except _sqlalchemy.exc.InternalError as ex:
raise SqlmapConnectionException(ex[1])
def select(self, query):
self.execute(query)
return self.fetchall() | 0.214445 | 0.06486 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
from qtpy.QtWidgets import QWidget
from vispy import scene
from ..tree import Annotation, Edge
from .base_plotter import TreePlotterQWidgetBase
__all__ = ["VisPyPlotter"]
@dataclass
class Bounds:
    """Axis-aligned bounding box of the drawn tree, in data coordinates."""

    # x extent
    xmin: float
    xmax: float
    # y extent
    ymin: float
    ymax: float
class VisPyPlotter(TreePlotterQWidgetBase):
    """
    Tree plotter using vispy as the plotting backend.

    (Docstring fix: the previous text claimed pyqtgraph, but this class
    draws with vispy throughout.)

    Attributes
    ----------
    canvas : vispy.scene.SceneCanvas
        Main plotting canvas
    tree : TreeVisual
        The tree.
    """

    def __init__(self):
        """
        Set up the plot canvas, camera and (initially empty) tree visual.
        """
        self.canvas = scene.SceneCanvas(keys=None, size=(300, 1200))
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.PanZoomCamera()
        self.tree = TreeVisual(parent=None)
        self.view.add(self.tree)

    def get_qwidget(self) -> QWidget:
        """Return the native Qt widget wrapping the vispy canvas."""
        return self.canvas.native

    def clear(self) -> None:
        """Remove all branches and annotations from the plot."""
        self.tree.clear()

    @property
    def bounds(self) -> Bounds:
        """
        Return (xmin, ymin, xmax, ymax) bounds of the drawn tree. This does
        not include any annotations.
        """
        # Iterate the values directly; the track IDs are not needed here.
        xs = np.concatenate([track.pos[:, 0] for track in self.tree.tracks.values()])
        ys = np.concatenate([track.pos[:, 1] for track in self.tree.tracks.values()])
        return Bounds(
            xmin=np.min(xs), ymin=np.min(ys), xmax=np.max(xs), ymax=np.max(ys)
        )

    def autoscale_view(self) -> None:
        """Scale the canvas so all branches are in view."""
        # Reuse the bounds property instead of re-scanning every track
        # (previously this duplicated the min/max computation).
        bounds = self.bounds
        padding = 0.1
        width = bounds.xmax - bounds.xmin
        height = bounds.ymax - bounds.ymin
        rect = (
            bounds.xmin - padding * width,
            bounds.ymin - padding * height,
            width * (1 + 2 * padding),
            height * (1 + 2 * padding),
        )
        self.view.camera.rect = rect

    def update_colors(self) -> None:
        """
        Update plotted track colors from the colors in self.edges.
        """
        for e in self.edges:
            if e.id is not None:
                self.tree.set_branch_color(e.id, e.color)

    def add_branch(self, e: Edge) -> None:
        """
        Add a single branch to the tree and rescale the view.
        """
        # Note the (y, x) axis order used throughout this plotter.
        self.tree.add_track(e.id, np.column_stack((e.y, e.x)), e.color)
        self.autoscale_view()

    def add_annotation(self, a: Annotation) -> None:
        """
        Add a single label to the tree.
        """
        self.tree.add_annotation(a.x, a.y, a.label, a.color)

    def draw_current_time_line(self, time: int) -> None:
        """Draw (or move) a horizontal line marking the current time."""
        if not hasattr(self, "_time_line"):
            # Created lazily on first call, then updated in place.
            self._time_line = scene.visuals.Line()
            self.view.add(self._time_line)
        bounds = self.bounds
        padding = (bounds.xmax - bounds.xmin) * 0.1
        self._time_line.set_data(
            pos=np.array([[bounds.xmin - padding, time], [bounds.xmax + padding, time]])
        )
class TreeVisual(scene.visuals.Compound):
    """
    Tree visual that stores branches as sub-visuals.
    """

    def __init__(self, parent):
        super().__init__([])
        self.parent = parent
        self.unfreeze()
        # Keep a reference to tracks we add so their colour can be changed later
        self.tracks = {}
        self.subvisuals = []

    def get_branch_color(self, branch_id: int) -> np.ndarray:
        """Return the colour array of the branch with the given ID."""
        return self.tracks[branch_id].color

    def set_branch_color(self, branch_id: int, color: np.ndarray) -> None:
        """
        Set the color of an individual branch.
        """
        self.tracks[branch_id].set_data(color=color)

    def add_track(self, id: Optional[int], pos: np.ndarray, color: np.ndarray) -> None:
        """
        Add a branch line to the tree.

        Parameters
        ----------
        id :
            Track ID.
        pos :
            Array of shape (2, 2) specifying vertex coordinates.
        color :
            Array of shape (n, 4) specifying RGBA values in range [0, 1] along
            the track.
        """
        if id is None:
            visual = scene.visuals.Line(pos=pos, color=color, width=3)
        else:
            # Split up line into individual time steps so color can vary
            # along the line
            ys = np.arange(pos[0, 1], pos[1, 1] + 1)
            xs = np.ones(ys.size) * pos[0, 0]
            visual = scene.visuals.Line(
                pos=np.column_stack((xs, ys)), color=color, width=3
            )
        self.tracks[id] = visual
        self.add_subvisual(visual)
        self.subvisuals.append(visual)

    def add_annotation(self, x: float, y: float, label: str, color):
        """Add a text label anchored at (y, x) in scene coordinates."""
        visual = scene.visuals.Text(
            text=label,
            color=color,
            pos=[y, x, 0],
            anchor_x="left",
            anchor_y="top",
            font_size=10,
        )
        self.add_subvisual(visual)
        self.subvisuals.append(visual)

    def clear(self) -> None:
        """Remove all tracks and annotations."""
        while self.subvisuals:
            subvisual = self.subvisuals.pop()
            self.remove_subvisual(subvisual)
        # Fix: previously the ID -> visual mapping was never reset, keeping
        # stale references to removed visuals alive (and letting
        # set_branch_color/bounds act on them after a clear).
        self.tracks = {}
from typing import Optional
import numpy as np
from qtpy.QtWidgets import QWidget
from vispy import scene
from ..tree import Annotation, Edge
from .base_plotter import TreePlotterQWidgetBase
__all__ = ["VisPyPlotter"]
@dataclass
class Bounds:
    """Axis-aligned bounding box of the drawn tree, in data coordinates."""

    # minimum / maximum x
    xmin: float
    xmax: float
    # minimum / maximum y
    ymin: float
    ymax: float
class VisPyPlotter(TreePlotterQWidgetBase):
    """
    Tree plotter using vispy as the plotting backend.

    Attributes
    ----------
    canvas : vispy.scene.SceneCanvas
        Main plotting canvas
    tree : TreeVisual
        The tree.
    """

    def __init__(self):
        """
        Setup the plot canvas, camera and (initially empty) tree visual.
        """
        self.canvas = scene.SceneCanvas(keys=None, size=(300, 1200))
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.PanZoomCamera()
        self.tree = TreeVisual(parent=None)
        self.view.add(self.tree)

    def get_qwidget(self) -> QWidget:
        """Return the native Qt widget wrapping the vispy canvas."""
        return self.canvas.native

    def clear(self) -> None:
        """Remove all branches and annotations from the plot."""
        self.tree.clear()

    @property
    def bounds(self) -> Bounds:
        """
        Return (xmin, ymin, xmax, ymax) bounds of the drawn tree. This does
        not include any annotations.
        """
        xs = np.concatenate([track.pos[:, 0] for id, track in self.tree.tracks.items()])
        ys = np.concatenate([track.pos[:, 1] for id, track in self.tree.tracks.items()])
        return Bounds(
            xmin=np.min(xs), ymin=np.min(ys), xmax=np.max(xs), ymax=np.max(ys)
        )

    def autoscale_view(self) -> None:
        """Scale the canvas so all branches are in view."""
        xs = np.concatenate([track.pos[:, 0] for id, track in self.tree.tracks.items()])
        ys = np.concatenate([track.pos[:, 1] for id, track in self.tree.tracks.items()])
        # Pad the data extent by 10% on every side.
        padding = 0.1
        width, height = np.ptp(xs), np.ptp(ys)
        rect = (
            np.min(xs) - padding * width,
            np.min(ys) - padding * height,
            width * (1 + 2 * padding),
            height * (1 + 2 * padding),
        )
        self.view.camera.rect = rect

    def update_colors(self) -> None:
        """
        Update plotted track colors from the colors in self.edges.
        """
        for e in self.edges:
            if e.id is not None:
                self.tree.set_branch_color(e.id, e.color)

    def add_branch(self, e: Edge) -> None:
        """
        Add a single branch to the tree and rescale the view.
        """
        # Note the (y, x) axis order used throughout this plotter.
        self.tree.add_track(e.id, np.column_stack((e.y, e.x)), e.color)
        self.autoscale_view()

    def add_annotation(self, a: Annotation) -> None:
        """
        Add a single label to the tree.
        """
        self.tree.add_annotation(a.x, a.y, a.label, a.color)

    def draw_current_time_line(self, time: int) -> None:
        """Draw (or move) a horizontal line marking the current time."""
        if not hasattr(self, "_time_line"):
            # Created lazily on first call, then updated in place.
            self._time_line = scene.visuals.Line()
            self.view.add(self._time_line)
        bounds = self.bounds
        padding = (bounds.xmax - bounds.xmin) * 0.1
        self._time_line.set_data(
            pos=np.array([[bounds.xmin - padding, time], [bounds.xmax + padding, time]])
        )
class TreeVisual(scene.visuals.Compound):
    """
    Compound visual holding one sub-visual per branch or annotation.
    """

    def __init__(self, parent):
        super().__init__([])
        self.parent = parent
        self.unfreeze()
        # ID -> Line visual, so branch colours can be updated later.
        self.tracks = {}
        self.subvisuals = []

    def _attach(self, visual) -> None:
        # Shared bookkeeping for every newly created sub-visual.
        self.add_subvisual(visual)
        self.subvisuals.append(visual)

    def get_branch_color(self, branch_id: int) -> np.ndarray:
        """Return the colour of the branch identified by *branch_id*."""
        return self.tracks[branch_id].color

    def set_branch_color(self, branch_id: int, color: np.ndarray) -> None:
        """Recolour a single branch in place."""
        self.tracks[branch_id].set_data(color=color)

    def add_track(self, id: Optional[int], pos: np.ndarray, color: np.ndarray) -> None:
        """
        Add a branch line.

        Parameters
        ----------
        id :
            Track ID.
        pos :
            Array of shape (2, 2) giving the branch end points.
        color :
            Array of shape (n, 4) with RGBA values in [0, 1] along the track.
        """
        if id is None:
            line = scene.visuals.Line(pos=pos, color=color, width=3)
        else:
            # One vertex per time step so the colour can vary along the line.
            times = np.arange(pos[0, 1], pos[1, 1] + 1)
            coords = np.column_stack((np.ones(times.size) * pos[0, 0], times))
            line = scene.visuals.Line(pos=coords, color=color, width=3)
        self.tracks[id] = line
        self._attach(line)

    def add_annotation(self, x: float, y: float, label: str, color):
        """Place a text label anchored at (y, x)."""
        text = scene.visuals.Text(
            text=label,
            color=color,
            pos=[y, x, 0],
            anchor_x="left",
            anchor_y="top",
            font_size=10,
        )
        self._attach(text)

    def clear(self) -> None:
        """Remove all tracks."""
        while self.subvisuals:
            self.remove_subvisual(self.subvisuals.pop())
import torch
from .generic_pair_loss import GenericPairLoss
from ..utils import loss_and_miner_utils as lmu, common_functions as c_f
class LiftedStructureLoss(GenericPairLoss):
    """
    Lifted-structure embedding loss computed over mined positive and
    negative distance pairs.
    """

    def __init__(self, neg_margin, pos_margin=0, **kwargs):
        super().__init__(use_similarity=False, mat_based_loss=False, **kwargs)
        self.neg_margin = neg_margin
        self.pos_margin = pos_margin

    def _compute_loss(self, pos_pairs, neg_pairs, indices_tuple):
        anchor1, positives, anchor2, _ = indices_tuple
        if len(anchor1) == 0 or len(anchor2) == 0:
            return self.zero_losses()
        pos_pairs = pos_pairs.unsqueeze(1)
        # A negative pair is relevant to a positive pair when it shares
        # either of that pair's endpoints.
        n_per_p = (
            (anchor2.unsqueeze(0) == anchor1.unsqueeze(1))
            | (anchor2.unsqueeze(0) == positives.unsqueeze(1))
        ).float()
        neg_pairs = neg_pairs * n_per_p
        keep_mask = (n_per_p != 0).float()
        neg_term = lmu.logsumexp(
            self.neg_margin - neg_pairs, keep_mask=keep_mask, add_one=False, dim=1
        )
        per_pair = torch.relu(neg_term + (pos_pairs - self.pos_margin)) ** 2
        # Halve because each positive pair is counted twice.
        per_pair /= 2
        return {
            "loss": {
                "losses": per_pair,
                "indices": (anchor1, positives),
                "reduction_type": "pos_pair",
            }
        }
class GeneralizedLiftedStructureLoss(GenericPairLoss):
    # The 'generalized' lifted structure loss shown on page 4
    # of the "in defense of triplet loss" paper
    # https://arxiv.org/pdf/1703.07737.pdf
    def __init__(self, neg_margin, pos_margin=0, **kwargs):
        super().__init__(use_similarity=False, mat_based_loss=True, **kwargs)
        self.neg_margin = neg_margin
        self.pos_margin = pos_margin

    def _compute_loss(self, mat, pos_mask, neg_mask):
        """Per-element hinge over logsumexp-pooled positive/negative terms."""
        hard_pos = lmu.logsumexp(mat - self.pos_margin, keep_mask=pos_mask, add_one=False)
        hard_neg = lmu.logsumexp(self.neg_margin - mat, keep_mask=neg_mask, add_one=False)
        losses = torch.relu(hard_pos + hard_neg)
        return {
            "loss": {
                "losses": losses,
                "indices": c_f.torch_arange_from_size(mat),
                "reduction_type": "element",
            }
        }
import torch
from .generic_pair_loss import GenericPairLoss
from ..utils import loss_and_miner_utils as lmu, common_functions as c_f
class LiftedStructureLoss(GenericPairLoss):
    """
    Lifted-structure embedding loss over mined positive/negative pairs.
    """

    def __init__(self, neg_margin, pos_margin=0, **kwargs):
        super().__init__(use_similarity=False, mat_based_loss=False, **kwargs)
        self.neg_margin = neg_margin
        self.pos_margin = pos_margin

    def _compute_loss(self, pos_pairs, neg_pairs, indices_tuple):
        """Compute a per-positive-pair loss; empty index tuples yield zero losses."""
        a1, p, a2, _ = indices_tuple
        if len(a1) > 0 and len(a2) > 0:
            pos_pairs = pos_pairs.unsqueeze(1)
            # A negative pair is relevant to a positive pair when it shares
            # either of that pair's endpoints.
            n_per_p = ((a2.unsqueeze(0) == a1.unsqueeze(1)) | (a2.unsqueeze(0) == p.unsqueeze(1))).float()
            neg_pairs = neg_pairs*n_per_p
            keep_mask = (~(n_per_p==0)).float()
            neg_pairs_loss = lmu.logsumexp(self.neg_margin-neg_pairs, keep_mask=keep_mask, add_one=False, dim=1)
            loss_per_pos_pair = neg_pairs_loss + (pos_pairs - self.pos_margin)
            loss_per_pos_pair = torch.relu(loss_per_pos_pair)**2
            loss_per_pos_pair /= 2 # divide by 2 since each positive pair will be counted twice
            return {"loss": {"losses": loss_per_pos_pair, "indices": (a1, p), "reduction_type": "pos_pair"}}
        return self.zero_losses()
class GeneralizedLiftedStructureLoss(GenericPairLoss):
    """
    The 'generalized' lifted structure loss shown on page 4 of the
    "in defense of triplet loss" paper:
    https://arxiv.org/pdf/1703.07737.pdf
    """

    def __init__(self, neg_margin, pos_margin=0, **kwargs):
        super().__init__(use_similarity=False, mat_based_loss=True, **kwargs)
        self.neg_margin = neg_margin
        self.pos_margin = pos_margin

    def _compute_loss(self, mat, pos_mask, neg_mask):
        pos_term = lmu.logsumexp(mat - self.pos_margin, keep_mask=pos_mask, add_one=False)
        neg_term = lmu.logsumexp(self.neg_margin - mat, keep_mask=neg_mask, add_one=False)
        element_losses = torch.relu(pos_term + neg_term)
        result = {
            "losses": element_losses,
            "indices": c_f.torch_arange_from_size(mat),
            "reduction_type": "element",
        }
        return {"loss": result}
import redis
try:
import unittest2 as unittest
except ImportError:
import unittest
from relationships import Relationship
from relationships.relationship import default_key_list
class RelationshipsTestCase(unittest.TestCase):
    """Integration tests for the Relationship API against a local redis (db 15)."""

    def setUp(self):
        # Database 15 is used as scratch space; tearDown flushes it.
        self.redis_connection = redis.StrictRedis(
            host='localhost',
            port=6379,
            db=15)

    def tearDown(self):
        self.redis_connection.flushdb()

    def test_no_redis_connection(self):
        # Without an explicit connection, a localhost:6379/0 default is built.
        r = Relationship()
        kwargs = r.redis_connection.connection_pool.connection_kwargs
        self.assertEqual(kwargs.get("host"), "localhost")
        self.assertEqual(kwargs.get("db"), 0)
        self.assertEqual(kwargs.get("port"), 6379)

    def test_follow(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1).follow(42)
        # assertTrue/assertFalse instead of assertEqual(x, True/False).
        self.assertTrue(r(1).is_following(42))
        self.assertTrue(r(42).is_follower(1))

    def test_unfollow(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(2).follow(42)
        r(2).unfollow(42)
        self.assertFalse(r(2).is_following(42))
        self.assertFalse(r(42).is_follower(2))

    def test_block(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1).block(42)
        self.assertTrue(r(1).is_blocked(42))
        self.assertTrue(r(42).is_blocked_by(1))

    def test_unblock(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(2).block(42)
        r(2).unblock(42)
        self.assertFalse(r(42).is_blocked_by(2))
        self.assertFalse(r(2).is_blocked(42))

    def test_friends(self):
        # Friendship requires follows in both directions.
        r = Relationship(redis_connection=self.redis_connection)
        r(5).follow(1)
        r(1).follow(5)
        r(100).follow(1)
        r(1).follow(100)
        self.assertEqual(r(1).friends(), {'100', '5'})

    def test_follower_count(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1000).follow(2000)
        r(1001).follow(2000)
        r(1002).follow(2000)
        self.assertEqual(r(2000).follower_count(), 3)

    def test_following_count(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1000).follow(2000)
        r(1000).follow(1001)
        self.assertEqual(r(1000).following_count(), 2)

    def test_blocked_by_count(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1000).block(2000)
        r(1001).block(2000)
        r(1002).block(2000)
        self.assertEqual(r(2000).blocked_count(), 3)

    def test_blocking_count(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1000).block(2000)
        r(1000).block(2001)
        self.assertEqual(r(1000).block_count(), 2)

    def test_followers(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(10000).follow(100)
        r(10001).follow(100)
        r(10002).follow(100)
        self.assertEqual(r(100).followers(), {'10000', '10001', '10002'})

    def test_following(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(100).follow(900)
        r(100).follow(901)
        self.assertEqual(r(100).following(), {'900', '901'})

    def test_blocked(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(100).block(900)
        r(100).block(901)
        self.assertEqual(r(100).blocks(), {'900', '901'})

    def test_blocked_by(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(10000).block(100)
        r(10001).block(100)
        r(10002).block(100)
        self.assertEqual(r(100).blocked(), {'10000', '10001', '10002'})

    def test_mutual_friends(self):
        r = Relationship(redis_connection=self.redis_connection)
        # Mutual follow pairs: Emre<->Aydan, Emre<->Samed, Emre<->Fka,
        # Fka<->Aydan, Fka<->Samed.
        for a, b in [('Emre', 'Aydan'), ('Emre', 'Samed'), ('Emre', 'Fka'),
                     ('Fka', 'Aydan'), ('Fka', 'Samed')]:
            r(a).follow(b)
            r(b).follow(a)
        self.assertEqual(r('Emre').mutual_friends('Fka'), {'Samed', 'Aydan'})
# Allow running the suite directly with `python tests.py`.
if __name__ == '__main__':
    unittest.main()
try:
import unittest2 as unittest
except ImportError:
import unittest
from relationships import Relationship
from relationships.relationship import default_key_list
class RelationshipsTestCase(unittest.TestCase):
    """Integration tests for the Relationship API against a local redis (db 15)."""

    def setUp(self):
        # Database 15 is used as scratch space; tearDown flushes it.
        self.redis_connection = redis.StrictRedis(
            host='localhost',
            port=6379,
            db=15)

    def tearDown(self):
        self.redis_connection.flushdb()

    def test_no_redis_connection(self):
        # Without an explicit connection, a localhost:6379/0 default is built.
        r = Relationship()
        self.assertEqual(r.redis_connection.connection_pool.connection_kwargs.get("host"), "localhost")
        self.assertEqual(r.redis_connection.connection_pool.connection_kwargs.get("db"), 0)
        self.assertEqual(r.redis_connection.connection_pool.connection_kwargs.get("port"), 6379)

    def test_follow(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1).follow(42)
        self.assertEqual(r(1).is_following(42), True)
        self.assertEqual(r(42).is_follower(1), True)

    def test_unfollow(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(2).follow(42)
        r(2).unfollow(42)
        self.assertEqual(r(2).is_following(42), False)
        self.assertEqual(r(42).is_follower(2), False)

    def test_block(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1).block(42)
        self.assertEqual(r(1).is_blocked(42), True)
        self.assertEqual(r(42).is_blocked_by(1), True)

    def test_unblock(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(2).block(42)
        r(2).unblock(42)
        self.assertEqual(r(42).is_blocked_by(2), False)
        self.assertEqual(r(2).is_blocked(42), False)

    def test_friends(self):
        # Friendship requires follows in both directions.
        r = Relationship(redis_connection=self.redis_connection)
        r(5).follow(1)
        r(1).follow(5)
        r(100).follow(1)
        r(1).follow(100)
        self.assertEqual(r(1).friends(), set(['100', '5']))

    def test_follower_count(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1000).follow(2000)
        r(1001).follow(2000)
        r(1002).follow(2000)
        self.assertEqual(r(2000).follower_count(), 3)

    def test_following_count(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1000).follow(2000)
        r(1000).follow(1001)
        self.assertEqual(r(1000).following_count(), 2)

    def test_blocked_by_count(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1000).block(2000)
        r(1001).block(2000)
        r(1002).block(2000)
        self.assertEqual(r(2000).blocked_count(), 3)

    def test_blocking_count(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(1000).block(2000)
        r(1000).block(2001)
        self.assertEqual(r(1000).block_count(), 2)

    def test_followers(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(10000).follow(100)
        r(10001).follow(100)
        r(10002).follow(100)
        self.assertEqual(r(100).followers(), set(['10000', '10001', '10002']))

    def test_following(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(100).follow(900)
        r(100).follow(901)
        self.assertEqual(r(100).following(), set(['900', '901']))

    def test_blocked(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(100).block(900)
        r(100).block(901)
        self.assertEqual(r(100).blocks(), set(['900', '901']))

    def test_blocked_by(self):
        r = Relationship(redis_connection=self.redis_connection)
        r(10000).block(100)
        r(10001).block(100)
        r(10002).block(100)
        self.assertEqual(r(100).blocked(), set(['10000', '10001', '10002']))

    def test_mutual_friends(self):
        # Mutual friends = intersection of the two users' friend sets.
        r = Relationship(redis_connection=self.redis_connection)
        r('Emre').follow('Aydan')
        r('Aydan').follow('Emre')
        r('Emre').follow('Samed')
        r('Samed').follow('Emre')
        r('Emre').follow('Fka')
        r('Fka').follow('Emre')
        r('Fka').follow('Aydan')
        r('Aydan').follow('Fka')
        r('Fka').follow('Samed')
        r('Samed').follow('Fka')
        self.assertEqual(r('Emre').mutual_friends('Fka'), set(['Samed', 'Aydan']))
# Entry point for running this test module directly.
if __name__ == '__main__':
    unittest.main()
import os
import tarfile
import shutil
from subprocess import check_call
from fmpy.util import download_file
# Fetch, verify and extract rpclib, build + install it, then build the
# remoting binaries against the installed copy for each host platform.
url = 'https://github.com/rpclib/rpclib/archive/refs/tags/v2.3.0.tar.gz'
checksum = 'eb9e6fa65e1a79b37097397f60599b93cb443d304fbc0447c50851bc3452fdef'

# build configuration
config = 'Release'

download_file(url, checksum)

filename = os.path.basename(url)
basedir = os.path.abspath(os.path.dirname(__file__))
source_dir = 'rpclib-2.3.0'
rpclib_dir = os.path.join(basedir, source_dir).replace('\\', '/')

# clean up any previous extraction
shutil.rmtree(source_dir, ignore_errors=True)

print("Extracting %s" % filename)

with tarfile.open(filename, 'r:gz') as tar:
    # NOTE(review): extractall() trusts archive member paths (path
    # traversal risk on untrusted archives); acceptable here because the
    # archive is checksum-verified above.
    tar.extractall()

if os.name == 'nt':
    # patch the CMake project to link static against the MSVC runtime
    with open(os.path.join(source_dir, 'CMakeLists.txt'), 'a') as file:
        # Append a snippet that replaces /MD with /MT in every flag set.
        file.write('''
message(${CMAKE_CXX_FLAGS_RELEASE})
message(${CMAKE_CXX_FLAGS_DEBUG})
set(CompilerFlags
CMAKE_CXX_FLAGS
CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_RELEASE
CMAKE_C_FLAGS
CMAKE_C_FLAGS_DEBUG
CMAKE_C_FLAGS_RELEASE
)
foreach(CompilerFlag ${CompilerFlags})
string(REPLACE "/MD" "/MT" ${CompilerFlag} "${${CompilerFlag}}")
endforeach()
message(${CMAKE_CXX_FLAGS_RELEASE})
message(${CMAKE_CXX_FLAGS_DEBUG})
''')

    for bitness, generator in [('win32', 'Visual Studio 15 2017'), ('win64', 'Visual Studio 15 2017 Win64')]:
        # clean up the previous remoting build for this platform
        shutil.rmtree(os.path.join(basedir, 'remoting', bitness), ignore_errors=True)

        print("Building rpclib...")
        check_call(args=[
            'cmake',
            '-B', source_dir + '/' + bitness,
            '-D', 'RPCLIB_MSVC_STATIC_RUNTIME=ON',
            '-D', 'CMAKE_INSTALL_PREFIX=' + source_dir + '/' + bitness + '/install',
            '-G', generator,
            source_dir
        ])
        check_call(args=['cmake', '--build', source_dir + '/' + bitness, '--target', 'install', '--config', config])

        print("Building remoting binaries...")
        # Fix: '-B remoting/<bitness>' was previously passed twice.
        check_call(args=[
            'cmake',
            '-B', 'remoting/' + bitness,
            '-G', generator,
            '-D', 'RPCLIB=' + rpclib_dir + '/' + bitness + '/install',
            'remoting'
        ])
        check_call(['cmake', '--build', 'remoting/' + bitness, '--config', config])
else:
    # clean up the previous remoting build
    shutil.rmtree(os.path.join(basedir, 'remoting', 'linux64'), ignore_errors=True)

    print("Building rpclib...")
    check_call(args=[
        'cmake',
        '-B', source_dir + '/linux64',
        '-D', 'CMAKE_INSTALL_PREFIX=' + source_dir + '/linux64' + '/install',
        '-D', 'CMAKE_POSITION_INDEPENDENT_CODE=ON',
        '-G', 'Unix Makefiles',
        source_dir
    ])
    check_call(args=['cmake', '--build', source_dir + '/linux64', '--target', 'install', '--config', config])

    print("Building remoting binaries...")
    # Fix: '-B remoting/linux64' was previously passed twice.
    check_call(args=[
        'cmake',
        '-B', 'remoting/linux64',
        '-G', 'Unix Makefiles',
        '-D', 'RPCLIB=' + rpclib_dir + '/linux64/install',
        'remoting'
    ])
    check_call(['cmake', '--build', 'remoting/linux64', '--config', config])
import tarfile
import shutil
from subprocess import check_call
from fmpy.util import download_file
# Fetch, verify and extract rpclib, build + install it, then build the
# remoting binaries against the installed copy for each host platform.
url = 'https://github.com/rpclib/rpclib/archive/refs/tags/v2.3.0.tar.gz'
checksum = 'eb9e6fa65e1a79b37097397f60599b93cb443d304fbc0447c50851bc3452fdef'

# build configuration
config = 'Release'

download_file(url, checksum)

filename = os.path.basename(url)
basedir = os.path.abspath(os.path.dirname(__file__))
source_dir = 'rpclib-2.3.0'
rpclib_dir = os.path.join(basedir, source_dir).replace('\\', '/')

# clean up any previous extraction
shutil.rmtree(source_dir, ignore_errors=True)

print("Extracting %s" % filename)

with tarfile.open(filename, 'r:gz') as tar:
    # NOTE(review): extractall() trusts archive member paths; acceptable
    # here because the archive is checksum-verified above.
    tar.extractall()

if os.name == 'nt':
    # patch the CMake project to link static against the MSVC runtime
    with open(os.path.join(source_dir, 'CMakeLists.txt'), 'a') as file:
        # Append a snippet that replaces /MD with /MT in every flag set
        # (the previous comment, "Append 'hello'", was wrong).
        file.write('''
message(${CMAKE_CXX_FLAGS_RELEASE})
message(${CMAKE_CXX_FLAGS_DEBUG})
set(CompilerFlags
CMAKE_CXX_FLAGS
CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_RELEASE
CMAKE_C_FLAGS
CMAKE_C_FLAGS_DEBUG
CMAKE_C_FLAGS_RELEASE
)
foreach(CompilerFlag ${CompilerFlags})
string(REPLACE "/MD" "/MT" ${CompilerFlag} "${${CompilerFlag}}")
endforeach()
message(${CMAKE_CXX_FLAGS_RELEASE})
message(${CMAKE_CXX_FLAGS_DEBUG})
''')

    for bitness, generator in [('win32', 'Visual Studio 15 2017'), ('win64', 'Visual Studio 15 2017 Win64')]:
        # clean up the previous remoting build for this platform
        shutil.rmtree(os.path.join(basedir, 'remoting', bitness), ignore_errors=True)

        print("Building rpclib...")
        check_call(args=[
            'cmake',
            '-B', source_dir + '/' + bitness,
            '-D', 'RPCLIB_MSVC_STATIC_RUNTIME=ON',
            '-D', 'CMAKE_INSTALL_PREFIX=' + source_dir + '/' + bitness + '/install',
            '-G', generator,
            source_dir
        ])
        check_call(args=['cmake', '--build', source_dir + '/' + bitness, '--target', 'install', '--config', config])

        print("Building remoting binaries...")
        # NOTE(review): '-B remoting/<bitness>' is passed twice (same value).
        check_call(args=[
            'cmake',
            '-B', 'remoting/' + bitness,
            '-G', generator,
            '-D', 'RPCLIB=' + rpclib_dir + '/' + bitness + '/install',
            '-B', 'remoting/' + bitness, 'remoting'
        ])
        check_call(['cmake', '--build', 'remoting/' + bitness, '--config', config])
else:
    # clean up the previous remoting build
    shutil.rmtree(os.path.join(basedir, 'remoting', 'linux64'), ignore_errors=True)

    print("Building rpclib...")
    check_call(args=[
        'cmake',
        '-B', source_dir + '/linux64',
        '-D', 'CMAKE_INSTALL_PREFIX=' + source_dir + '/linux64' + '/install',
        '-D', 'CMAKE_POSITION_INDEPENDENT_CODE=ON',
        '-G', 'Unix Makefiles',
        source_dir
    ])
    check_call(args=['cmake', '--build', source_dir + '/linux64', '--target', 'install', '--config', config])

    print("Building remoting binaries...")
    # NOTE(review): '-B remoting/linux64' is passed twice (same value).
    check_call(args=[
        'cmake',
        '-B', 'remoting/' + 'linux64',
        '-G', 'Unix Makefiles',
        '-D', 'RPCLIB=' + rpclib_dir + '/linux64/install',
        '-B', 'remoting/linux64', 'remoting'
    ])
    check_call(['cmake', '--build', 'remoting/linux64', '--config', config])
import networkx as nx
import multiset as m
import json
class IteratorScheme:
    """
    Weisfeiler-Lehman colour compressor that maps each unseen
    (old colour, neighbour multiset) pair to the next free integer.
    """

    def __init__(self):
        self.compresser_ctr = 0  # highest colour handed out so far
        self.seen_tuples = {}    # (old colour, serialized multiset) -> colour

    def reset(self):
        """Forget all assigned colours and restart the counter."""
        self.compresser_ctr = 0
        self.seen_tuples = {}
        return self

    def set_initial_colours(self, g):
        """Colour each node of *g* by its degree (stored as "wl_colour")."""
        initial_wl_colour = {}
        for node in g.nodes:
            degree = g.degree[node]
            # Keep the counter above every initial colour so freshly
            # assigned colours never collide with degrees.
            if degree > self.compresser_ctr:
                self.compresser_ctr = degree
            initial_wl_colour[node] = degree
        nx.set_node_attributes(g, initial_wl_colour, "wl_colour")
        return g

    def set_initial_multiset(self, g):
        """Attach to each node the multiset of its neighbours' colours."""
        multiset = {}
        for node in g.nodes:
            multiset[node] = m.Multiset()
            for neighbor in nx.all_neighbors(g, node):
                multiset[node].add(g.nodes[neighbor]["wl_colour"])
        nx.set_node_attributes(g, multiset, "neighbour_multiset")
        return g

    def compress_old_colour_and_multiset(self, old_label, multiset):
        """
        Return the canonical colour for (old_label, multiset), assigning a
        fresh one when the pair has not been seen before.
        """
        # Fix: sort_keys=True makes the serialization independent of the
        # order in which elements were added to the multiset; without it,
        # equal multisets could receive different colours.
        tupe = (old_label, json.dumps(multiset._elements, sort_keys=True))
        if tupe in self.seen_tuples:
            return self.seen_tuples[tupe]
        self.compresser_ctr += 1
        self.seen_tuples[tupe] = self.compresser_ctr
        return self.compresser_ctr
class StringCompressionScheme:
    """
    WL colour compressor that hashes (colour, multiset) tuples instead of
    maintaining an explicit counter; initial colours are label hashes.
    """

    def reset(self):
        """Stateless scheme; nothing to reset."""
        return self

    def set_initial_colours(self, g):
        """Colour each node by the hash of its 'label' attribute ('0' if absent)."""
        initial_wl_colour = {}
        for node in g.nodes:
            try:
                label = g.nodes[node]['label']
                label = label.strip("\"")
            except KeyError:
                label = '0'
            initial_wl_colour[node] = hash(label)
        nx.set_node_attributes(g, initial_wl_colour, "wl_colour")
        return g

    def set_initial_multiset(self, g):
        """Attach to each node the multiset of its neighbours' colours."""
        multiset = {}
        for node in g.nodes:
            multiset[node] = m.Multiset()
            for neighbor in nx.all_neighbors(g, node):
                multiset[node].add(g.nodes[neighbor]["wl_colour"])
        nx.set_node_attributes(g, multiset, "neighbour_multiset")
        return g

    def compress_old_colour_and_multiset(self, old_label, multiset):
        """
        Return a colour for (old_label, multiset) as a hash of the pair.
        """
        # Fix: sort_keys=True canonicalizes the serialization so equal
        # multisets built in different insertion orders hash identically.
        tupe = (old_label, json.dumps(multiset._elements, sort_keys=True))
        return hash(tupe)
import multiset as m
import json
class IteratorScheme:
    """
    WL colour compressor mapping each unseen (colour, multiset) pair to the
    next free integer.
    """

    def __init__(self):
        self.compresser_ctr = 0  # highest colour handed out so far
        self.seen_tuples = {}    # (old colour, serialized multiset) -> colour

    def reset(self):
        """Forget all assigned colours and restart the counter."""
        self.compresser_ctr = 0
        self.seen_tuples = {}
        return self

    def set_initial_colours(self, g):
        """Colour each node of *g* by its degree (attribute "wl_colour")."""
        initial_wl_colour = {}
        for node in g.nodes:
            # Keep the counter above every initial colour so freshly
            # assigned colours never collide with degrees.
            if g.degree[node] > self.compresser_ctr:
                self.compresser_ctr = g.degree[node]
            initial_wl_colour[node] = g.degree[node]
        nx.set_node_attributes(g, initial_wl_colour, "wl_colour")
        return g

    def set_initial_multiset(self, g):
        """Attach to each node the multiset of its neighbours' colours."""
        multiset = {}
        for node in g.nodes:
            multiset[node] = m.Multiset()
            for neighbor in nx.all_neighbors(g, node):
                multiset[node].add(g.nodes[neighbor]["wl_colour"])
            pass
        nx.set_node_attributes(g, multiset, "neighbour_multiset")
        return g

    def compress_old_colour_and_multiset(self, old_label, multiset):
        """Return the colour for (old_label, multiset), assigning a new one if unseen."""
        # NOTE(review): json.dumps without sort_keys=True preserves dict
        # insertion order, so equal multisets built in different orders may
        # serialize differently and receive different colours — confirm
        # whether this is intended.
        tupe = (old_label, json.dumps(multiset._elements))
        if tupe in self.seen_tuples.keys():
            return self.seen_tuples[tupe]
        else:
            self.compresser_ctr = self.compresser_ctr + 1
            self.seen_tuples[tupe] = self.compresser_ctr
            return self.compresser_ctr
        pass
class StringCompressionScheme:
    """
    WL colour compressor that hashes labels / (colour, multiset) tuples
    instead of keeping an explicit counter.
    """

    def reset(self):
        """Stateless scheme; nothing to reset."""
        return self

    def set_initial_colours(self, g):
        """Colour each node by the hash of its 'label' attribute ('0' if absent)."""
        initial_wl_colour = {}
        for node in g.nodes:
            try:
                label = g.nodes[node]['label']
                label = label.strip("\"")
            except KeyError:
                label = '0'
                pass
            initial_wl_colour[node] = hash(label)
        nx.set_node_attributes(g, initial_wl_colour, "wl_colour")
        return g

    def set_initial_multiset(self, g):
        """Attach to each node the multiset of its neighbours' colours."""
        multiset = {}
        for node in g.nodes:
            multiset[node] = m.Multiset()
            for neighbor in nx.all_neighbors(g, node):
                multiset[node].add(g.nodes[neighbor]["wl_colour"])
            pass
        nx.set_node_attributes(g, multiset, "neighbour_multiset")
        return g

    def compress_old_colour_and_multiset(self, old_label, multiset):
        """Return a colour for (old_label, multiset) as a hash of the pair."""
        # NOTE(review): serialization depends on the multiset's dict
        # insertion order (no sort_keys); equal multisets may hash
        # differently — confirm whether this is intended.
        tupe = (old_label, json.dumps(multiset._elements))
        return hash(tupe)
    pass
import os
import time
import boto3
from botocore.exceptions import ClientError
from botocore.client import Config
from django.utils.crypto import get_random_string
from storages.utils import setting, lookup_env
def get_bucket_name():
    """Return the S3 bucket name from Django settings or the environment."""
    name = setting("AWS_STORAGE_BUCKET_NAME")
    return name or lookup_env(["DJANGO_AWS_STORAGE_BUCKET_NAME"])
def get_access_key_id():
    """Return the AWS access key id from settings or environment variables."""
    # The S3-specific setting/variable takes precedence over the generic one.
    return setting("AWS_S3_ACCESS_KEY_ID", setting("AWS_ACCESS_KEY_ID")) or lookup_env(
        ["AWS_S3_ACCESS_KEY_ID", "AWS_ACCESS_KEY_ID"]
    )
def get_secret_access_key():
    """Return the AWS secret access key from settings or environment variables."""
    # The S3-specific setting/variable takes precedence over the generic one.
    return setting(
        "AWS_S3_SECRET_ACCESS_KEY", setting("AWS_SECRET_ACCESS_KEY")
    ) or lookup_env(["AWS_S3_SECRET_ACCESS_KEY", "AWS_SECRET_ACCESS_KEY"])
def get_endpoint_url():
    """Return the S3 endpoint URL from settings or environment variables."""
    return setting("AWS_S3_ENDPOINT_URL") or lookup_env(
        ["AWS_S3_ENDPOINT_URL", "AWS_ENDPOINT_URL"]
    )
def file_form_upload_dir():
    """Return the upload directory setting (default: "temp_uploads")."""
    return setting("FILE_FORM_UPLOAD_DIR", "temp_uploads")
def get_client():
    """Build a boto3 S3 client from the configured credentials and endpoint.

    boto3 client creation can fail transiently when several threads create
    clients at once (https://github.com/boto/boto3/issues/801), so creation
    is retried in a short sleep loop until it succeeds.
    """
    signature_version = setting("AWS_S3_SIGNATURE_VERSION", None)
    region_name = setting("AWS_S3_REGION_NAME", None)
    while True:
        try:
            return boto3.client(
                "s3",
                endpoint_url=get_endpoint_url(),
                aws_access_key_id=get_access_key_id(),
                aws_secret_access_key=get_secret_access_key(),
                config=Config(
                    signature_version=signature_version, region_name=region_name
                ),
            )
        except Exception:
            # Bug fix: the original bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit, turning Ctrl-C into an infinite
            # retry loop.  Only ordinary exceptions trigger a retry.
            time.sleep(0.01)
def exists(client, bucket_name, name):
    """Return True if ``name`` is already a key in ``bucket_name``.

    A HEAD request either succeeds (key present) or raises ``ClientError``
    (key absent or inaccessible).  Adapted from storages.backends.s3boto3.
    """
    try:
        client.head_object(Bucket=bucket_name, Key=name)
    except ClientError:
        return False
    return True
def get_alternative_name(file_root, file_ext):
    """Return ``file_root`` plus a random 7-character suffix, then ``file_ext``.

    The suffix goes before the extension (if any).  Adapted from
    django.core.files.storage ``get_alternative_name``.
    """
    suffix = get_random_string(7)
    return f"{file_root}_{suffix}{file_ext}"
def get_available_name(client, bucket_name, name):
    """Return a key based on ``name`` that does not yet exist in the bucket.

    While the candidate is taken, keep generating alternatives with a random
    suffix.  Adapted from django.core.files.storage ``get_available_name``.
    """
    dir_name, file_name = os.path.split(name)
    # file_ext includes the leading dot.
    file_root, file_ext = os.path.splitext(file_name)
    candidate = name
    while exists(client, bucket_name, candidate):
        candidate = os.path.join(dir_name, get_alternative_name(file_root, file_ext))
    return candidate
import time
import boto3
from botocore.exceptions import ClientError
from botocore.client import Config
from django.utils.crypto import get_random_string
from storages.utils import setting, lookup_env
def get_bucket_name():
return setting("AWS_STORAGE_BUCKET_NAME") or lookup_env(
["DJANGO_AWS_STORAGE_BUCKET_NAME"]
)
def get_access_key_id():
return setting("AWS_S3_ACCESS_KEY_ID", setting("AWS_ACCESS_KEY_ID")) or lookup_env(
["AWS_S3_ACCESS_KEY_ID", "AWS_ACCESS_KEY_ID"]
)
def get_secret_access_key():
return setting(
"AWS_S3_SECRET_ACCESS_KEY", setting("AWS_SECRET_ACCESS_KEY")
) or lookup_env(["AWS_S3_SECRET_ACCESS_KEY", "AWS_SECRET_ACCESS_KEY"])
def get_endpoint_url():
return setting("AWS_S3_ENDPOINT_URL") or lookup_env(
["AWS_S3_ENDPOINT_URL", "AWS_ENDPOINT_URL"]
)
def file_form_upload_dir():
return setting("FILE_FORM_UPLOAD_DIR", "temp_uploads")
def get_client():
signature_version = setting("AWS_S3_SIGNATURE_VERSION", None)
region_name = setting("AWS_S3_REGION_NAME", None)
while True:
try:
# https://github.com/boto/boto3/issues/801
return boto3.client(
"s3",
endpoint_url=get_endpoint_url(),
aws_access_key_id=get_access_key_id(),
aws_secret_access_key=get_secret_access_key(),
config=Config(
signature_version=signature_version, region_name=region_name
),
)
except:
time.sleep(0.01)
def exists(client, bucket_name, name):
"""
Check if key already exists in bucket.
Code adapted from storage.backends.s3boto3
"""
try:
client.head_object(Bucket=bucket_name, Key=name)
return True
except ClientError:
return False
def get_alternative_name(file_root, file_ext):
"""
Return an alternative filename, by adding an underscore and a random 7
character alphanumeric string (before the file extension, if one
exists) to the filename.
Code adapted from django.storage.get_alternative_name
"""
return f"{file_root}_{get_random_string(7)}{file_ext}"
def get_available_name(client, bucket_name, name):
"""
Return a filename that's free on the target storage system and
available for new content to be written to.
Code adapted from django.storage.get_available_name
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, generate an alternative filename
# until it doesn't exist.
while exists(client, bucket_name, name):
# file_ext includes the dot.
name = os.path.join(dir_name, get_alternative_name(file_root, file_ext))
return name | 0.419053 | 0.070528 |
import logging
from abc import ABCMeta
import aiohttp
import jinja2
import sender
import telepot
from jinja2 import PackageLoader
from page_monitor import config
# Jinja environment backed by the package's ``templates`` directory; the
# ``email.html`` template is loaded once at import time.
env = jinja2.Environment(loader=PackageLoader('page_monitor', 'templates'))
email_template = env.get_template('email.html')
logger = logging.getLogger(__name__)
class Action(metaclass=ABCMeta):
    """Abstract base for notification actions.

    Subclasses override ``ACTION_TYPE`` with a short identifier for the
    action kind (e.g. 'email', 'telegram').
    """
    ACTION_TYPE = ''
class ActionEmail(Action):
    """Send a page-change notification e-mail via SMTP or Mailgun.

    The backend is chosen at send time from ``config.EMAIL_SERVICE``
    ('smtp' or 'mailgun'); any other value silently sends nothing.
    """
    ACTION_TYPE = 'email'

    def __init__(self, email_to: str):
        # Destination address (a single recipient).
        self.email_to = email_to

    async def send_email(self, url: str, name: str, diff: str):
        """Render the diff as HTML and plain text, then dispatch the e-mail.

        :param url: page that changed (linked in the message body).
        :param name: human-readable page name, used in the subject.
        :param diff: diff text; '+ '/'- ' lines are colourised in the HTML.
        """
        lines = []
        for line in diff.split('\n'):
            # Colour additions green and removals red for the HTML body.
            if line.startswith('+ '):
                line = f'<span style="color: #28a745">{line}</span>'
            elif line.startswith('- '):
                line = f'<span style="color: #dc3545">{line}</span>'
            lines.append(line)
        colored_diff = '<br>'.join(lines)
        rendered_template = email_template.render(name=name, url=url,
                                                  colored_diff=colored_diff)
        # Plain-text fallback for clients that do not render HTML.
        text_content = f'''
Content change detected for {name}
{diff}
See here: {url}
'''
        subject = f'Content change detected for {name}'
        if config.EMAIL_SERVICE == 'smtp':
            await self._send_with_smtp(subject, text_content,
                                       rendered_template)
        elif config.EMAIL_SERVICE == 'mailgun':
            await self._send_with_mailgun(subject, text_content,
                                          rendered_template)

    async def _send_with_smtp(self, subject: str, text_content: str,
                              html_content: str):
        """Send via the configured SMTP relay using the ``sender`` package.

        NOTE(review): ``mail.send`` is a blocking call inside a coroutine,
        so it stalls the event loop for the whole SMTP exchange -- confirm
        whether it should run in an executor.
        """
        message = sender.Message(subject)
        message.html = html_content
        message.body = text_content
        message.to = self.email_to
        message.fromaddr = f'{config.SMTP_FROM_EMAIL}'
        mail = sender.Mail(host=config.SMTP_HOST,
                           username=config.SMTP_USERNAME,
                           password=config.SMTP_PASSWORD,
                           port=config.SMTP_PORT, use_tls=config.SMTP_USE_TLS)
        mail.send(message)

    async def _send_with_mailgun(self, subject: str, text_content: str,
                                 html_content: str):
        """Send through the Mailgun HTTP API; failures are logged, not raised."""
        mailgun_url = (f'https://api.mailgun.net/v3/'
                       f'{config.MAILGUN_DOMAIN}/messages')
        mailgun_data = {
            'from': f'{config.MAILGUN_FROM_NAME} '
                    f'<{config.MAILGUN_FROM_EMAIL}>',
            'to': [self.email_to],
            'subject': subject,
            'text': text_content,
            'html': html_content
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(mailgun_url, auth=aiohttp.BasicAuth(
                    "api", config.MAILGUN_API_KEY), data=mailgun_data) as r:
                try:
                    r.raise_for_status()
                    logger.info(f"Sent email to {self.email_to}")
                except Exception:
                    logger.exception(f'Failed to send email to '
                                     f'{self.email_to}')
class ActionTelegram(Action):
    """Push a page-change notification to a Telegram chat via a bot."""
    ACTION_TYPE = 'telegram'

    def __init__(self, chat_id: str, token: str):
        self.chat_id = chat_id
        self._bot = telepot.Bot(token)

    def send_telegram_message(self, url: str, name: str, diff: str):
        """Send a Markdown message with the diff in a fenced code block."""
        header = f'Content change detected on [{name}]({url})'
        fenced_diff = f'```\n{diff}```'
        footer = f'[{url}]({url})'
        content = '\n\n'.join((header, fenced_diff, footer))
        self._bot.sendMessage(self.chat_id, content, parse_mode='Markdown')
        logger.info(f"Sent Telegram message to chat {self.chat_id}")
from abc import ABCMeta
import aiohttp
import jinja2
import sender
import telepot
from jinja2 import PackageLoader
from page_monitor import config
env = jinja2.Environment(loader=PackageLoader('page_monitor', 'templates'))
email_template = env.get_template('email.html')
logger = logging.getLogger(__name__)
class Action(metaclass=ABCMeta):
ACTION_TYPE = ''
class ActionEmail(Action):
ACTION_TYPE = 'email'
def __init__(self, email_to: str):
self.email_to = email_to
async def send_email(self, url: str, name: str, diff: str):
lines = []
for line in diff.split('\n'):
if line.startswith('+ '):
line = f'<span style="color: #28a745">{line}</span>'
elif line.startswith('- '):
line = f'<span style="color: #dc3545">{line}</span>'
lines.append(line)
colored_diff = '<br>'.join(lines)
rendered_template = email_template.render(name=name, url=url,
colored_diff=colored_diff)
text_content = f'''
Content change detected for {name}
{diff}
See here: {url}
'''
subject = f'Content change detected for {name}'
if config.EMAIL_SERVICE == 'smtp':
await self._send_with_smtp(subject, text_content,
rendered_template)
elif config.EMAIL_SERVICE == 'mailgun':
await self._send_with_mailgun(subject, text_content,
rendered_template)
async def _send_with_smtp(self, subject: str, text_content: str,
html_content: str):
message = sender.Message(subject)
message.html = html_content
message.body = text_content
message.to = self.email_to
message.fromaddr = f'{config.SMTP_FROM_EMAIL}'
mail = sender.Mail(host=config.SMTP_HOST,
username=config.SMTP_USERNAME,
password=config.SMTP_PASSWORD,
port=config.SMTP_PORT, use_tls=config.SMTP_USE_TLS)
mail.send(message)
async def _send_with_mailgun(self, subject: str, text_content: str,
html_content: str):
mailgun_url = (f'https://api.mailgun.net/v3/'
f'{config.MAILGUN_DOMAIN}/messages')
mailgun_data = {
'from': f'{config.MAILGUN_FROM_NAME} '
f'<{config.MAILGUN_FROM_EMAIL}>',
'to': [self.email_to],
'subject': subject,
'text': text_content,
'html': html_content
}
async with aiohttp.ClientSession() as session:
async with session.post(mailgun_url, auth=aiohttp.BasicAuth(
"api", config.MAILGUN_API_KEY), data=mailgun_data) as r:
try:
r.raise_for_status()
logger.info(f"Sent email to {self.email_to}")
except Exception:
logger.exception(f'Failed to send email to '
f'{self.email_to}')
class ActionTelegram(Action):
ACTION_TYPE = 'telegram'
def __init__(self, chat_id: str, token: str):
self.chat_id = chat_id
self._bot = telepot.Bot(token)
def send_telegram_message(self, url: str, name: str, diff: str):
content = (f'Content change detected on [{name}]({url})\n\n'
f'```\n{diff}```\n\n[{url}]({url})')
self._bot.sendMessage(self.chat_id, content, parse_mode='Markdown')
logger.info(f"Sent Telegram message to chat {self.chat_id}") | 0.46223 | 0.076961 |
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func, desc, asc, distinct, and_, or_
from sqlalchemy.orm import relationship
from config import app_active, app_config
from model.User import User
from model.Category import Category
# Active configuration profile and the SQLAlchemy handle bound to its app.
config = app_config[app_active]
db = SQLAlchemy(config.APP)
class Product(db.Model):
    """ORM model for a store product, plus simple query helpers.

    NOTE(review): every helper swallows exceptions, prints them and returns
    ``[]`` -- even the ``.first()`` helpers whose success value is a single
    row or ``None`` -- so callers must handle both shapes.  Confirm whether
    logging and re-raising would be preferable.
    """
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(20), unique=True, nullable=False)
    description = db.Column(db.Text(), nullable=False)
    qtd = db.Column(db.Integer, nullable=True, default=0)  # stock quantity
    image = db.Column(db.Text(), nullable=True)
    price = db.Column(db.Numeric(10,2), nullable=False)
    date_created = db.Column(db.DateTime(6), default=db.func.current_timestamp(), nullable=False)
    last_update = db.Column(db.DateTime(6), onupdate=db.func.current_timestamp(), nullable=False)
    status = db.Column(db.Integer, default=1, nullable=True)
    user_created = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
    category = db.Column(db.Integer, db.ForeignKey(Category.id), nullable=False)
    usuario = relationship(User)
    categoria = relationship(Category)

    def get_all(self, limit):
        """Return products ordered by creation date, capped at ``limit`` if given."""
        try:
            if limit is None:
                res = db.session.query(Product).all()
            else:
                res = db.session.query(Product).order_by(Product.date_created).limit(limit).all()
        except Exception as e:
            res = []
            print(e)
        finally:
            db.session.close()
        return res

    def get_total_products(self):
        """Return a one-element row holding the product count (or [] on error)."""
        try:
            res = db.session.query(func.count(Product.id)).first()
        except Exception as e:
            res = []
            print(e)
        finally:
            db.session.close()
        return res

    def get_last_products(self):
        """Return five products by creation date.

        NOTE(review): the name suggests the *latest* products, but
        ``order_by(Product.date_created)`` sorts ascending, so this returns
        the oldest five -- confirm whether ``desc()`` was intended.
        """
        try:
            res = db.session.query(Product).order_by(Product.date_created).limit(5).all()
        except Exception as e:
            res = []
            print(e)
        finally:
            db.session.close()
        return res

    def get_product_by_id(self):
        """Return the product whose id equals ``self.id`` (None if absent)."""
        try:
            res = db.session.query(Product).filter(Product.id==self.id).first()
        except Exception as e:
            res = []
            print(e)
        finally:
            db.session.close()
        return res
from sqlalchemy import func, desc, asc, distinct, and_, or_
from sqlalchemy.orm import relationship
from config import app_active, app_config
from model.User import User
from model.Category import Category
config = app_config[app_active]
db = SQLAlchemy(config.APP)
class Product(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), unique=True, nullable=False)
description = db.Column(db.Text(), nullable=False)
qtd = db.Column(db.Integer, nullable=True, default=0)
image = db.Column(db.Text(), nullable=True)
price = db.Column(db.Numeric(10,2), nullable=False)
date_created = db.Column(db.DateTime(6), default=db.func.current_timestamp(), nullable=False)
last_update = db.Column(db.DateTime(6), onupdate=db.func.current_timestamp(), nullable=False)
status = db.Column(db.Integer, default=1, nullable=True)
user_created = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
category = db.Column(db.Integer, db.ForeignKey(Category.id), nullable=False)
usuario = relationship(User)
categoria = relationship(Category)
def get_all(self, limit):
try:
if limit is None:
res = db.session.query(Product).all()
else:
res = db.session.query(Product).order_by(Product.date_created).limit(limit).all()
except Exception as e:
res = []
print(e)
finally:
db.session.close()
return res
def get_total_products(self):
try:
res = db.session.query(func.count(Product.id)).first()
except Exception as e:
res = []
print(e)
finally:
db.session.close()
return res
def get_last_products(self):
try:
res = db.session.query(Product).order_by(Product.date_created).limit(5).all()
except Exception as e:
res = []
print(e)
finally:
db.session.close()
return res
def get_product_by_id(self):
try:
res = db.session.query(Product).filter(Product.id==self.id).first()
except Exception as e:
res = []
print(e)
finally:
db.session.close()
return res | 0.369884 | 0.07072 |
import glob
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
import time
class ModuleCacheTestcaseUniversal(TestBase):
    """Check that the LLDB index cache writes one entry per universal-binary slice."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Point the index cache at a directory inside this test's build
        # artifacts directory so no other tests are interfered with.
        self.cache_dir = os.path.join(self.getBuildDir(), 'lldb-module-cache')
        self.runCmd('settings set symbols.lldb-index-cache-path "%s"' % (self.cache_dir))
        self.runCmd('settings set symbols.enable-lldb-index-cache true')

    def get_module_cache_files(self, basename):
        # Return the llvmcache-* files in the cache dir whose names mention
        # ``basename``.
        module_file_glob = os.path.join(self.cache_dir, "llvmcache-*%s*" % (basename))
        return glob.glob(module_file_glob)

    # Doesn't depend on any specific debug information.
    @no_debug_info_test
    def test(self):
        """
        Test module cache functionality for universal mach-o files.
        This will test that if we enable the module cache, we can create
        lldb module caches for each slice of a universal mach-o file and
        they will each have a unique directory.
        """
        exe_basename = "testit"
        src_dir = self.getSourceDir()
        yaml_path = os.path.join(src_dir, "universal.yaml")
        yaml_base, ext = os.path.splitext(yaml_path)
        exe = self.getBuildArtifact(exe_basename)
        # Materialise the universal (x86_64 + arm64) binary from its YAML form.
        self.yaml2obj(yaml_path, exe)
        self.assertTrue(os.path.exists(exe))
        # Create a target per slice; dumping the symtab forces the symbol
        # table to be parsed and therefore written to the index cache.
        self.runCmd('target create -d --arch x86_64 %s' % (exe))
        self.runCmd('image dump symtab %s' % (exe_basename))
        self.runCmd('target create -d --arch arm64 %s' % (exe))
        self.runCmd('image dump symtab %s' % (exe_basename))
        cache_files = self.get_module_cache_files(exe_basename)
        self.assertEqual(len(cache_files), 2,
            "make sure there are two files in the module cache directory (%s) for %s" % (self.cache_dir, exe_basename))
import glob
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
import time
class ModuleCacheTestcaseUniversal(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number in a(int) to break at.
self.cache_dir = os.path.join(self.getBuildDir(), 'lldb-module-cache')
# Set the lldb module cache directory to a directory inside the build
# artifacts directory so no other tests are interfered with.
self.runCmd('settings set symbols.lldb-index-cache-path "%s"' % (self.cache_dir))
self.runCmd('settings set symbols.enable-lldb-index-cache true')
def get_module_cache_files(self, basename):
module_file_glob = os.path.join(self.cache_dir, "llvmcache-*%s*" % (basename))
return glob.glob(module_file_glob)
# Doesn't depend on any specific debug information.
@no_debug_info_test
def test(self):
"""
Test module cache functionality for a universal mach-o files.
This will test that if we enable the module cache, we can create
lldb module caches for each slice of a universal mach-o file and
they will each have a unique directory.
"""
exe_basename = "testit"
src_dir = self.getSourceDir()
yaml_path = os.path.join(src_dir, "universal.yaml")
yaml_base, ext = os.path.splitext(yaml_path)
exe = self.getBuildArtifact(exe_basename)
self.yaml2obj(yaml_path, exe)
self.assertTrue(os.path.exists(exe))
# Create a module with no depedencies.
self.runCmd('target create -d --arch x86_64 %s' % (exe))
self.runCmd('image dump symtab %s' % (exe_basename))
self.runCmd('target create -d --arch arm64 %s' % (exe))
self.runCmd('image dump symtab %s' % (exe_basename))
cache_files = self.get_module_cache_files(exe_basename)
self.assertEqual(len(cache_files), 2,
"make sure there are two files in the module cache directory (%s) for %s" % (self.cache_dir, exe_basename)) | 0.398992 | 0.223123 |
from __future__ import unicode_literals
import json
import operator
from wtforms import fields, widgets
# Public API of this module.
__all__ = [
    'KeyPropertyField',
    'JsonPropertyField',
    'RepeatedKeyPropertyField',
    'PrefetchedKeyPropertyField',
    'RepeatedPrefetchedKeyPropertyField',
    'StringListPropertyField',
    'IntegerListPropertyField',
    'ReferencePropertyField']
class KeyPropertyField(fields.SelectFieldBase):
    """
    A field for ``ndb.KeyProperty``. The list items are rendered in a select.
    :param ndb.Model reference_class:
        A Model class which will be used to generate the default query
        to make the list of items. If this is not specified, the `query`
        argument must be provided.
    :param get_label:
        If a string, use this attribute on the model class as the label
        associated with each option. If a one-argument callable, this callable
        will be passed model instance and expected to return the label text.
        Otherwise, the model object's `__str__` or `__unicode__` will be used.
    :param allow_blank:
        If set to true, a blank choice will be added to the top of the list
        to allow `None` to be chosen.
    :param blank_text:
        Use this to override the default blank option's label.
    :param ndb.Query query:
        A query to provide a list of valid options.
    """
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, reference_class=None,
                 get_label=str, allow_blank=False, blank_text='',
                 query=None, **kwargs):
        super(KeyPropertyField, self).__init__(label, validators, **kwargs)
        # A string get_label means "read this attribute off each entity".
        if isinstance(get_label, str):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label
        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self._set_data(None)
        if reference_class is not None:
            query = query or reference_class.query()
        if query:
            self.set_query(query)

    def set_query(self, query):
        # Evaluate and set the query value.
        # Setting the query manually will still work, but is not advised
        # as each iteration through it will cause it to be re-evaluated.
        self.query = query.fetch()

    @staticmethod
    def _key_value(key):
        """
        Gets the form-friendly representation of the ndb.Key.
        This should return a hashable object (such as a string).
        """
        # n.b. Possible security concern here as urlsafe() exposes
        # *all* the detail about the instance. But it's also the only
        # way to reliably record ancestor information, and ID values in
        # a typesafe manner.
        # Possible fix: Hash the value of urlsafe
        return key.urlsafe()

    def _get_data(self):
        # Lazily resolve the raw form value back to an ndb.Key the first
        # time ``data`` is read after a submission.
        if self._formdata is not None:
            for obj in self.query:
                if self._key_value(obj.key) == self._formdata:
                    self._set_data(obj.key)
                    break
        return self._data

    def _set_data(self, data):
        # Assigning data directly clears any pending raw form value.
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def iter_choices(self):
        # Yield (value, label, selected) tuples for the select widget.
        if self.allow_blank:
            yield ('__None', self.blank_text, self.data is None)
        for obj in self.query:
            key = self._key_value(obj.key)
            label = self.get_label(obj)
            yield (key,
                   label,
                   (self.data == obj.key) if self.data else False)

    def process_formdata(self, valuelist):
        # '__None' is the sentinel value of the optional blank choice.
        if valuelist:
            if valuelist[0] == '__None':
                self.data = None
            else:
                self._data = None
                self._formdata = valuelist[0]

    def pre_validate(self, form):
        # The chosen key must be one of the query results, or blank when
        # blanks are allowed.
        if self.data is not None:
            for obj in self.query:
                if self.data == obj.key:
                    break
            else:
                raise ValueError(self.gettext('Not a valid choice'))
        elif not self.allow_blank:
            raise ValueError(self.gettext('Not a valid choice'))

    def populate_obj(self, obj, name):
        # Write the selected ndb.Key onto the model attribute.
        setattr(obj, name, self.data)
class SelectMultipleMixin(object):
    """Mixin turning a key-property field into a multiple-select.

    Overrides the single-select data handling so that ``data`` holds a list
    and the widget renders with ``multiple=True``.
    """
    widget = widgets.Select(multiple=True)

    def iter_choices(self):
        """Yield (value, label, selected) for every entity in the query."""
        data = self.data or []
        for obj in self.query:
            key = self._key_value(obj.key)
            label = self.get_label(obj)
            # NOTE(review): process_data stores entities while _get_data
            # stores keys, so this ``obj.key in data`` test only matches in
            # the key case -- confirm which shape ``data`` should hold.
            selected = obj.key in data
            yield (key, label, selected)

    def process_data(self, value):
        """Resolve stored keys to entities (fetches started concurrently)."""
        if value:
            futures = [x.get_async() for x in value]
            self.data = [x.get_result() for x in futures]
        else:
            self.data = None

    def process_formdata(self, valuelist):
        # Defer resolution of the submitted values until ``data`` is read.
        self._formdata = valuelist

    def pre_validate(self, form):
        """Reject any submitted key that is not among the query results."""
        if self.data:
            values = [x.key for x in self.query]
            for d in self.data:
                if d not in values:
                    # Bug fix: the placeholder was never substituted, so users
                    # saw a literal "%(value)s".  Interpolate the offending
                    # value and run the message through gettext like the other
                    # field errors.
                    raise ValueError(self.gettext(
                        '%(value)s is not a valid choice for this field'
                    ) % dict(value=d))

    def _get_data(self):
        if self._formdata is not None:
            # Map submitted form values back to keys; unknown values pass
            # through unchanged so pre_validate can reject them.
            mapping = {self._key_value(obj.key): obj.key for obj in self.query}
            self._set_data([mapping.get(x, x) for x in self._formdata])
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def populate_obj(self, obj, name):
        # Repeated properties expect a list, never None.
        setattr(obj, name, self.data or [])
class RepeatedKeyPropertyField(SelectMultipleMixin, KeyPropertyField):
    """Multi-select field for a repeated ``ndb.KeyProperty``."""
    widget = widgets.Select(multiple=True)
class PrefetchedKeyPropertyField(KeyPropertyField):
    """
    A field for ``ndb.KeyProperty``. The list items are rendered in a select.
    The query is executed asynchronously. This should provide noticeable speed
    improvements on forms with multiple KeyProperty fields.
    See :py:`KeyPropertyField` for constructor arguments.
    """
    widget = widgets.Select()

    def set_query(self, query):
        # Start the datastore fetch without blocking; the result is only
        # awaited when ``query`` is first read.
        self._query = query.fetch_async()

    @property
    def query(self):
        # Blocks until the async fetch started in set_query completes.
        return self._query.get_result()
class RepeatedPrefetchedKeyPropertyField(SelectMultipleMixin,
                                         PrefetchedKeyPropertyField):
    """Multi-select variant of ``PrefetchedKeyPropertyField``."""
    widget = widgets.Select(multiple=True)
class JsonPropertyField(fields.StringField):
    """
    A field whose value is edited as JSON text in a textarea and held as the
    parsed Python object.  (The previous docstring claimed an
    ``<input type="text">``, but the widget is a TextArea.)
    """
    widget = widgets.TextArea()

    def process_formdata(self, valuelist):
        # Parse the submitted JSON text; an empty submission means "no data".
        # NOTE(review): json.loads raises on malformed input -- confirm that
        # surfacing it as a processing error is intended.
        if valuelist:
            self.data = json.loads(valuelist[0])
        else:
            self.data = None

    def _value(self):
        # Serialise back to JSON for re-display; empty string when unset.
        return json.dumps(self.data) if self.data is not None else ''
class ReferencePropertyField(KeyPropertyField):
    """
    A field for ``db.ReferenceProperty``. The list items are rendered in a
    select.
    :param reference_class:
        A db.Model class which will be used to generate the default query
        to make the list of items. If this is not specified, the `query`
        property must be overridden before validation.
    :param get_label:
        If a string, use this attribute on the model class as the label
        associated with each option. If a one-argument callable, this callable
        will be passed model instance and expected to return the label text.
        Otherwise, the model object's `__str__` or `__unicode__` will be used.
    :param allow_blank:
        If set to true, a blank choice will be added to the top of the list
        to allow `None` to be chosen.
    :param blank_text:
        Use this to override the default blank option's label.
    """
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, reference_class=None,
                 get_label=None, allow_blank=False,
                 blank_text='', **kwargs):
        super(ReferencePropertyField, self).__init__(label, validators,
                                                     **kwargs)
        # Default label is the entity itself (its __str__/__unicode__).
        if get_label is None:
            self.get_label = lambda x: x
        elif isinstance(get_label, str):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label
        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self._set_data(None)
        if reference_class is not None:
            # NOTE(review): unlike the parent class this assigns the query
            # object itself (not .fetch()), so it is re-run on every
            # iteration -- confirm whether that is intended.
            self.query = reference_class.query()

    def _get_data(self):
        # Resolve the submitted value back to a model *instance* (the parent
        # class stores a key instead).
        # NOTE(review): this compares str(obj.key), while iter_choices emits
        # _key_value(obj.key) (urlsafe) as the option value -- the two only
        # agree if those representations coincide; verify against the
        # rendered form HTML.
        if self._formdata is not None:
            for obj in self.query:
                if str(obj.key) == self._formdata:
                    self._set_data(obj)
                    break
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def iter_choices(self):
        if self.allow_blank:
            yield ('__None', self.blank_text, self.data is None)
        for obj in self.query:
            key = self._key_value(obj.key)
            label = self.get_label(obj)
            # NOTE(review): self.data is an entity here but is compared
            # against obj.key, so the current choice may never render as
            # selected -- confirm.
            yield (key,
                   label,
                   (self.data == obj.key) if self.data else False)

    def process_formdata(self, valuelist):
        # '__None' is the sentinel value of the optional blank choice.
        if valuelist:
            if valuelist[0] == '__None':
                self.data = None
            else:
                self._data = None
                self._formdata = valuelist[0]

    def pre_validate(self, form):
        # The chosen entity's key must match one of the query results.
        data = self.data
        if data is not None:
            s_key = str(data.key)
            for obj in self.query:
                if s_key == str(obj.key):
                    break
            else:
                raise ValueError(self.gettext('Not a valid choice'))
        elif not self.allow_blank:
            raise ValueError(self.gettext('Not a valid choice'))
class StringListPropertyField(fields.TextAreaField):
    """
    A field for ``db.StringListProperty``.  The list is edited as one item
    per line in a textarea.
    """

    def _value(self):
        """Render the list one item per line; raw form input wins."""
        if self.raw_data:
            return self.raw_data[0]
        if not self.data:
            return ''
        return str("\n".join(self.data))

    def process_formdata(self, valuelist):
        """Split the submitted textarea content into a list of lines."""
        if not valuelist:
            return
        try:
            self.data = valuelist[0].splitlines()
        except ValueError:
            raise ValueError(self.gettext('Not a valid list'))
class IntegerListPropertyField(fields.TextAreaField):
    """
    A field for an integer list property.  The list items are rendered one
    per line in a textarea.  (Docstring previously copy-pasted from
    ``StringListPropertyField``.)
    """

    def _value(self):
        """Render the integers one per line; raw form input wins."""
        if self.raw_data:
            return self.raw_data[0]
        else:
            # Bug fix: the items are ints, so they must be stringified before
            # joining -- ``'\n'.join([1, 2])`` raises TypeError.
            return '\n'.join(str(value) for value in self.data) if self.data else ''

    def process_formdata(self, valuelist):
        """Parse one integer per submitted line.

        :raises ValueError: if any line is not a valid integer.
        """
        if valuelist:
            try:
                self.data = [int(value) for value in valuelist[0].splitlines()]
            except ValueError:
                raise ValueError(self.gettext('Not a valid integer list'))
import json
import operator
from wtforms import fields, widgets
__all__ = [
'KeyPropertyField',
'JsonPropertyField',
'RepeatedKeyPropertyField',
'PrefetchedKeyPropertyField',
'RepeatedPrefetchedKeyPropertyField',
'StringListPropertyField',
'IntegerListPropertyField',
'ReferencePropertyField']
class KeyPropertyField(fields.SelectFieldBase):
"""
A field for ``ndb.KeyProperty``. The list items are rendered in a select.
:param ndb.Model reference_class:
A Model class which will be used to generate the default query
to make the list of items. If this is not specified, The `query`
argument must be provided.
:param get_label:
If a string, use this attribute on the model class as the label
associated with each option. If a one-argument callable, this callable
will be passed model instance and expected to return the label text.
Otherwise, the model object's `__str__` or `__unicode__` will be used.
:param allow_blank:
If set to true, a blank choice will be added to the top of the list
to allow `None` to be chosen.
:param blank_text:
Use this to override the default blank option's label.
:param ndb.Query query:
A query to provide a list of valid options.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, reference_class=None,
get_label=str, allow_blank=False, blank_text='',
query=None, **kwargs):
super(KeyPropertyField, self).__init__(label, validators, **kwargs)
if isinstance(get_label, str):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if reference_class is not None:
query = query or reference_class.query()
if query:
self.set_query(query)
def set_query(self, query):
# Evaluate and set the query value
# Setting the query manually will still work, but is not advised
# as each iteration though it will cause it to be re-evaluated.
self.query = query.fetch()
@staticmethod
def _key_value(key):
"""
Get's the form-friendly representation of the ndb.Key.
This should return a hashable object (such as a string).
"""
# n.b. Possible security concern here as urlsafe() exposes
# *all* the detail about the instance. But it's also the only
# way to reliably record ancestor information, and ID values in
# a typesafe manner.
# Possible fix: Hash the value of urlsafe
return key.urlsafe()
def _get_data(self):
if self._formdata is not None:
for obj in self.query:
if self._key_value(obj.key) == self._formdata:
self._set_data(obj.key)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield ('__None', self.blank_text, self.data is None)
for obj in self.query:
key = self._key_value(obj.key)
label = self.get_label(obj)
yield (key,
label,
(self.data == obj.key) if self.data else False)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if self.data is not None:
for obj in self.query:
if self.data == obj.key:
break
else:
raise ValueError(self.gettext('Not a valid choice'))
elif not self.allow_blank:
raise ValueError(self.gettext('Not a valid choice'))
def populate_obj(self, obj, name):
setattr(obj, name, self.data)
class SelectMultipleMixin(object):
widget = widgets.Select(multiple=True)
def iter_choices(self):
data = self.data or []
for obj in self.query:
key = self._key_value(obj.key)
label = self.get_label(obj)
selected = obj.key in data
yield (key, label, selected)
def process_data(self, value):
if value:
futures = [x.get_async() for x in value]
self.data = [x.get_result() for x in futures]
else:
self.data = None
def process_formdata(self, valuelist):
self._formdata = valuelist
def pre_validate(self, form):
if self.data:
values = [x.key for x in self.query]
for d in self.data:
if d not in values:
raise ValueError(
"%(value)s is not a valid choice for this field")
def _get_data(self):
if self._formdata is not None:
m = {self._key_value(obj.key): obj.key for obj in self.query}
self._set_data([m.get(x, x) for x in self._formdata])
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def populate_obj(self, obj, name):
setattr(obj, name, self.data or [])
class RepeatedKeyPropertyField(SelectMultipleMixin, KeyPropertyField):
widget = widgets.Select(multiple=True)
class PrefetchedKeyPropertyField(KeyPropertyField):
"""
A field for ``ndb.KeyProperty``. The list items are rendered in a select.
The query is executed asynchronously. This should provide noticable speed
improvements on forms with multiple KeyProperty fields.
See :py:`KeyPropertyField` for constructor arguments.
"""
widget = widgets.Select()
def set_query(self, query):
self._query = query.fetch_async()
@property
def query(self):
return self._query.get_result()
class RepeatedPrefetchedKeyPropertyField(SelectMultipleMixin,
PrefetchedKeyPropertyField):
widget = widgets.Select(multiple=True)
class JsonPropertyField(fields.StringField):
"""
This field is the base for most of the more complicated fields, and
represents an ``<input type="text">``.
"""
widget = widgets.TextArea()
def process_formdata(self, valuelist):
if valuelist:
self.data = json.loads(valuelist[0])
else:
self.data = None
def _value(self):
return json.dumps(self.data) if self.data is not None else ''
class ReferencePropertyField(KeyPropertyField):
"""
A field for ``db.ReferenceProperty``. The list items are rendered in a
select.
:param reference_class:
A db.Model class which will be used to generate the default query
to make the list of items. If this is not specified, The `query`
property must be overridden before validation.
:param get_label:
If a string, use this attribute on the model class as the label
associated with each option. If a one-argument callable, this callable
will be passed model instance and expected to return the label text.
Otherwise, the model object's `__str__` or `__unicode__` will be used.
:param allow_blank:
If set to true, a blank choice will be added to the top of the list
to allow `None` to be chosen.
:param blank_text:
Use this to override the default blank option's label.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, reference_class=None,
get_label=None, allow_blank=False,
blank_text='', **kwargs):
super(ReferencePropertyField, self).__init__(label, validators,
**kwargs)
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, str):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if reference_class is not None:
self.query = reference_class.query()
def _get_data(self):
if self._formdata is not None:
for obj in self.query:
if str(obj.key) == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield ('__None', self.blank_text, self.data is None)
for obj in self.query:
key = self._key_value(obj.key)
label = self.get_label(obj)
yield (key,
label,
(self.data == obj.key) if self.data else False)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
data = self.data
if data is not None:
s_key = str(data.key)
for obj in self.query:
if s_key == str(obj.key):
break
else:
raise ValueError(self.gettext('Not a valid choice'))
elif not self.allow_blank:
raise ValueError(self.gettext('Not a valid choice'))
class StringListPropertyField(fields.TextAreaField):
"""
A field for ``db.StringListProperty``. The list items are rendered in a
textarea.
"""
def _value(self):
if self.raw_data:
return self.raw_data[0]
else:
return self.data and str("\n".join(self.data)) or ''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = valuelist[0].splitlines()
except ValueError:
raise ValueError(self.gettext('Not a valid list'))
class IntegerListPropertyField(fields.TextAreaField):
"""
A field for ``db.StringListProperty``. The list items are rendered in a
textarea.
"""
def _value(self):
if self.raw_data:
return self.raw_data[0]
else:
return str('\n'.join(self.data)) if self.data else ''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = [int(value) for value in valuelist[0].splitlines()]
except ValueError:
raise ValueError(self.gettext('Not a valid integer list')) | 0.864668 | 0.264435 |
#Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (SecurityGroup,
Account)
from marvin.lib.common import (get_zone,
get_domain,
get_template)
from marvin.lib.utils import (validateList,
cleanup_resources)
from marvin.codes import (PASS, EMPTY_LIST)
from nose.plugins.attrib import attr
class TestSecurityGroups(cloudstackTestCase):
    """
    Escalation tests for Security Groups driven through a regular-user
    API client: list pagination, and authorize/revoke of ingress and
    egress rules.
    """

    @classmethod
    def setUpClass(cls):
        # One-time fixture: API clients, zone/domain/template lookup, a
        # dedicated account and a user-level API client bound to it.
        try:
            cls._cleanup = []
            cls.testClient = super(TestSecurityGroups, cls).getClsTestClient()
            cls.api_client = cls.testClient.getApiClient()
            cls.services = cls.testClient.getParsedTestDataConfig()
            # Get Domain, Zone, Template
            cls.domain = get_domain(cls.api_client)
            cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
            cls.template = get_template(
                cls.api_client,
                cls.zone.id,
                cls.services["ostype"]
            )
            cls.services['mode'] = cls.zone.networktype
            cls.account = Account.create(
                cls.api_client,
                cls.services["account"],
                domainid=cls.domain.id
            )
            # Getting authentication for user in newly created Account
            cls.user = cls.account.user[0]
            cls.userapiclient = cls.testClient.getUserApiClient(cls.user.username, cls.domain.name)
            cls._cleanup.append(cls.account)
        except Exception as e:
            # Best-effort cleanup of whatever was created before failing.
            cls.tearDownClass()
            raise Exception("Warning: Exception in setup : %s" % e)
        return

    def setUp(self):
        """Per-test setup: fresh admin API client and empty cleanup list."""
        self.apiClient = self.testClient.getApiClient()
        self.cleanup = []

    def tearDown(self):
        #Clean up, terminate the created resources
        cleanup_resources(self.apiClient, self.cleanup)
        return

    @classmethod
    def tearDownClass(cls):
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def __verify_values(self, expected_vals, actual_vals):
        """
        @Desc: Function to verify expected and actual values
        @Steps:
        Step1: Verifying length of expected and actual dictionaries is matching.
               If not matching returning false
        Step2: Looping through each key of the expected dictionary and
               verifying expected and actual dictionaries have same value.
               If not, the return flag is set to False
        Step3: returning the return flag after all the values are verified
        """
        if len(expected_vals) != len(actual_vals):
            return False
        return_flag = True
        # Iterate keys directly: dict views are not indexable on Python 3,
        # so the old expected_vals.keys()[i] pattern would raise TypeError.
        for key in expected_vals:
            exp_val = expected_vals[key]
            act_val = actual_vals[key]
            if exp_val != act_val:
                return_flag = False
                self.debug("expected Value: %s, is not matching with actual value: %s" % (
                    exp_val,
                    act_val
                ))
        return return_flag

    @attr(tags=["basic", "provisioning"])
    def test_01_list_securitygroups_pagination(self):
        """
        @Desc: Test to List Security Groups pagination
        @steps:
        Step1: Listing all the Security Groups for a user
        Step2: Verifying that list size is 1
        Step3: Creating (page size) number of Security Groups
        Step4: Listing all the Security Groups again for a user
        Step5: Verifying that list size is (page size + 1)
        Step6: Listing all the Security Groups in page1
        Step7: Verifying that list size is (page size)
        Step8: Listing all the Security Groups in page2
        Step9: Verifying that list size is 1
        Step10: Deleting the Security Group present in page 2
        Step11: Listing all the Security Groups in page2
        Step12: Verifying that no security groups are listed
        """
        # Listing all the Security Groups for a User
        list_securitygroups_before = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"]
        )
        # Verifying that default security group is created
        status = validateList(list_securitygroups_before)
        self.assertEqual(
            PASS,
            status[0],
            "Default Security Groups creation failed"
        )
        # Verifying the size of the list is 1
        self.assertEqual(
            1,
            len(list_securitygroups_before),
            "Count of Security Groups list is not matching"
        )
        # Creating pagesize number of security groups
        for i in range(0, (self.services["pagesize"])):
            securitygroup_created = SecurityGroup.create(
                self.userapiclient,
                self.services["security_group"],
                account=self.account.name,
                domainid=self.domain.id,
                description=self.services["security_group"]["name"]
            )
            self.assertIsNotNone(
                securitygroup_created,
                "Security Group creation failed"
            )
            # Register every created group for cleanup (the old guard
            # `i < pagesize` was always true inside this loop).
            self.cleanup.append(securitygroup_created)
        # Listing all the security groups for user again
        list_securitygroups_after = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"]
        )
        status = validateList(list_securitygroups_after)
        self.assertEqual(
            PASS,
            status[0],
            "Security Groups creation failed"
        )
        # Verifying that list size is pagesize + 1
        self.assertEqual(
            self.services["pagesize"] + 1,
            len(list_securitygroups_after),
            "Failed to create pagesize + 1 number of Security Groups"
        )
        # Listing all the security groups in page 1
        list_securitygroups_page1 = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"],
            page=1,
            pagesize=self.services["pagesize"]
        )
        status = validateList(list_securitygroups_page1)
        self.assertEqual(
            PASS,
            status[0],
            "Failed to list security groups in page 1"
        )
        # Verifying the list size to be equal to pagesize
        self.assertEqual(
            self.services["pagesize"],
            len(list_securitygroups_page1),
            "Size of security groups in page 1 is not matching"
        )
        # Listing all the security groups in page 2
        list_securitygroups_page2 = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"],
            page=2,
            pagesize=self.services["pagesize"]
        )
        status = validateList(list_securitygroups_page2)
        self.assertEqual(
            PASS,
            status[0],
            "Failed to list security groups in page 2"
        )
        # Verifying the list size to be equal to pagesize
        self.assertEqual(
            1,
            len(list_securitygroups_page2),
            "Size of security groups in page 2 is not matching"
        )
        # Deleting the security group present in page 2
        SecurityGroup.delete(
            securitygroup_created,
            self.userapiclient)
        self.cleanup.remove(securitygroup_created)
        # Listing all the security groups in page 2 again
        list_securitygroups_page2 = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"],
            page=2,
            pagesize=self.services["pagesize"]
        )
        # Verifying that there are no security groups listed
        self.assertIsNone(
            list_securitygroups_page2,
            "Security Groups not deleted from page 2"
        )
        return

    @attr(tags=["basic", "provisioning"])
    def test_02_securitygroups_authorize_revoke_ingress(self):
        """
        @Desc: Test to Authorize and Revoke Ingress for Security Group
        @steps:
        Step1: Listing all the Security Groups for a user
        Step2: Verifying that list size is 1
        Step3: Creating a Security Groups
        Step4: Listing all the Security Groups again for a user
        Step5: Verifying that list size is 2
        Step6: Authorizing Ingress for the security group created in step3
        Step7: Listing the security groups by passing id of security group created in step3
        Step8: Verifying that list size is 1
        Step9: Verifying that Ingress is authorized to the security group
        Step10: Verifying the details of the Ingress rule are as expected
        Step11: Revoking Ingress for the security group created in step3
        Step12: Listing the security groups by passing id of security group created in step3
        Step13: Verifying that list size is 1
        Step14: Verifying that Ingress is revoked from the security group
        """
        # Listing all the Security Groups for a User
        list_securitygroups_before = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"]
        )
        # Verifying that default security group is created
        status = validateList(list_securitygroups_before)
        self.assertEqual(
            PASS,
            status[0],
            "Default Security Groups creation failed"
        )
        # Verifying the size of the list is 1
        self.assertEqual(
            1,
            len(list_securitygroups_before),
            "Count of Security Groups list is not matching"
        )
        # Creating a security group
        securitygroup_created = SecurityGroup.create(
            self.userapiclient,
            self.services["security_group"],
            account=self.account.name,
            domainid=self.domain.id,
            description=self.services["security_group"]["name"]
        )
        self.assertIsNotNone(
            securitygroup_created,
            "Security Group creation failed"
        )
        self.cleanup.append(securitygroup_created)
        # Listing all the security groups for user again
        list_securitygroups_after = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"]
        )
        status = validateList(list_securitygroups_after)
        self.assertEqual(
            PASS,
            status[0],
            "Security Groups creation failed"
        )
        # Verifying that list size is 2
        self.assertEqual(
            2,
            len(list_securitygroups_after),
            "Failed to create Security Group"
        )
        # Authorizing Ingress for the security group created in step3
        securitygroup_created.authorize(
            self.userapiclient,
            self.services["ingress_rule"],
            self.account.name,
            self.domain.id,
        )
        # Listing the security group by Id
        list_securitygroups_byid = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"],
            id=securitygroup_created.id,
            domainid=self.domain.id
        )
        # Verifying that security group is listed
        status = validateList(list_securitygroups_byid)
        self.assertEqual(
            PASS,
            status[0],
            "Listing of Security Groups by id failed"
        )
        # Verifying size of the list is 1
        self.assertEqual(
            1,
            len(list_securitygroups_byid),
            "Count of the listing security group by id is not matching"
        )
        securitygroup_ingress = list_securitygroups_byid[0].ingressrule
        # Validating the Ingress rule
        status = validateList(securitygroup_ingress)
        self.assertEqual(
            PASS,
            status[0],
            "Security Groups Ingress rule authorization failed"
        )
        self.assertEqual(
            1,
            len(securitygroup_ingress),
            "Security Group Ingress rules count is not matching"
        )
        # Verifying the details of the Ingress rule are as expected
        #Creating expected and actual values dictionaries
        expected_dict = {
            "cidr":self.services["ingress_rule"]["cidrlist"],
            "protocol":self.services["ingress_rule"]["protocol"],
            "startport":self.services["ingress_rule"]["startport"],
            "endport":self.services["ingress_rule"]["endport"],
        }
        actual_dict = {
            "cidr":str(securitygroup_ingress[0].cidr),
            "protocol":str(securitygroup_ingress[0].protocol.upper()),
            "startport":str(securitygroup_ingress[0].startport),
            "endport":str(securitygroup_ingress[0].endport),
        }
        ingress_status = self.__verify_values(
            expected_dict,
            actual_dict
        )
        self.assertEqual(
            True,
            ingress_status,
            "Listed Security group Ingress rule details are not as expected"
        )
        # Revoking the Ingress rule from Security Group
        securitygroup_created.revoke(self.userapiclient, securitygroup_ingress[0].ruleid)
        # Listing the security group by Id
        list_securitygroups_byid = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"],
            id=securitygroup_created.id,
            domainid=self.domain.id
        )
        # Verifying that security group is listed
        status = validateList(list_securitygroups_byid)
        self.assertEqual(
            PASS,
            status[0],
            "Listing of Security Groups by id failed"
        )
        # Verifying size of the list is 1
        self.assertEqual(
            1,
            len(list_securitygroups_byid),
            "Count of the listing security group by id is not matching"
        )
        securitygroup_ingress = list_securitygroups_byid[0].ingressrule
        # Verifying that Ingress rule is empty(revoked)
        status = validateList(securitygroup_ingress)
        self.assertEqual(
            EMPTY_LIST,
            status[2],
            "Security Groups Ingress rule is not revoked"
        )
        return

    @attr(tags=["basic", "provisioning"])
    def test_03_securitygroups_authorize_revoke_egress(self):
        """
        @Desc: Test to Authorize and Revoke Egress for Security Group
        @steps:
        Step1: Listing all the Security Groups for a user
        Step2: Verifying that list size is 1
        Step3: Creating a Security Groups
        Step4: Listing all the Security Groups again for a user
        Step5: Verifying that list size is 2
        Step6: Authorizing Egress for the security group created in step3
        Step7: Listing the security groups by passing id of security group created in step3
        Step8: Verifying that list size is 1
        Step9: Verifying that Egress is authorized to the security group
        Step10: Verifying the details of the Egress rule are as expected
        Step11: Revoking Egress for the security group created in step3
        Step12: Listing the security groups by passing id of security group created in step3
        Step13: Verifying that list size is 1
        Step14: Verifying that Egress is revoked from the security group
        """
        # Listing all the Security Groups for a User
        list_securitygroups_before = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"]
        )
        # Verifying that default security group is created
        status = validateList(list_securitygroups_before)
        self.assertEqual(
            PASS,
            status[0],
            "Default Security Groups creation failed"
        )
        # Verifying the size of the list is 1
        self.assertEqual(
            1,
            len(list_securitygroups_before),
            "Count of Security Groups list is not matching"
        )
        # Creating a security group
        securitygroup_created = SecurityGroup.create(
            self.userapiclient,
            self.services["security_group"],
            account=self.account.name,
            domainid=self.domain.id,
            description=self.services["security_group"]["name"]
        )
        self.assertIsNotNone(
            securitygroup_created,
            "Security Group creation failed"
        )
        self.cleanup.append(securitygroup_created)
        # Listing all the security groups for user again
        list_securitygroups_after = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"]
        )
        status = validateList(list_securitygroups_after)
        self.assertEqual(
            PASS,
            status[0],
            "Security Groups creation failed"
        )
        # Verifying that list size is 2
        self.assertEqual(
            2,
            len(list_securitygroups_after),
            "Failed to create Security Group"
        )
        # Authorizing Egress for the security group created in step3
        # (the same rule test data is reused for ingress and egress)
        securitygroup_created.authorizeEgress(
            self.userapiclient,
            self.services["ingress_rule"],
            self.account.name,
            self.domain.id,
        )
        # Listing the security group by Id
        list_securitygroups_byid = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"],
            id=securitygroup_created.id,
            domainid=self.domain.id
        )
        # Verifying that security group is listed
        status = validateList(list_securitygroups_byid)
        self.assertEqual(
            PASS,
            status[0],
            "Listing of Security Groups by id failed"
        )
        # Verifying size of the list is 1
        self.assertEqual(
            1,
            len(list_securitygroups_byid),
            "Count of the listing security group by id is not matching"
        )
        securitygroup_egress = list_securitygroups_byid[0].egressrule
        # Validating the Egress rule
        status = validateList(securitygroup_egress)
        self.assertEqual(
            PASS,
            status[0],
            "Security Groups Egress rule authorization failed"
        )
        self.assertEqual(
            1,
            len(securitygroup_egress),
            "Security Group Egress rules count is not matching"
        )
        # Verifying the details of the Egress rule are as expected
        #Creating expected and actual values dictionaries
        expected_dict = {
            "cidr":self.services["ingress_rule"]["cidrlist"],
            "protocol":self.services["ingress_rule"]["protocol"],
            "startport":self.services["ingress_rule"]["startport"],
            "endport":self.services["ingress_rule"]["endport"],
        }
        actual_dict = {
            "cidr":str(securitygroup_egress[0].cidr),
            "protocol":str(securitygroup_egress[0].protocol.upper()),
            "startport":str(securitygroup_egress[0].startport),
            "endport":str(securitygroup_egress[0].endport),
        }
        ingress_status = self.__verify_values(
            expected_dict,
            actual_dict
        )
        self.assertEqual(
            True,
            ingress_status,
            "Listed Security group Egress rule details are not as expected"
        )
        # Revoking the Egress rule from Security Group
        securitygroup_created.revokeEgress(self.userapiclient, securitygroup_egress[0].ruleid)
        # Listing the security group by Id
        list_securitygroups_byid = SecurityGroup.list(
            self.userapiclient,
            listall=self.services["listall"],
            id=securitygroup_created.id,
            domainid=self.domain.id
        )
        # Verifying that security group is listed
        status = validateList(list_securitygroups_byid)
        self.assertEqual(
            PASS,
            status[0],
            "Listing of Security Groups by id failed"
        )
        # Verifying size of the list is 1
        self.assertEqual(
            1,
            len(list_securitygroups_byid),
            "Count of the listing security group by id is not matching"
        )
        securitygroup_egress = list_securitygroups_byid[0].egressrule
        # Verifying that Egress rule is empty(revoked)
        status = validateList(securitygroup_egress)
        self.assertEqual(
            EMPTY_LIST,
            status[2],
            "Security Groups Egress rule is not revoked"
        )
        return
#Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (SecurityGroup,
Account)
from marvin.lib.common import (get_zone,
get_domain,
get_template)
from marvin.lib.utils import (validateList,
cleanup_resources)
from marvin.codes import (PASS, EMPTY_LIST)
from nose.plugins.attrib import attr
class TestSecurityGroups(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        """One-time fixture: build API clients, resolve domain/zone/template,
        create a dedicated account and a user-level API client for it."""
        try:
            cls._cleanup = []
            cls.testClient = super(TestSecurityGroups, cls).getClsTestClient()
            cls.api_client = cls.testClient.getApiClient()
            cls.services = cls.testClient.getParsedTestDataConfig()
            # Get Domain, Zone, Template
            cls.domain = get_domain(cls.api_client)
            cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
            cls.template = get_template(
                cls.api_client,
                cls.zone.id,
                cls.services["ostype"]
            )
            cls.services['mode'] = cls.zone.networktype
            cls.account = Account.create(
                cls.api_client,
                cls.services["account"],
                domainid=cls.domain.id
            )
            # Getting authentication for user in newly created Account
            cls.user = cls.account.user[0]
            cls.userapiclient = cls.testClient.getUserApiClient(cls.user.username, cls.domain.name)
            # Account is appended after the user client succeeds, so cleanup
            # only ever sees fully-created resources.
            cls._cleanup.append(cls.account)
        except Exception as e:
            # Best-effort teardown of anything created so far, then re-raise.
            cls.tearDownClass()
            raise Exception("Warning: Exception in setup : %s" % e)
        return
def setUp(self):
self.apiClient = self.testClient.getApiClient()
self.cleanup = []
def tearDown(self):
#Clean up, terminate the created resources
cleanup_resources(self.apiClient, self.cleanup)
return
    @classmethod
    def tearDownClass(cls):
        """Dispose of class-level resources (the test account) via the admin client."""
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            # Surface cleanup failures loudly rather than leaking resources silently.
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
def __verify_values(self, expected_vals, actual_vals):
"""
@Desc: Function to verify expected and actual values
@Steps:
Step1: Initializing return flag to True
Step1: Verifying length of expected and actual dictionaries is matching.
If not matching returning false
Step2: Listing all the keys from expected dictionary
Step3: Looping through each key from step2 and verifying expected and actual dictionaries have same value
If not making return flag to False
Step4: returning the return flag after all the values are verified
"""
return_flag = True
if len(expected_vals) != len(actual_vals):
return False
keys = expected_vals.keys()
for i in range(0, len(expected_vals)):
exp_val = expected_vals[keys[i]]
act_val = actual_vals[keys[i]]
if exp_val == act_val:
return_flag = return_flag and True
else:
return_flag = return_flag and False
self.debug("expected Value: %s, is not matching with actual value: %s" % (
exp_val,
act_val
))
return return_flag
@attr(tags=["basic", "provisioning"])
def test_01_list_securitygroups_pagination(self):
"""
@Desc: Test to List Security Groups pagination
@steps:
Step1: Listing all the Security Groups for a user
Step2: Verifying that list size is 1
Step3: Creating (page size) number of Security Groups
Step4: Listing all the Security Groups again for a user
Step5: Verifying that list size is (page size + 1)
Step6: Listing all the Security Groups in page1
Step7: Verifying that list size is (page size)
Step8: Listing all the Security Groups in page2
Step9: Verifying that list size is 1
Step10: Deleting the Security Group present in page 2
Step11: Listing all the Security Groups in page2
Step12: Verifying that no security groups are listed
"""
# Listing all the Security Groups for a User
list_securitygroups_before = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"]
)
# Verifying that default security group is created
status = validateList(list_securitygroups_before)
self.assertEquals(
PASS,
status[0],
"Default Security Groups creation failed"
)
# Verifying the size of the list is 1
self.assertEquals(
1,
len(list_securitygroups_before),
"Count of Security Groups list is not matching"
)
# Creating pagesize number of security groups
for i in range(0, (self.services["pagesize"])):
securitygroup_created = SecurityGroup.create(
self.userapiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.domain.id,
description=self.services["security_group"]["name"]
)
self.assertIsNotNone(
securitygroup_created,
"Security Group creation failed"
)
if (i < self.services["pagesize"]):
self.cleanup.append(securitygroup_created)
# Listing all the security groups for user again
list_securitygroups_after = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"]
)
status = validateList(list_securitygroups_after)
self.assertEquals(
PASS,
status[0],
"Security Groups creation failed"
)
# Verifying that list size is pagesize + 1
self.assertEquals(
self.services["pagesize"] + 1,
len(list_securitygroups_after),
"Failed to create pagesize + 1 number of Security Groups"
)
# Listing all the security groups in page 1
list_securitygroups_page1 = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"],
page=1,
pagesize=self.services["pagesize"]
)
status = validateList(list_securitygroups_page1)
self.assertEquals(
PASS,
status[0],
"Failed to list security groups in page 1"
)
# Verifying the list size to be equal to pagesize
self.assertEquals(
self.services["pagesize"],
len(list_securitygroups_page1),
"Size of security groups in page 1 is not matching"
)
# Listing all the security groups in page 2
list_securitygroups_page2 = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"],
page=2,
pagesize=self.services["pagesize"]
)
status = validateList(list_securitygroups_page2)
self.assertEquals(
PASS,
status[0],
"Failed to list security groups in page 2"
)
# Verifying the list size to be equal to pagesize
self.assertEquals(
1,
len(list_securitygroups_page2),
"Size of security groups in page 2 is not matching"
)
# Deleting the security group present in page 2
SecurityGroup.delete(
securitygroup_created,
self.userapiclient)
self.cleanup.remove(securitygroup_created)
# Listing all the security groups in page 2 again
list_securitygroups_page2 = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"],
page=2,
pagesize=self.services["pagesize"]
)
# Verifying that there are no security groups listed
self.assertIsNone(
list_securitygroups_page2,
"Security Groups not deleted from page 2"
)
return
@attr(tags=["basic", "provisioning"])
def test_02_securitygroups_authorize_revoke_ingress(self):
"""
@Desc: Test to Authorize and Revoke Ingress for Security Group
@steps:
Step1: Listing all the Security Groups for a user
Step2: Verifying that list size is 1
Step3: Creating a Security Groups
Step4: Listing all the Security Groups again for a user
Step5: Verifying that list size is 2
Step6: Authorizing Ingress for the security group created in step3
Step7: Listing the security groups by passing id of security group created in step3
Step8: Verifying that list size is 1
Step9: Verifying that Ingress is authorized to the security group
Step10: Verifying the details of the Ingress rule are as expected
Step11: Revoking Ingress for the security group created in step3
Step12: Listing the security groups by passing id of security group created in step3
Step13: Verifying that list size is 1
Step14: Verifying that Ingress is revoked from the security group
"""
# Listing all the Security Groups for a User
list_securitygroups_before = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"]
)
# Verifying that default security group is created
status = validateList(list_securitygroups_before)
self.assertEquals(
PASS,
status[0],
"Default Security Groups creation failed"
)
# Verifying the size of the list is 1
self.assertEquals(
1,
len(list_securitygroups_before),
"Count of Security Groups list is not matching"
)
# Creating a security group
securitygroup_created = SecurityGroup.create(
self.userapiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.domain.id,
description=self.services["security_group"]["name"]
)
self.assertIsNotNone(
securitygroup_created,
"Security Group creation failed"
)
self.cleanup.append(securitygroup_created)
# Listing all the security groups for user again
list_securitygroups_after = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"]
)
status = validateList(list_securitygroups_after)
self.assertEquals(
PASS,
status[0],
"Security Groups creation failed"
)
# Verifying that list size is 2
self.assertEquals(
2,
len(list_securitygroups_after),
"Failed to create Security Group"
)
# Authorizing Ingress for the security group created in step3
securitygroup_created.authorize(
self.userapiclient,
self.services["ingress_rule"],
self.account.name,
self.domain.id,
)
# Listing the security group by Id
list_securitygroups_byid = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"],
id=securitygroup_created.id,
domainid=self.domain.id
)
# Verifying that security group is listed
status = validateList(list_securitygroups_byid)
self.assertEquals(
PASS,
status[0],
"Listing of Security Groups by id failed"
)
# Verifying size of the list is 1
self.assertEquals(
1,
len(list_securitygroups_byid),
"Count of the listing security group by id is not matching"
)
securitygroup_ingress = list_securitygroups_byid[0].ingressrule
# Validating the Ingress rule
status = validateList(securitygroup_ingress)
self.assertEquals(
PASS,
status[0],
"Security Groups Ingress rule authorization failed"
)
self.assertEquals(
1,
len(securitygroup_ingress),
"Security Group Ingress rules count is not matching"
)
# Verifying the details of the Ingress rule are as expected
#Creating expected and actual values dictionaries
expected_dict = {
"cidr":self.services["ingress_rule"]["cidrlist"],
"protocol":self.services["ingress_rule"]["protocol"],
"startport":self.services["ingress_rule"]["startport"],
"endport":self.services["ingress_rule"]["endport"],
}
actual_dict = {
"cidr":str(securitygroup_ingress[0].cidr),
"protocol":str(securitygroup_ingress[0].protocol.upper()),
"startport":str(securitygroup_ingress[0].startport),
"endport":str(securitygroup_ingress[0].endport),
}
ingress_status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
ingress_status,
"Listed Security group Ingress rule details are not as expected"
)
# Revoking the Ingress rule from Security Group
securitygroup_created.revoke(self.userapiclient, securitygroup_ingress[0].ruleid)
# Listing the security group by Id
list_securitygroups_byid = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"],
id=securitygroup_created.id,
domainid=self.domain.id
)
# Verifying that security group is listed
status = validateList(list_securitygroups_byid)
self.assertEquals(
PASS,
status[0],
"Listing of Security Groups by id failed"
)
# Verifying size of the list is 1
self.assertEquals(
1,
len(list_securitygroups_byid),
"Count of the listing security group by id is not matching"
)
securitygroup_ingress = list_securitygroups_byid[0].ingressrule
# Verifying that Ingress rule is empty(revoked)
status = validateList(securitygroup_ingress)
self.assertEquals(
EMPTY_LIST,
status[2],
"Security Groups Ingress rule is not revoked"
)
return
@attr(tags=["basic", "provisioning"])
def test_03_securitygroups_authorize_revoke_egress(self):
"""
@Desc: Test to Authorize and Revoke Egress for Security Group
@steps:
Step1: Listing all the Security Groups for a user
Step2: Verifying that list size is 1
Step3: Creating a Security Groups
Step4: Listing all the Security Groups again for a user
Step5: Verifying that list size is 2
Step6: Authorizing Egress for the security group created in step3
Step7: Listing the security groups by passing id of security group created in step3
Step8: Verifying that list size is 1
Step9: Verifying that Egress is authorized to the security group
Step10: Verifying the details of the Egress rule are as expected
Step11: Revoking Egress for the security group created in step3
Step12: Listing the security groups by passing id of security group created in step3
Step13: Verifying that list size is 1
Step14: Verifying that Egress is revoked from the security group
"""
# Listing all the Security Groups for a User
list_securitygroups_before = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"]
)
# Verifying that default security group is created
status = validateList(list_securitygroups_before)
self.assertEquals(
PASS,
status[0],
"Default Security Groups creation failed"
)
# Verifying the size of the list is 1
self.assertEquals(
1,
len(list_securitygroups_before),
"Count of Security Groups list is not matching"
)
# Creating a security group
securitygroup_created = SecurityGroup.create(
self.userapiclient,
self.services["security_group"],
account=self.account.name,
domainid=self.domain.id,
description=self.services["security_group"]["name"]
)
self.assertIsNotNone(
securitygroup_created,
"Security Group creation failed"
)
self.cleanup.append(securitygroup_created)
# Listing all the security groups for user again
list_securitygroups_after = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"]
)
status = validateList(list_securitygroups_after)
self.assertEquals(
PASS,
status[0],
"Security Groups creation failed"
)
# Verifying that list size is 2
self.assertEquals(
2,
len(list_securitygroups_after),
"Failed to create Security Group"
)
# Authorizing Egress for the security group created in step3
securitygroup_created.authorizeEgress(
self.userapiclient,
self.services["ingress_rule"],
self.account.name,
self.domain.id,
)
# Listing the security group by Id
list_securitygroups_byid = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"],
id=securitygroup_created.id,
domainid=self.domain.id
)
# Verifying that security group is listed
status = validateList(list_securitygroups_byid)
self.assertEquals(
PASS,
status[0],
"Listing of Security Groups by id failed"
)
# Verifying size of the list is 1
self.assertEquals(
1,
len(list_securitygroups_byid),
"Count of the listing security group by id is not matching"
)
securitygroup_egress = list_securitygroups_byid[0].egressrule
# Validating the Ingress rule
status = validateList(securitygroup_egress)
self.assertEquals(
PASS,
status[0],
"Security Groups Egress rule authorization failed"
)
self.assertEquals(
1,
len(securitygroup_egress),
"Security Group Egress rules count is not matching"
)
# Verifying the details of the Egress rule are as expected
#Creating expected and actual values dictionaries
expected_dict = {
"cidr":self.services["ingress_rule"]["cidrlist"],
"protocol":self.services["ingress_rule"]["protocol"],
"startport":self.services["ingress_rule"]["startport"],
"endport":self.services["ingress_rule"]["endport"],
}
actual_dict = {
"cidr":str(securitygroup_egress[0].cidr),
"protocol":str(securitygroup_egress[0].protocol.upper()),
"startport":str(securitygroup_egress[0].startport),
"endport":str(securitygroup_egress[0].endport),
}
ingress_status = self.__verify_values(
expected_dict,
actual_dict
)
self.assertEqual(
True,
ingress_status,
"Listed Security group Egress rule details are not as expected"
)
# Revoking the Egress rule from Security Group
securitygroup_created.revokeEgress(self.userapiclient, securitygroup_egress[0].ruleid)
# Listing the security group by Id
list_securitygroups_byid = SecurityGroup.list(
self.userapiclient,
listall=self.services["listall"],
id=securitygroup_created.id,
domainid=self.domain.id
)
# Verifying that security group is listed
status = validateList(list_securitygroups_byid)
self.assertEquals(
PASS,
status[0],
"Listing of Security Groups by id failed"
)
# Verifying size of the list is 1
self.assertEquals(
1,
len(list_securitygroups_byid),
"Count of the listing security group by id is not matching"
)
securitygroup_egress = list_securitygroups_byid[0].egressrule
# Verifying that Ingress rule is empty(revoked)
status = validateList(securitygroup_egress)
self.assertEquals(
EMPTY_LIST,
status[2],
"Security Groups Egress rule is not revoked"
)
return | 0.536799 | 0.245226 |
import base64
import gzip
import json
from unittest.mock import MagicMock, patch
import os
import sys
import unittest
sys.modules["trace_forwarder.connection"] = MagicMock()
sys.modules["datadog_lambda.wrapper"] = MagicMock()
sys.modules["datadog_lambda.metric"] = MagicMock()
sys.modules["datadog"] = MagicMock()
sys.modules["requests"] = MagicMock()
sys.modules["requests_futures.sessions"] = MagicMock()
env_patch = patch.dict(os.environ, {"DD_API_KEY": "11111111111111111111111111111111"})
env_patch.start()
from parsing import awslogs_handler, parse_event_source, separate_security_hub_findings
env_patch.stop()
class TestParseEventSource(unittest.TestCase):
def test_aws_source_if_none_found(self):
self.assertEqual(parse_event_source({}, "asdfalsfhalskjdfhalsjdf"), "aws")
def test_cloudtrail_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"cloud-trail/AWSLogs/123456779121/CloudTrail/us-west-3/2018/01/07/123456779121_CloudTrail_eu-west-3_20180707T1735Z_abcdefghi0MCRL2O.json.gz",
),
"cloudtrail",
)
def test_cloudtrail_event_with_service_substrings(self):
# Assert that source "cloudtrail" is parsed even though substrings "waf" and "sns" are present in the key
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"cloud-trail/AWSLogs/123456779121/CloudTrail/us-west-3/2018/01/07/123456779121_CloudTrail_eu-west-3_20180707T1735Z_xywafKsnsXMBrdsMCRL2O.json.gz",
),
"cloudtrail",
)
def test_rds_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/rds/my-rds-resource"), "rds"
)
def test_mariadb_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/rds/mariaDB-instance/error"),
"mariadb",
)
def test_mysql_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/rds/mySQL-instance/error"),
"mysql",
)
def test_postgresql_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"}, "/aws/rds/instance/datadog/postgresql"
),
"postgresql",
)
def test_lambda_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/lambda/postRestAPI"), "lambda"
)
def test_apigateway_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"}, "Api-Gateway-Execution-Logs_a1b23c/test"
),
"apigateway",
)
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/api-gateway/my-project"),
"apigateway",
)
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/http-api/my-project"),
"apigateway",
)
def test_dms_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "dms-tasks-test-instance"), "dms"
)
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]}, "AWSLogs/amazon_dms/my-s3.json.gz"
),
"dms",
)
def test_sns_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"}, "sns/us-east-1/123456779121/SnsTopicX"
),
"sns",
)
def test_codebuild_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"}, "/aws/codebuild/new-project-sample"
),
"codebuild",
)
def test_kinesis_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/kinesisfirehose/test"),
"kinesis",
)
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]}, "AWSLogs/amazon_kinesis/my-s3.json.gz"
),
"kinesis",
)
def test_docdb_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/docdb/testCluster/profile"),
"docdb",
)
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]}, "/amazon_documentdb/dev/123abc.zip"
),
"docdb",
)
def test_vpc_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "abc123_my_vpc_loggroup"), "vpc"
)
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/123456779121/vpcflowlogs/us-east-1/2020/10/02/123456779121_vpcflowlogs_us-east-1_fl-xxxxx.log.gz",
),
"vpc",
)
def test_elb_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/123456779121/elasticloadbalancing/us-east-1/2020/10/02/123456779121_elasticloadbalancing_us-east-1_app.alb.xxxxx.xx.xxx.xxx_x.log.gz",
),
"elb",
)
def test_waf_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"2020/10/02/21/aws-waf-logs-testing-1-2020-10-02-21-25-30-x123x-x456x",
),
"waf",
)
def test_redshift_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/123456779121/redshift/us-east-1/2020/10/21/123456779121_redshift_us-east-1_mycluster_userlog_2020-10-21T18:01.gz",
),
"redshift",
)
def test_route53_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"},
"my-route53-loggroup123",
),
"route53",
)
def test_vpcdnsquerylogs_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/123456779121/vpcdnsquerylogs/vpc-********/2021/05/11/vpc-********_vpcdnsquerylogs_********_20210511T0910Z_71584702.log.gz",
),
"route53",
)
def test_fargate_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"},
"/ecs/fargate-logs",
),
"fargate",
)
def test_cloudfront_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/cloudfront/123456779121/test/01.gz",
),
"cloudfront",
)
def test_eks_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"},
"/aws/eks/control-plane/cluster",
),
"eks",
)
def test_elasticsearch_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/elasticsearch/domain"),
"elasticsearch",
)
def test_msk_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"},
"/myMSKLogGroup",
),
"msk",
)
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/amazon_msk/us-east-1/xxxxx.log.gz",
),
"msk",
)
def test_carbon_black_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"carbon-black-cloud-forwarder/alerts/8436e850-7e78-40e4-b3cd-6ebbc854d0a2.jsonl.gz",
),
"carbonblack",
)
def test_cloudwatch_source_if_none_found(self):
self.assertEqual(parse_event_source({"awslogs": "logs"}, ""), "cloudwatch")
def test_s3_source_if_none_found(self):
self.assertEqual(parse_event_source({"Records": ["logs-from-s3"]}, ""), "s3")
class TestParseSecurityHubEvents(unittest.TestCase):
def test_security_hub_no_findings(self):
event = {"ddsource": "securityhub"}
self.assertEqual(
separate_security_hub_findings(event),
None,
)
def test_security_hub_one_finding_no_resources(self):
event = {
"ddsource": "securityhub",
"detail": {"findings": [{"myattribute": "somevalue"}]},
}
self.assertEqual(
separate_security_hub_findings(event),
[
{
"ddsource": "securityhub",
"detail": {
"finding": {"myattribute": "somevalue", "resources": {}}
},
}
],
)
def test_security_hub_two_findings_one_resource_each(self):
event = {
"ddsource": "securityhub",
"detail": {
"findings": [
{
"myattribute": "somevalue",
"Resources": [
{"Region": "us-east-1", "Type": "AwsEc2SecurityGroup"}
],
},
{
"myattribute": "somevalue",
"Resources": [
{"Region": "us-east-1", "Type": "AwsEc2SecurityGroup"}
],
},
]
},
}
self.assertEqual(
separate_security_hub_findings(event),
[
{
"ddsource": "securityhub",
"detail": {
"finding": {
"myattribute": "somevalue",
"resources": {
"AwsEc2SecurityGroup": {"Region": "us-east-1"}
},
}
},
},
{
"ddsource": "securityhub",
"detail": {
"finding": {
"myattribute": "somevalue",
"resources": {
"AwsEc2SecurityGroup": {"Region": "us-east-1"}
},
}
},
},
],
)
def test_security_hub_multiple_findings_multiple_resources(self):
event = {
"ddsource": "securityhub",
"detail": {
"findings": [
{
"myattribute": "somevalue",
"Resources": [
{"Region": "us-east-1", "Type": "AwsEc2SecurityGroup"}
],
},
{
"myattribute": "somevalue",
"Resources": [
{"Region": "us-east-1", "Type": "AwsEc2SecurityGroup"},
{"Region": "us-east-1", "Type": "AwsOtherSecurityGroup"},
],
},
{
"myattribute": "somevalue",
"Resources": [
{"Region": "us-east-1", "Type": "AwsEc2SecurityGroup"},
{"Region": "us-east-1", "Type": "AwsOtherSecurityGroup"},
{"Region": "us-east-1", "Type": "AwsAnotherSecurityGroup"},
],
},
]
},
}
self.assertEqual(
separate_security_hub_findings(event),
[
{
"ddsource": "securityhub",
"detail": {
"finding": {
"myattribute": "somevalue",
"resources": {
"AwsEc2SecurityGroup": {"Region": "us-east-1"}
},
}
},
},
{
"ddsource": "securityhub",
"detail": {
"finding": {
"myattribute": "somevalue",
"resources": {
"AwsEc2SecurityGroup": {"Region": "us-east-1"},
"AwsOtherSecurityGroup": {"Region": "us-east-1"},
},
}
},
},
{
"ddsource": "securityhub",
"detail": {
"finding": {
"myattribute": "somevalue",
"resources": {
"AwsEc2SecurityGroup": {"Region": "us-east-1"},
"AwsOtherSecurityGroup": {"Region": "us-east-1"},
"AwsAnotherSecurityGroup": {"Region": "us-east-1"},
},
}
},
},
],
)
class TestAWSLogsHandler(unittest.TestCase):
def test_awslogs_handler_rds_postgresql(self):
event = {
"awslogs": {
"data": base64.b64encode(
gzip.compress(
bytes(
json.dumps(
{
"owner": "123456789012",
"logGroup": "/aws/rds/instance/datadog/postgresql",
"logStream": "datadog.0",
"logEvents": [
{
"id": "31953106606966983378809025079804211143289615424298221568",
"timestamp": 1609556645000,
"message": "2021-01-02 03:04:05 UTC::@:[5306]:LOG: database system is ready to accept connections",
}
],
}
),
"utf-8",
)
)
)
}
}
context = None
metadata = {"ddsource": "postgresql", "ddtags": "env:dev"}
self.assertEqual(
[
{
"aws": {
"awslogs": {
"logGroup": "/aws/rds/instance/datadog/postgresql",
"logStream": "datadog.0",
"owner": "123456789012",
}
},
"id": "31953106606966983378809025079804211143289615424298221568",
"message": "2021-01-02 03:04:05 UTC::@:[5306]:LOG: database system is ready "
"to accept connections",
"timestamp": 1609556645000,
}
],
list(awslogs_handler(event, context, metadata)),
)
self.assertEqual(
{
"ddsource": "postgresql",
"ddtags": "env:dev,logname:postgresql",
"host": "datadog",
"service": "postgresql",
},
metadata,
)
if __name__ == "__main__":
unittest.main() | aws/logs_monitoring/tests/test_parsing.py | import base64
import gzip
import json
from unittest.mock import MagicMock, patch
import os
import sys
import unittest
sys.modules["trace_forwarder.connection"] = MagicMock()
sys.modules["datadog_lambda.wrapper"] = MagicMock()
sys.modules["datadog_lambda.metric"] = MagicMock()
sys.modules["datadog"] = MagicMock()
sys.modules["requests"] = MagicMock()
sys.modules["requests_futures.sessions"] = MagicMock()
env_patch = patch.dict(os.environ, {"DD_API_KEY": "11111111111111111111111111111111"})
env_patch.start()
from parsing import awslogs_handler, parse_event_source, separate_security_hub_findings
env_patch.stop()
class TestParseEventSource(unittest.TestCase):
def test_aws_source_if_none_found(self):
self.assertEqual(parse_event_source({}, "asdfalsfhalskjdfhalsjdf"), "aws")
def test_cloudtrail_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"cloud-trail/AWSLogs/123456779121/CloudTrail/us-west-3/2018/01/07/123456779121_CloudTrail_eu-west-3_20180707T1735Z_abcdefghi0MCRL2O.json.gz",
),
"cloudtrail",
)
def test_cloudtrail_event_with_service_substrings(self):
# Assert that source "cloudtrail" is parsed even though substrings "waf" and "sns" are present in the key
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"cloud-trail/AWSLogs/123456779121/CloudTrail/us-west-3/2018/01/07/123456779121_CloudTrail_eu-west-3_20180707T1735Z_xywafKsnsXMBrdsMCRL2O.json.gz",
),
"cloudtrail",
)
def test_rds_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/rds/my-rds-resource"), "rds"
)
def test_mariadb_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/rds/mariaDB-instance/error"),
"mariadb",
)
def test_mysql_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/rds/mySQL-instance/error"),
"mysql",
)
def test_postgresql_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"}, "/aws/rds/instance/datadog/postgresql"
),
"postgresql",
)
def test_lambda_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/lambda/postRestAPI"), "lambda"
)
def test_apigateway_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"}, "Api-Gateway-Execution-Logs_a1b23c/test"
),
"apigateway",
)
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/api-gateway/my-project"),
"apigateway",
)
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/http-api/my-project"),
"apigateway",
)
def test_dms_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "dms-tasks-test-instance"), "dms"
)
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]}, "AWSLogs/amazon_dms/my-s3.json.gz"
),
"dms",
)
def test_sns_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"}, "sns/us-east-1/123456779121/SnsTopicX"
),
"sns",
)
def test_codebuild_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"}, "/aws/codebuild/new-project-sample"
),
"codebuild",
)
def test_kinesis_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/kinesisfirehose/test"),
"kinesis",
)
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]}, "AWSLogs/amazon_kinesis/my-s3.json.gz"
),
"kinesis",
)
def test_docdb_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/aws/docdb/testCluster/profile"),
"docdb",
)
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]}, "/amazon_documentdb/dev/123abc.zip"
),
"docdb",
)
def test_vpc_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "abc123_my_vpc_loggroup"), "vpc"
)
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/123456779121/vpcflowlogs/us-east-1/2020/10/02/123456779121_vpcflowlogs_us-east-1_fl-xxxxx.log.gz",
),
"vpc",
)
def test_elb_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/123456779121/elasticloadbalancing/us-east-1/2020/10/02/123456779121_elasticloadbalancing_us-east-1_app.alb.xxxxx.xx.xxx.xxx_x.log.gz",
),
"elb",
)
def test_waf_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"2020/10/02/21/aws-waf-logs-testing-1-2020-10-02-21-25-30-x123x-x456x",
),
"waf",
)
def test_redshift_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/123456779121/redshift/us-east-1/2020/10/21/123456779121_redshift_us-east-1_mycluster_userlog_2020-10-21T18:01.gz",
),
"redshift",
)
def test_route53_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"},
"my-route53-loggroup123",
),
"route53",
)
def test_vpcdnsquerylogs_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/123456779121/vpcdnsquerylogs/vpc-********/2021/05/11/vpc-********_vpcdnsquerylogs_********_20210511T0910Z_71584702.log.gz",
),
"route53",
)
def test_fargate_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"},
"/ecs/fargate-logs",
),
"fargate",
)
def test_cloudfront_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/cloudfront/123456779121/test/01.gz",
),
"cloudfront",
)
def test_eks_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"},
"/aws/eks/control-plane/cluster",
),
"eks",
)
def test_elasticsearch_event(self):
self.assertEqual(
parse_event_source({"awslogs": "logs"}, "/elasticsearch/domain"),
"elasticsearch",
)
def test_msk_event(self):
self.assertEqual(
parse_event_source(
{"awslogs": "logs"},
"/myMSKLogGroup",
),
"msk",
)
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"AWSLogs/amazon_msk/us-east-1/xxxxx.log.gz",
),
"msk",
)
def test_carbon_black_event(self):
self.assertEqual(
parse_event_source(
{"Records": ["logs-from-s3"]},
"carbon-black-cloud-forwarder/alerts/8436e850-7e78-40e4-b3cd-6ebbc854d0a2.jsonl.gz",
),
"carbonblack",
)
def test_cloudwatch_source_if_none_found(self):
self.assertEqual(parse_event_source({"awslogs": "logs"}, ""), "cloudwatch")
def test_s3_source_if_none_found(self):
self.assertEqual(parse_event_source({"Records": ["logs-from-s3"]}, ""), "s3")
class TestParseSecurityHubEvents(unittest.TestCase):
def test_security_hub_no_findings(self):
event = {"ddsource": "securityhub"}
self.assertEqual(
separate_security_hub_findings(event),
None,
)
def test_security_hub_one_finding_no_resources(self):
event = {
"ddsource": "securityhub",
"detail": {"findings": [{"myattribute": "somevalue"}]},
}
self.assertEqual(
separate_security_hub_findings(event),
[
{
"ddsource": "securityhub",
"detail": {
"finding": {"myattribute": "somevalue", "resources": {}}
},
}
],
)
def test_security_hub_two_findings_one_resource_each(self):
event = {
"ddsource": "securityhub",
"detail": {
"findings": [
{
"myattribute": "somevalue",
"Resources": [
{"Region": "us-east-1", "Type": "AwsEc2SecurityGroup"}
],
},
{
"myattribute": "somevalue",
"Resources": [
{"Region": "us-east-1", "Type": "AwsEc2SecurityGroup"}
],
},
]
},
}
self.assertEqual(
separate_security_hub_findings(event),
[
{
"ddsource": "securityhub",
"detail": {
"finding": {
"myattribute": "somevalue",
"resources": {
"AwsEc2SecurityGroup": {"Region": "us-east-1"}
},
}
},
},
{
"ddsource": "securityhub",
"detail": {
"finding": {
"myattribute": "somevalue",
"resources": {
"AwsEc2SecurityGroup": {"Region": "us-east-1"}
},
}
},
},
],
)
def test_security_hub_multiple_findings_multiple_resources(self):
event = {
"ddsource": "securityhub",
"detail": {
"findings": [
{
"myattribute": "somevalue",
"Resources": [
{"Region": "us-east-1", "Type": "AwsEc2SecurityGroup"}
],
},
{
"myattribute": "somevalue",
"Resources": [
{"Region": "us-east-1", "Type": "AwsEc2SecurityGroup"},
{"Region": "us-east-1", "Type": "AwsOtherSecurityGroup"},
],
},
{
"myattribute": "somevalue",
"Resources": [
{"Region": "us-east-1", "Type": "AwsEc2SecurityGroup"},
{"Region": "us-east-1", "Type": "AwsOtherSecurityGroup"},
{"Region": "us-east-1", "Type": "AwsAnotherSecurityGroup"},
],
},
]
},
}
self.assertEqual(
separate_security_hub_findings(event),
[
{
"ddsource": "securityhub",
"detail": {
"finding": {
"myattribute": "somevalue",
"resources": {
"AwsEc2SecurityGroup": {"Region": "us-east-1"}
},
}
},
},
{
"ddsource": "securityhub",
"detail": {
"finding": {
"myattribute": "somevalue",
"resources": {
"AwsEc2SecurityGroup": {"Region": "us-east-1"},
"AwsOtherSecurityGroup": {"Region": "us-east-1"},
},
}
},
},
{
"ddsource": "securityhub",
"detail": {
"finding": {
"myattribute": "somevalue",
"resources": {
"AwsEc2SecurityGroup": {"Region": "us-east-1"},
"AwsOtherSecurityGroup": {"Region": "us-east-1"},
"AwsAnotherSecurityGroup": {"Region": "us-east-1"},
},
}
},
},
],
)
class TestAWSLogsHandler(unittest.TestCase):
def test_awslogs_handler_rds_postgresql(self):
event = {
"awslogs": {
"data": base64.b64encode(
gzip.compress(
bytes(
json.dumps(
{
"owner": "123456789012",
"logGroup": "/aws/rds/instance/datadog/postgresql",
"logStream": "datadog.0",
"logEvents": [
{
"id": "31953106606966983378809025079804211143289615424298221568",
"timestamp": 1609556645000,
"message": "2021-01-02 03:04:05 UTC::@:[5306]:LOG: database system is ready to accept connections",
}
],
}
),
"utf-8",
)
)
)
}
}
context = None
metadata = {"ddsource": "postgresql", "ddtags": "env:dev"}
self.assertEqual(
[
{
"aws": {
"awslogs": {
"logGroup": "/aws/rds/instance/datadog/postgresql",
"logStream": "datadog.0",
"owner": "123456789012",
}
},
"id": "31953106606966983378809025079804211143289615424298221568",
"message": "2021-01-02 03:04:05 UTC::@:[5306]:LOG: database system is ready "
"to accept connections",
"timestamp": 1609556645000,
}
],
list(awslogs_handler(event, context, metadata)),
)
self.assertEqual(
{
"ddsource": "postgresql",
"ddtags": "env:dev,logname:postgresql",
"host": "datadog",
"service": "postgresql",
},
metadata,
)
if __name__ == "__main__":
unittest.main() | 0.412885 | 0.159283 |
from cumulusci.robotframework.pageobjects import DetailPage
from cumulusci.robotframework.pageobjects import ListingPage
from cumulusci.robotframework.pageobjects import pageobject
from BaseObjects import BaseNPSPPage
import time
from NPSP import npsp_lex_locators
@pageobject("Details", "Opportunity")
class OpportunityPage(BaseNPSPPage, DetailPage):
object_name = "Opportunity"
def _is_current_page(self):
""" Verify we are on the opportunity details page
by verifying that the url contains '/view'
"""
self.selenium.wait_until_location_contains("/lightning/r/Opportunity/",message="Current page is not a Opportunity detail view")
def ensure_opportunity_details_are_loaded(self,objectID, value):
""" Navigate to the page with objectid mentioned
Wait for the page to load and confirm atleast the opportunity name exists
"""
self.pageobjects.go_to_page("Details", "Opportunity", objectID)
self.npsp.navigate_to_and_validate_field_value("Opportunity Name", "contains", value)
def navigate_to_matching_gifts_page(self):
self.npsp.click_more_actions_button()
self.selenium.click_link('Find Matched Gifts')
self.npsp.choose_frame("vfFrameId")
def navigate_to_writeoff_payments_page(self):
self.npsp.click_related_list_dd_button('Payments', 'Show one more action', 'Write Off Payments')
self.npsp.wait_for_locator('frame','Write Off Remaining Balance')
self.npsp.choose_frame("Write Off Remaining Balance")
self.selenium.wait_until_page_contains("You are preparing to write off")
def change_related_contact_role_settings(self,name,role=None,**kwargs):
"""Loads the related contact from opportunity, waits for the modal and updates the role and primary settings"""
dropdown = npsp_lex_locators['related_drop_down'].format(name)
edit = npsp_lex_locators['record']['dd_edit_option'].format("Edit")
self.selenium.wait_until_page_contains_element(dropdown)
self.salesforce._jsclick(dropdown)
self.selenium.wait_until_element_is_visible(edit)
self.selenium.click_element(edit)
self.salesforce.wait_until_modal_is_open()
self.npsp.select_value_from_dropdown ("Role",role)
self.npsp.populate_modal_form(**kwargs)
self.salesforce.click_modal_button("Save")
@pageobject("Listing", "Opportunity")
class OpportunityListingPage(BaseNPSPPage, ListingPage):
object_name = "Opportunity"
def _is_current_page(self):
""" Verify we are on the opportunities listing page
by verifying that the url contains '/list'
"""
self.selenium.wait_until_location_contains("lightning/o/Opportunity/list",message="Current page is not a list page")
def perform_delete_menu_operation_on(self,value,action):
""" Identifies the value to delete from the List and chooses delete
option from the menu. Confirms the delete action from the confirmation modal
"""
locators = npsp_lex_locators['name']
list_ele = self.selenium.get_webelements(locators)
for index, element in enumerate(list_ele):
if element.text == value:
drop_down = npsp_lex_locators['opportunities_dropdown'].format(index + 1)
self.selenium.set_focus_to_element(drop_down)
self.selenium.wait_until_element_is_visible(drop_down)
self.selenium.wait_until_element_is_enabled(drop_down)
self.selenium.click_element(drop_down)
self.selenium.wait_until_page_contains(action)
self.selenium.click_link(action)
# Wait for the delete button from the modal and confirm the delete action
delete_btn=npsp_lex_locators["Delete_opportunity_modal_button"]
self.selenium.wait_until_element_is_visible(delete_btn)
self.selenium.click_button(delete_btn)
self.selenium.wait_until_location_contains("/list")
break | robot/Cumulus/resources/OpportunityPageObject.py | from cumulusci.robotframework.pageobjects import DetailPage
from cumulusci.robotframework.pageobjects import ListingPage
from cumulusci.robotframework.pageobjects import pageobject
from BaseObjects import BaseNPSPPage
import time
from NPSP import npsp_lex_locators
@pageobject("Details", "Opportunity")
class OpportunityPage(BaseNPSPPage, DetailPage):
object_name = "Opportunity"
def _is_current_page(self):
""" Verify we are on the opportunity details page
by verifying that the url contains '/view'
"""
self.selenium.wait_until_location_contains("/lightning/r/Opportunity/",message="Current page is not a Opportunity detail view")
def ensure_opportunity_details_are_loaded(self,objectID, value):
""" Navigate to the page with objectid mentioned
Wait for the page to load and confirm atleast the opportunity name exists
"""
self.pageobjects.go_to_page("Details", "Opportunity", objectID)
self.npsp.navigate_to_and_validate_field_value("Opportunity Name", "contains", value)
def navigate_to_matching_gifts_page(self):
self.npsp.click_more_actions_button()
self.selenium.click_link('Find Matched Gifts')
self.npsp.choose_frame("vfFrameId")
def navigate_to_writeoff_payments_page(self):
self.npsp.click_related_list_dd_button('Payments', 'Show one more action', 'Write Off Payments')
self.npsp.wait_for_locator('frame','Write Off Remaining Balance')
self.npsp.choose_frame("Write Off Remaining Balance")
self.selenium.wait_until_page_contains("You are preparing to write off")
def change_related_contact_role_settings(self,name,role=None,**kwargs):
"""Loads the related contact from opportunity, waits for the modal and updates the role and primary settings"""
dropdown = npsp_lex_locators['related_drop_down'].format(name)
edit = npsp_lex_locators['record']['dd_edit_option'].format("Edit")
self.selenium.wait_until_page_contains_element(dropdown)
self.salesforce._jsclick(dropdown)
self.selenium.wait_until_element_is_visible(edit)
self.selenium.click_element(edit)
self.salesforce.wait_until_modal_is_open()
self.npsp.select_value_from_dropdown ("Role",role)
self.npsp.populate_modal_form(**kwargs)
self.salesforce.click_modal_button("Save")
@pageobject("Listing", "Opportunity")
class OpportunityListingPage(BaseNPSPPage, ListingPage):
object_name = "Opportunity"
def _is_current_page(self):
""" Verify we are on the opportunities listing page
by verifying that the url contains '/list'
"""
self.selenium.wait_until_location_contains("lightning/o/Opportunity/list",message="Current page is not a list page")
def perform_delete_menu_operation_on(self,value,action):
""" Identifies the value to delete from the List and chooses delete
option from the menu. Confirms the delete action from the confirmation modal
"""
locators = npsp_lex_locators['name']
list_ele = self.selenium.get_webelements(locators)
for index, element in enumerate(list_ele):
if element.text == value:
drop_down = npsp_lex_locators['opportunities_dropdown'].format(index + 1)
self.selenium.set_focus_to_element(drop_down)
self.selenium.wait_until_element_is_visible(drop_down)
self.selenium.wait_until_element_is_enabled(drop_down)
self.selenium.click_element(drop_down)
self.selenium.wait_until_page_contains(action)
self.selenium.click_link(action)
# Wait for the delete button from the modal and confirm the delete action
delete_btn=npsp_lex_locators["Delete_opportunity_modal_button"]
self.selenium.wait_until_element_is_visible(delete_btn)
self.selenium.click_button(delete_btn)
self.selenium.wait_until_location_contains("/list")
break | 0.448306 | 0.129706 |
from nose.tools import assert_raises
from pyeda.boolalg import boolfunc
from pyeda.boolalg import exprnode
from pyeda.boolalg.bfarray import exprvars
from pyeda.boolalg.expr import (
Zero, One,
exprvar, expr,
#expr2dimacscnf, expr2dimacssat,
Expression,
Not, Or, And, Xor, Equal, Implies, ITE,
Nor, Nand, Xnor, Unequal,
OneHot0, OneHot, Majority, AchillesHeel, Mux,
)
# Shared fixtures: single-letter expression variables used across the tests,
# two named data inputs (d1/d0) for ITE cases, and two variable arrays.
a, b, c, d, e, p, q, s, w, x, y, z = [exprvar(name) for name in 'abcdepqswxyz']
d1 = exprvar('d1')
d0 = exprvar('d0')
xs = exprvars('x', 16)
ys = exprvars('y', 16, 16, 16)
def test_exprnode_constants():
"""Test exprnode constants"""
assert exprnode.ZERO == 0x0
assert exprnode.ONE == 0x1
assert exprnode.COMP == 0x4
assert exprnode.VAR == 0x5
assert exprnode.OP_OR == 0x8
assert exprnode.OP_AND == 0x9
assert exprnode.OP_XOR == 0xA
assert exprnode.OP_EQ == 0xB
assert exprnode.OP_NOT == 0xC
assert exprnode.OP_IMPL == 0xD
assert exprnode.OP_ITE == 0xE
def test_exprnode_errors():
"""Test exprnode errors."""
assert_raises(TypeError, exprnode.lit, "invalid input")
assert_raises(ValueError, exprnode.lit, 0)
assert_raises(TypeError, exprnode.not_, "invalid input")
assert_raises(TypeError, exprnode.or_, "invalid input", b.node)
assert_raises(TypeError, exprnode.or_, a.node, "invalid input")
assert_raises(TypeError, exprnode.and_, "invalid input", b.node)
assert_raises(TypeError, exprnode.and_, a.node, "invalid input")
assert_raises(TypeError, exprnode.xor, "invalid input", b.node)
assert_raises(TypeError, exprnode.xor, a.node, "invalid input")
assert_raises(TypeError, exprnode.eq, "invalid input", b.node)
assert_raises(TypeError, exprnode.eq, a.node, "invalid input")
assert_raises(TypeError, exprnode.impl, "invalid input", q.node)
assert_raises(TypeError, exprnode.impl, p.node, "invalid input")
assert_raises(TypeError, exprnode.ite, "invalid input", d1.node, d0.node)
assert_raises(TypeError, exprnode.ite, s.node, "invalid input", d0.node)
assert_raises(TypeError, exprnode.ite, s.node, d1.node, "invalid input")
def test_expr():
f = a & ~b | c ^ ~d
assert expr(Zero) is Zero
assert expr(a) is a
assert expr(f) is f
assert expr(False) is Zero
assert expr(True) is One
assert expr(0) is Zero
assert expr(1) is One
assert expr('0') is Zero
assert expr('1') is One
assert expr([]) is Zero
assert expr(['foo', 'bar']) is One
assert str(expr("a & ~b | c ^ ~d")) == "Or(And(a, ~b), Xor(c, ~d))"
assert str(expr("a & 0 | 1 ^ ~d", simplify=False)) == "Or(And(a, 0), Xor(1, ~d))"
def test_to_ast():
"""Test exprnode.to_ast()."""
f = (~a | b & ~c ^ d).eq(~(0 & p) >> (~q ^ 1))
assert f.to_ast() == \
('eq',
('or',
('lit', -a.uniqid),
('xor',
('and', ('lit', b.uniqid),
('lit', -c.uniqid)),
('lit', d.uniqid))),
('impl',
('not',
('and',
('lit', p.uniqid),
('const', 0))),
('xor',
('lit', -q.uniqid),
('const', 1))))
def test_not():
assert Not(0) is One
assert Not(1) is Zero
assert Not(~a) is a
assert Not(a) is ~a
assert Not(~a | a) is Zero
assert Not(~a & a) is One
assert str(Not(~a | b)) == "Not(Or(~a, b))"
assert str(Not(~a | b | 0, simplify=False)) == "Not(Or(Or(~a, b), 0))"
assert ~~a is a
assert ~~~a is ~a
assert ~~~~a is a
def test_or():
assert Or() is Zero
assert Or(a) is a
assert Or(0, 0) is Zero
assert Or(0, 1) is One
assert Or(1, 0) is One
assert Or(1, 1) is One
assert Or(0, 0, 0) is Zero
assert Or(0, 0, 1) is One
assert Or(0, 1, 0) is One
assert Or(0, 1, 1) is One
assert Or(1, 0, 0) is One
assert Or(1, 0, 1) is One
assert Or(1, 1, 0) is One
assert Or(1, 1, 1) is One
assert Or(a, 0) is a
assert Or(1, a) is One
assert Or(~a, a) is One
assert str(Or(a, 0, simplify=False)) == "Or(a, 0)"
assert str(Or(1, a, simplify=False)) == "Or(1, a)"
assert str(Or(~a, a, simplify=False)) == "Or(~a, a)"
def test_and():
assert And() is One
assert And(a) is a
assert And(0, 0) is Zero
assert And(0, 1) is Zero
assert And(1, 0) is Zero
assert And(1, 1) is One
assert And(0, 0, 0) is Zero
assert And(0, 0, 1) is Zero
assert And(0, 1, 0) is Zero
assert And(0, 1, 1) is Zero
assert And(1, 0, 0) is Zero
assert And(1, 0, 1) is Zero
assert And(1, 1, 0) is Zero
assert And(1, 1, 1) is One
assert And(a, 0) is Zero
assert And(1, a) is a
assert And(~a, a) is Zero
assert str(And(a, 0, simplify=False)) == "And(a, 0)"
assert str(And(1, a, simplify=False)) == "And(1, a)"
assert str(And(~a, a, simplify=False)) == "And(~a, a)"
def test_xor():
assert Xor() is Zero
assert Xor(a) is a
assert Xor(0, 0) is Zero
assert Xor(0, 1) is One
assert Xor(1, 0) is One
assert Xor(1, 1) is Zero
assert Xor(0, 0, 0) is Zero
assert Xor(0, 0, 1) is One
assert Xor(0, 1, 0) is One
assert Xor(0, 1, 1) is Zero
assert Xor(1, 0, 0) is One
assert Xor(1, 0, 1) is Zero
assert Xor(1, 1, 0) is Zero
assert Xor(1, 1, 1) is One
assert Xor(a, 0) is a
assert Xor(1, a) is ~a
assert Xor(~a, a) is One
assert str(Xor(a, 0, simplify=False)) == "Xor(a, 0)"
assert str(Xor(1, a, simplify=False)) == "Xor(1, a)"
assert str(Xor(~a, a, simplify=False)) == "Xor(~a, a)"
def test_equal():
assert Equal() is One
assert Equal(a) is One
assert Equal(0, 0) is One
assert Equal(0, 1) is Zero
assert Equal(1, 0) is Zero
assert Equal(1, 1) is One
assert Equal(0, 0, 0) is One
assert Equal(0, 0, 1) is Zero
assert Equal(0, 1, 0) is Zero
assert Equal(0, 1, 1) is Zero
assert Equal(1, 0, 0) is Zero
assert Equal(1, 0, 1) is Zero
assert Equal(1, 1, 0) is Zero
assert Equal(1, 1, 1) is One
assert Equal(a, 0) is ~a
assert Equal(1, a) is a
assert Equal(~a, a) is Zero
assert str(Equal(a, 0, simplify=False)) == "Equal(a, 0)"
assert str(Equal(1, a, simplify=False)) == "Equal(1, a)"
assert str(Equal(~a, a, simplify=False)) == "Equal(~a, a)"
def test_implies():
assert Implies(0, 0) is One
assert Implies(0, 1) is One
assert Implies(1, 0) is Zero
assert Implies(1, 1) is One
assert Implies(a, 0) is ~a
assert Implies(1, a) is a
assert Implies(~a, a) is a
assert str(Implies(a, 0, simplify=False)) == "Implies(a, 0)"
assert str(Implies(1, a, simplify=False)) == "Implies(1, a)"
assert str(Implies(~a, a, simplify=False)) == "Implies(~a, a)"
def test_ite():
assert ITE(0, 0, 0) is Zero
assert ITE(0, 0, 1) is One
assert ITE(0, 1, 0) is Zero
assert ITE(0, 1, 1) is One
assert ITE(1, 0, 0) is Zero
assert ITE(1, 0, 1) is Zero
assert ITE(1, 1, 0) is One
assert ITE(1, 1, 1) is One
def test_is_zero_one():
assert Zero.is_zero()
assert not One.is_zero()
assert not a.is_zero()
assert not (~a | b).is_zero()
assert One.is_one()
assert not Zero.is_one()
assert not a.is_one()
assert not (~a | b).is_one()
def test_box():
assert Expression.box(a) is a
assert Expression.box(0) is Zero
assert Expression.box(1) is One
assert Expression.box('0') is Zero
assert Expression.box('1') is One
assert Expression.box([]) is Zero
assert Expression.box(42) is One | pyeda/boolalg/test/test_exxpr.py | from nose.tools import assert_raises
from pyeda.boolalg import boolfunc
from pyeda.boolalg import exprnode
from pyeda.boolalg.bfarray import exprvars
from pyeda.boolalg.expr import (
Zero, One,
exprvar, expr,
#expr2dimacscnf, expr2dimacssat,
Expression,
Not, Or, And, Xor, Equal, Implies, ITE,
Nor, Nand, Xnor, Unequal,
OneHot0, OneHot, Majority, AchillesHeel, Mux,
)
# Common variables
a, b, c, d, e, p, q, s, w, x, y, z = map(exprvar, 'abcdepqswxyz')
d1, d0 = map(exprvar, ('d1', 'd0'))
xs = exprvars('x', 16)
ys = exprvars('y', 16, 16, 16)
def test_exprnode_constants():
"""Test exprnode constants"""
assert exprnode.ZERO == 0x0
assert exprnode.ONE == 0x1
assert exprnode.COMP == 0x4
assert exprnode.VAR == 0x5
assert exprnode.OP_OR == 0x8
assert exprnode.OP_AND == 0x9
assert exprnode.OP_XOR == 0xA
assert exprnode.OP_EQ == 0xB
assert exprnode.OP_NOT == 0xC
assert exprnode.OP_IMPL == 0xD
assert exprnode.OP_ITE == 0xE
def test_exprnode_errors():
"""Test exprnode errors."""
assert_raises(TypeError, exprnode.lit, "invalid input")
assert_raises(ValueError, exprnode.lit, 0)
assert_raises(TypeError, exprnode.not_, "invalid input")
assert_raises(TypeError, exprnode.or_, "invalid input", b.node)
assert_raises(TypeError, exprnode.or_, a.node, "invalid input")
assert_raises(TypeError, exprnode.and_, "invalid input", b.node)
assert_raises(TypeError, exprnode.and_, a.node, "invalid input")
assert_raises(TypeError, exprnode.xor, "invalid input", b.node)
assert_raises(TypeError, exprnode.xor, a.node, "invalid input")
assert_raises(TypeError, exprnode.eq, "invalid input", b.node)
assert_raises(TypeError, exprnode.eq, a.node, "invalid input")
assert_raises(TypeError, exprnode.impl, "invalid input", q.node)
assert_raises(TypeError, exprnode.impl, p.node, "invalid input")
assert_raises(TypeError, exprnode.ite, "invalid input", d1.node, d0.node)
assert_raises(TypeError, exprnode.ite, s.node, "invalid input", d0.node)
assert_raises(TypeError, exprnode.ite, s.node, d1.node, "invalid input")
def test_expr():
f = a & ~b | c ^ ~d
assert expr(Zero) is Zero
assert expr(a) is a
assert expr(f) is f
assert expr(False) is Zero
assert expr(True) is One
assert expr(0) is Zero
assert expr(1) is One
assert expr('0') is Zero
assert expr('1') is One
assert expr([]) is Zero
assert expr(['foo', 'bar']) is One
assert str(expr("a & ~b | c ^ ~d")) == "Or(And(a, ~b), Xor(c, ~d))"
assert str(expr("a & 0 | 1 ^ ~d", simplify=False)) == "Or(And(a, 0), Xor(1, ~d))"
def test_to_ast():
"""Test exprnode.to_ast()."""
f = (~a | b & ~c ^ d).eq(~(0 & p) >> (~q ^ 1))
assert f.to_ast() == \
('eq',
('or',
('lit', -a.uniqid),
('xor',
('and', ('lit', b.uniqid),
('lit', -c.uniqid)),
('lit', d.uniqid))),
('impl',
('not',
('and',
('lit', p.uniqid),
('const', 0))),
('xor',
('lit', -q.uniqid),
('const', 1))))
def test_not():
assert Not(0) is One
assert Not(1) is Zero
assert Not(~a) is a
assert Not(a) is ~a
assert Not(~a | a) is Zero
assert Not(~a & a) is One
assert str(Not(~a | b)) == "Not(Or(~a, b))"
assert str(Not(~a | b | 0, simplify=False)) == "Not(Or(Or(~a, b), 0))"
assert ~~a is a
assert ~~~a is ~a
assert ~~~~a is a
def test_or():
assert Or() is Zero
assert Or(a) is a
assert Or(0, 0) is Zero
assert Or(0, 1) is One
assert Or(1, 0) is One
assert Or(1, 1) is One
assert Or(0, 0, 0) is Zero
assert Or(0, 0, 1) is One
assert Or(0, 1, 0) is One
assert Or(0, 1, 1) is One
assert Or(1, 0, 0) is One
assert Or(1, 0, 1) is One
assert Or(1, 1, 0) is One
assert Or(1, 1, 1) is One
assert Or(a, 0) is a
assert Or(1, a) is One
assert Or(~a, a) is One
assert str(Or(a, 0, simplify=False)) == "Or(a, 0)"
assert str(Or(1, a, simplify=False)) == "Or(1, a)"
assert str(Or(~a, a, simplify=False)) == "Or(~a, a)"
def test_and():
assert And() is One
assert And(a) is a
assert And(0, 0) is Zero
assert And(0, 1) is Zero
assert And(1, 0) is Zero
assert And(1, 1) is One
assert And(0, 0, 0) is Zero
assert And(0, 0, 1) is Zero
assert And(0, 1, 0) is Zero
assert And(0, 1, 1) is Zero
assert And(1, 0, 0) is Zero
assert And(1, 0, 1) is Zero
assert And(1, 1, 0) is Zero
assert And(1, 1, 1) is One
assert And(a, 0) is Zero
assert And(1, a) is a
assert And(~a, a) is Zero
assert str(And(a, 0, simplify=False)) == "And(a, 0)"
assert str(And(1, a, simplify=False)) == "And(1, a)"
assert str(And(~a, a, simplify=False)) == "And(~a, a)"
def test_xor():
assert Xor() is Zero
assert Xor(a) is a
assert Xor(0, 0) is Zero
assert Xor(0, 1) is One
assert Xor(1, 0) is One
assert Xor(1, 1) is Zero
assert Xor(0, 0, 0) is Zero
assert Xor(0, 0, 1) is One
assert Xor(0, 1, 0) is One
assert Xor(0, 1, 1) is Zero
assert Xor(1, 0, 0) is One
assert Xor(1, 0, 1) is Zero
assert Xor(1, 1, 0) is Zero
assert Xor(1, 1, 1) is One
assert Xor(a, 0) is a
assert Xor(1, a) is ~a
assert Xor(~a, a) is One
assert str(Xor(a, 0, simplify=False)) == "Xor(a, 0)"
assert str(Xor(1, a, simplify=False)) == "Xor(1, a)"
assert str(Xor(~a, a, simplify=False)) == "Xor(~a, a)"
def test_equal():
assert Equal() is One
assert Equal(a) is One
assert Equal(0, 0) is One
assert Equal(0, 1) is Zero
assert Equal(1, 0) is Zero
assert Equal(1, 1) is One
assert Equal(0, 0, 0) is One
assert Equal(0, 0, 1) is Zero
assert Equal(0, 1, 0) is Zero
assert Equal(0, 1, 1) is Zero
assert Equal(1, 0, 0) is Zero
assert Equal(1, 0, 1) is Zero
assert Equal(1, 1, 0) is Zero
assert Equal(1, 1, 1) is One
assert Equal(a, 0) is ~a
assert Equal(1, a) is a
assert Equal(~a, a) is Zero
assert str(Equal(a, 0, simplify=False)) == "Equal(a, 0)"
assert str(Equal(1, a, simplify=False)) == "Equal(1, a)"
assert str(Equal(~a, a, simplify=False)) == "Equal(~a, a)"
def test_implies():
assert Implies(0, 0) is One
assert Implies(0, 1) is One
assert Implies(1, 0) is Zero
assert Implies(1, 1) is One
assert Implies(a, 0) is ~a
assert Implies(1, a) is a
assert Implies(~a, a) is a
assert str(Implies(a, 0, simplify=False)) == "Implies(a, 0)"
assert str(Implies(1, a, simplify=False)) == "Implies(1, a)"
assert str(Implies(~a, a, simplify=False)) == "Implies(~a, a)"
def test_ite():
assert ITE(0, 0, 0) is Zero
assert ITE(0, 0, 1) is One
assert ITE(0, 1, 0) is Zero
assert ITE(0, 1, 1) is One
assert ITE(1, 0, 0) is Zero
assert ITE(1, 0, 1) is Zero
assert ITE(1, 1, 0) is One
assert ITE(1, 1, 1) is One
def test_is_zero_one():
assert Zero.is_zero()
assert not One.is_zero()
assert not a.is_zero()
assert not (~a | b).is_zero()
assert One.is_one()
assert not Zero.is_one()
assert not a.is_one()
assert not (~a | b).is_one()
def test_box():
assert Expression.box(a) is a
assert Expression.box(0) is Zero
assert Expression.box(1) is One
assert Expression.box('0') is Zero
assert Expression.box('1') is One
assert Expression.box([]) is Zero
assert Expression.box(42) is One | 0.765681 | 0.664826 |
import roslib; roslib.load_manifest('vigir_behavior_praying_mantis_calibration')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, Logger
from vigir_flexbe_states.change_control_mode_action_state import ChangeControlModeActionState
from flexbe_states.calculation_state import CalculationState
from flexbe_states.wait_state import WaitState
from vigir_flexbe_states.execute_trajectory_both_arms_state import ExecuteTrajectoryBothArmsState
from vigir_flexbe_states.current_joint_positions_state import CurrentJointPositionsState
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
from vigir_flexbe_states.moveit_starting_point_state import MoveitStartingPointState
from flexbe_states.decision_state import DecisionState
from flexbe_states.operator_decision_state import OperatorDecisionState
from vigir_flexbe_states.update_joint_calibration_state import UpdateJointCalibrationState
from flexbe_states.log_state import LogState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
import os
import time
import pprint
import rospy
from control_msgs.msg import *
from trajectory_msgs.msg import *
from flexbe_core.proxy import ProxyPublisher
from vigir_flexbe_behaviors.atlas_definitions import AtlasDefinitions
from vigir_flexbe_behaviors.atlas_functions import AtlasFunctions
# [/MANUAL_IMPORT]
'''
Created on Sat Feb 14 2015
@author: <NAME>
'''
class PrayingMantisCalibrationSM(Behavior):
'''
A behavior that moves ATLAS into the "praying mantis" pose upon startup in order to get consistent joint encoder offsets for calibration purposes.
'''
def __init__(self):
    """Set up behavior metadata, the offset publisher, and the calibration pose tables."""
    super(PrayingMantisCalibrationSM, self).__init__()
    self.name = 'Praying Mantis Calibration'
    # parameters of this behavior
    # references to used behaviors
    # Additional initialization code can be added inside the following tags
    # [MANUAL_INIT]
    # Topic on which measured encoder offsets are published for external monitoring.
    self._offset_topic = "/flor/controller/encoder_offsets"
    self._pub = ProxyPublisher({self._offset_topic: JointTrajectory})
    # Per-arm joint limit table ({'left_arm'/'right_arm': {'upper'/'lower': [...]}}).
    self._joint_limits = AtlasDefinitions.arm_joint_limits
    # Define 90 percent positions for both arms (order of joints same as in _joint_names attribute)
    # atlas_v5
    # - account for fall protection pads
    # - ignore the lower 3 joints, ie, the electric motor ones
    left_calib_upper = [-1.4252, -1.4649, +0.1588, +2.2767, +0.1, +0.1, +0.1]
    left_calib_lower = [+0.5470, +1.2355, +2.9297, +0.1191, -0.1, +1.0, -0.1]
    right_calib_upper = [+1.4914, +1.4296, +0.2118, -2.2899, +0.1, +0.1, +0.1]
    right_calib_lower = [-0.5470, -1.2355, +2.9297, -0.1191, -0.1, -1.0, -0.1]
    # # atlas_v5 (without shoulder pads)
    # left_calib_upper = [+0.5470, +1.2355, +2.9297, +2.1576, +0.1, +0.1, +0.1]
    # left_calib_lower = [-1.1869, -1.4296, +0.2118, +0.1191, -1.3, +1.0, -0.1]
    # right_calib_upper = [-0.5470, -1.2355, +2.9297, -2.1576, +0.1, +0.1, +0.1]
    # right_calib_lower = [+1.1869, +1.4296, +0.2118, -0.1191, -1.3, -1.0, -0.1]
    # Calibration target poses near the joint limits, keyed by arm and limit side.
    self._joint_calib = {'left_arm': {'upper': left_calib_upper, 'lower': left_calib_lower},
                         'right_arm': {'upper': right_calib_upper, 'lower': right_calib_lower}
                        }
    self._joint_names = AtlasDefinitions.arm_joint_names
    # [/MANUAL_INIT]
# Behavior comments:
# O 47 211 /Perform_Checks/Manipulate_Limits
# Without this output_key, Check Behavior complains. Because traj_past_limits could in theory be undefined during runtime.
def create(self):
    """Build and return the behavior's state machine.

    Top level flow: enter the initial control mode, run 'Perform_Checks'
    once per limit side (upper, then lower), then apply/publish the measured
    offsets ('Update_Calibration') and return to the initial control mode.
    """
    initial_mode = "stand"
    motion_mode = "manipulate"
    mantis_mode = "manipulate_limits"
    percent_past_limits = 0.10 # before: 0.075
    # x:788 y:72, x:474 y:133
    _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
    _state_machine.userdata.target_limits = 'upper'
    _state_machine.userdata.cycle_counter = 1
    _state_machine.userdata.stand_posture = None # calculated
    _state_machine.userdata.offsets = {'left_arm': dict(), 'right_arm': dict()}
    # Additional creation code can be added inside the following tags
    # [MANUAL_CREATE]
    self._percent_past_limits = percent_past_limits
    # Create STAND posture trajectory
    _state_machine.userdata.stand_posture = AtlasFunctions.gen_stand_posture_trajectory()
    # [/MANUAL_CREATE]

    # Sub-state machine: read both arms' joint positions while pressed against
    # the limits and accumulate the measured offsets for the current cycle.
    # x:222 y:281, x:349 y:167
    _sm_determine_offsets_0 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['cycle_counter', 'offsets'], output_keys=['offsets'])
    with _sm_determine_offsets_0:
        # x:61 y:53
        OperatableStateMachine.add('Get_Left_Joint_Positions',
                                   CurrentJointPositionsState(planning_group="l_arm_group"),
                                   transitions={'retrieved': 'Determine_Closest_Limits_Left', 'failed': 'failed'},
                                   autonomy={'retrieved': Autonomy.Off, 'failed': Autonomy.Low},
                                   remapping={'joint_positions': 'joint_positions'})
        # x:319 y:54
        OperatableStateMachine.add('Determine_Closest_Limits_Left',
                                   CalculationState(calculation=self.get_closest_limits_left),
                                   transitions={'done': 'Store_Offsets_Left'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'joint_positions', 'output_value': 'joint_limits'})
        # x:598 y:162
        OperatableStateMachine.add('Get_Right_Joint_Positions',
                                   CurrentJointPositionsState(planning_group="r_arm_group"),
                                   transitions={'retrieved': 'Determine_Closest_Limits_Right', 'failed': 'failed'},
                                   autonomy={'retrieved': Autonomy.Off, 'failed': Autonomy.Low},
                                   remapping={'joint_positions': 'joint_positions'})
        # x:584 y:275
        OperatableStateMachine.add('Determine_Closest_Limits_Right',
                                   CalculationState(calculation=self.get_closest_limits_right),
                                   transitions={'done': 'Store_Offsets_Right'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'joint_positions', 'output_value': 'joint_limits'})
        # x:608 y:54
        OperatableStateMachine.add('Store_Offsets_Left',
                                   FlexibleCalculationState(calculation=self.store_offsets_left, input_keys=['limits', 'value', 'offsets', 'counter']),
                                   transitions={'done': 'Get_Right_Joint_Positions'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'limits': 'joint_limits', 'value': 'joint_positions', 'offsets': 'offsets', 'counter': 'cycle_counter', 'output_value': 'offsets'})
        # x:340 y:274
        OperatableStateMachine.add('Store_Offsets_Right',
                                   FlexibleCalculationState(calculation=self.store_offsets_right, input_keys=['limits', 'value', 'offsets', 'counter']),
                                   transitions={'done': 'finished'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'limits': 'joint_limits', 'value': 'joint_positions', 'offsets': 'offsets', 'counter': 'cycle_counter', 'output_value': 'offsets'})

    # Sub-state machine: switch to the limit-pushing control mode, drive the
    # arms slightly past their limits, and measure the resulting offsets.
    # x:528 y:401, x:707 y:282
    _sm_manipulate_limits_1 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['cycle_counter', 'offsets'], output_keys=['offsets', 'traj_past_limits'])
    with _sm_manipulate_limits_1:
        # x:100 y:156
        OperatableStateMachine.add('Prevent_Runtime_Failure',
                                   CalculationState(calculation=lambda x: dict()),
                                   transitions={'done': 'Go_to_MANIPULATE_LIMITS'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'cycle_counter', 'output_value': 'traj_past_limits'})
        # x:387 y:55
        OperatableStateMachine.add('Wait_for_Control_Mode_change',
                                   WaitState(wait_time=1.0),
                                   transitions={'done': 'Get_Left_Joint_Positions'},
                                   autonomy={'done': Autonomy.Low})
        # x:895 y:279
        OperatableStateMachine.add('Gen_Traj_from_90%_to_110%',
                                   CalculationState(calculation=self.gen_traj_past_limits),
                                   transitions={'done': 'Go_to_110%_Joint_Limits'},
                                   autonomy={'done': Autonomy.Low},
                                   remapping={'input_value': 'current_joint_values', 'output_value': 'traj_past_limits'})
        # x:893 y:391
        OperatableStateMachine.add('Go_to_110%_Joint_Limits',
                                   ExecuteTrajectoryBothArmsState(controllers=['left_arm_traj_controller', 'right_arm_traj_controller']),
                                   transitions={'done': 'Determine_Offsets', 'failed': 'Determine_Offsets'},
                                   autonomy={'done': Autonomy.Off, 'failed': Autonomy.High},
                                   remapping={'trajectories': 'traj_past_limits'})
        # x:651 y:385
        OperatableStateMachine.add('Determine_Offsets',
                                   _sm_determine_offsets_0,
                                   transitions={'finished': 'finished', 'failed': 'failed'},
                                   autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                   remapping={'cycle_counter': 'cycle_counter', 'offsets': 'offsets'})
        # x:648 y:54
        OperatableStateMachine.add('Get_Left_Joint_Positions',
                                   CurrentJointPositionsState(planning_group="l_arm_group"),
                                   transitions={'retrieved': 'Get_Right_Joint_Positions', 'failed': 'failed'},
                                   autonomy={'retrieved': Autonomy.Off, 'failed': Autonomy.High},
                                   remapping={'joint_positions': 'joint_positions_left'})
        # x:904 y:53
        OperatableStateMachine.add('Get_Right_Joint_Positions',
                                   CurrentJointPositionsState(planning_group="r_arm_group"),
                                   transitions={'retrieved': 'Generate_Joint_Positions_Struct', 'failed': 'failed'},
                                   autonomy={'retrieved': Autonomy.Off, 'failed': Autonomy.High},
                                   remapping={'joint_positions': 'joint_positions_right'})
        # x:886 y:168
        OperatableStateMachine.add('Generate_Joint_Positions_Struct',
                                   FlexibleCalculationState(calculation=lambda ik: {'left_arm': ik[0], 'right_arm': ik[1]}, input_keys=['left', 'right']),
                                   transitions={'done': 'Gen_Traj_from_90%_to_110%'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'left': 'joint_positions_left', 'right': 'joint_positions_right', 'output_value': 'current_joint_values'})
        # x:92 y:55
        OperatableStateMachine.add('Go_to_MANIPULATE_LIMITS',
                                   ChangeControlModeActionState(target_mode=mantis_mode),
                                   transitions={'changed': 'Wait_for_Control_Mode_change', 'failed': 'failed'},
                                   autonomy={'changed': Autonomy.Off, 'failed': Autonomy.High})

    # Sub-state machine: post-process the collected offsets, publish them, and
    # (after operator confirmation) apply them as joint calibration.
    # x:574 y:247, x:276 y:549
    _sm_update_calibration_2 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['offsets'])
    with _sm_update_calibration_2:
        # x:46 y:44
        OperatableStateMachine.add('Process_Offsets',
                                   CalculationState(calculation=self.process_offsets),
                                   transitions={'done': 'Print_Offset_Info'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'offsets', 'output_value': 'offsets'})
        # x:227 y:45
        OperatableStateMachine.add('Print_Offset_Info',
                                   CalculationState(calculation=self.print_offset_info),
                                   transitions={'done': 'Publish_Offsets'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'offsets', 'output_value': 'none'})
        # x:390 y:158
        OperatableStateMachine.add('Ask_Perform_Update',
                                   OperatorDecisionState(outcomes=['update', 'no_update'], hint="Do you want to apply the calculated offsets for calibration?", suggestion=None),
                                   transitions={'update': 'Convert_Offset_Data', 'no_update': 'finished'},
                                   autonomy={'update': Autonomy.Full, 'no_update': Autonomy.Full})
        # x:232 y:337
        OperatableStateMachine.add('Update_Calibration',
                                   UpdateJointCalibrationState(joint_names=self._joint_names['left_arm'][0:4] + self._joint_names['right_arm'][0:4]),
                                   transitions={'updated': 'Calibration_Successful', 'failed': 'Calibration_Failed'},
                                   autonomy={'updated': Autonomy.Low, 'failed': Autonomy.High},
                                   remapping={'joint_offsets': 'offset_list'})
        # x:241 y:242
        OperatableStateMachine.add('Convert_Offset_Data',
                                   CalculationState(calculation=lambda o: o['left_arm']['avg'] + o['right_arm']['avg']),
                                   transitions={'done': 'Update_Calibration'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'offsets', 'output_value': 'offset_list'})
        # x:522 y:337
        OperatableStateMachine.add('Calibration_Successful',
                                   LogState(text="Successfully updated calibration offsets.", severity=Logger.REPORT_INFO),
                                   transitions={'done': 'finished'},
                                   autonomy={'done': Autonomy.Off})
        # x:246 y:445
        OperatableStateMachine.add('Calibration_Failed',
                                   LogState(text="Failed to apply calibration offsets!", severity=Logger.REPORT_ERROR),
                                   transitions={'done': 'failed'},
                                   autonomy={'done': Autonomy.Off})
        # x:399 y:44
        OperatableStateMachine.add('Publish_Offsets',
                                   CalculationState(calculation=self.publish_offsets),
                                   transitions={'done': 'Ask_Perform_Update'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'offsets', 'output_value': 'none'})

    # Sub-state machine: one full check cycle — go to 90% of limits, push past
    # them ('Manipulate_Limits'), come back, and bump the cycle counter.
    # x:978 y:197, x:394 y:80
    _sm_perform_checks_3 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['cycle_counter', 'target_limits', 'offsets'], output_keys=['cycle_counter', 'offsets'])
    with _sm_perform_checks_3:
        # x:105 y:74
        OperatableStateMachine.add('Go_to_Intermediate_Mode',
                                   ChangeControlModeActionState(target_mode=motion_mode),
                                   transitions={'changed': 'Gen_Traj_to_90%_Limits', 'failed': 'failed'},
                                   autonomy={'changed': Autonomy.Off, 'failed': Autonomy.High})
        # x:653 y:274
        OperatableStateMachine.add('Manipulate_Limits',
                                   _sm_manipulate_limits_1,
                                   transitions={'finished': 'Gen_Traj_back_to_90%_Limits', 'failed': 'failed'},
                                   autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                   remapping={'cycle_counter': 'cycle_counter', 'offsets': 'offsets', 'traj_past_limits': 'traj_past_limits'})
        # x:903 y:78
        OperatableStateMachine.add('Increment_Cycle_counter',
                                   CalculationState(calculation=lambda counter: counter + 1),
                                   transitions={'done': 'finished'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'cycle_counter', 'output_value': 'cycle_counter'})
        # x:344 y:277
        OperatableStateMachine.add('Move_to_90%_Joint_Limits',
                                   MoveitStartingPointState(vel_scaling=0.3),
                                   transitions={'reached': 'Manipulate_Limits', 'failed': 'Move_to_90%_Joint_Limits'},
                                   autonomy={'reached': Autonomy.Low, 'failed': Autonomy.Full},
                                   remapping={'trajectories': 'trajectories_90'})
        # x:114 y:276
        OperatableStateMachine.add('Gen_Traj_to_90%_Limits',
                                   CalculationState(calculation=self.gen_traj_pre_limits),
                                   transitions={'done': 'Move_to_90%_Joint_Limits'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'target_limits', 'output_value': 'trajectories_90'})
        # x:636 y:78
        OperatableStateMachine.add('Go_back_to_90%_Joint_Limits',
                                   ExecuteTrajectoryBothArmsState(controllers=['left_arm_traj_controller', 'right_arm_traj_controller']),
                                   transitions={'done': 'Increment_Cycle_counter', 'failed': 'failed'},
                                   autonomy={'done': Autonomy.Off, 'failed': Autonomy.High},
                                   remapping={'trajectories': 'traj_back_to_90'})
        # x:636 y:172
        OperatableStateMachine.add('Gen_Traj_back_to_90%_Limits',
                                   FlexibleCalculationState(calculation=self.gen_traj_back_from_limits, input_keys=['trajectories_90', 'traj_past_limits']),
                                   transitions={'done': 'Go_back_to_90%_Joint_Limits'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'trajectories_90': 'trajectories_90', 'traj_past_limits': 'traj_past_limits', 'output_value': 'traj_back_to_90'})

    # Top-level state machine wiring.
    with _state_machine:
        # x:110 y:52
        OperatableStateMachine.add('Initial_Control_Mode',
                                   ChangeControlModeActionState(target_mode=initial_mode),
                                   transitions={'changed': 'Perform_Checks', 'failed': 'failed'},
                                   autonomy={'changed': Autonomy.High, 'failed': Autonomy.High})
        # x:712 y:317
        OperatableStateMachine.add('Initial_Mode_before_exit',
                                   ChangeControlModeActionState(target_mode=initial_mode),
                                   transitions={'changed': 'Update_Calibration', 'failed': 'failed'},
                                   autonomy={'changed': Autonomy.Off, 'failed': Autonomy.High})
        # x:122 y:302
        OperatableStateMachine.add('Perform_Checks',
                                   _sm_perform_checks_3,
                                   transitions={'finished': 'Are_We_Done_Yet?', 'failed': 'Intermediate_Mode_before_exit'},
                                   autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                   remapping={'cycle_counter': 'cycle_counter', 'target_limits': 'target_limits', 'offsets': 'offsets'})
        # x:126 y:505
        OperatableStateMachine.add('Are_We_Done_Yet?',
                                   DecisionState(outcomes=["done", "more"], conditions=lambda counter: "done" if counter >= 2 else "more"),
                                   transitions={'done': 'Intermediate_Mode_before_exit', 'more': 'Setup_next_Cycle'},
                                   autonomy={'done': Autonomy.Low, 'more': Autonomy.High},
                                   remapping={'input_value': 'cycle_counter'})
        # x:15 y:404
        OperatableStateMachine.add('Setup_next_Cycle',
                                   CalculationState(calculation=lambda lim: 'lower' if lim == 'upper' else 'upper'),
                                   transitions={'done': 'Perform_Checks'},
                                   autonomy={'done': Autonomy.Low},
                                   remapping={'input_value': 'target_limits', 'output_value': 'target_limits'})
        # x:725 y:186
        OperatableStateMachine.add('Update_Calibration',
                                   _sm_update_calibration_2,
                                   transitions={'finished': 'finished', 'failed': 'failed'},
                                   autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                   remapping={'offsets': 'offsets'})
        # x:726 y:427
        OperatableStateMachine.add('Move_to_Stand_Posture',
                                   MoveitStartingPointState(vel_scaling=0.3),
                                   transitions={'reached': 'Initial_Mode_before_exit', 'failed': 'Move_to_Stand_Posture'},
                                   autonomy={'reached': Autonomy.Off, 'failed': Autonomy.Full},
                                   remapping={'trajectories': 'stand_posture'})
        # x:412 y:427
        OperatableStateMachine.add('Intermediate_Mode_before_exit',
                                   ChangeControlModeActionState(target_mode=motion_mode),
                                   transitions={'changed': 'Move_to_Stand_Posture', 'failed': 'failed'},
                                   autonomy={'changed': Autonomy.Off, 'failed': Autonomy.High})
    return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
def gen_traj_pre_limits(self, limits_side):
    """Build arm trajectories that move both arms to 90 percent of the joint
    limits on the requested side ('upper' or 'lower')."""
    joint_config = {arm: self._joint_calib[arm][limits_side]
                    for arm in ('left_arm', 'right_arm')}
    return AtlasFunctions.gen_arm_trajectory_from_joint_configuration(joint_config)
def _get_closest_limits(self, side, current_values):
    """
    For each joint of the given arm, return whichever limit (upper or lower
    bound) lies nearest to the joint's current position.
    """
    limits = self._joint_limits[side]
    upper, lower = limits['upper'], limits['lower']
    closest_limit = [
        upper[i] if abs(upper[i] - current_values[i]) < abs(lower[i] - current_values[i]) else lower[i]
        for i in range(len(current_values))
    ]
    rospy.loginfo("Limit joint positions: %s" % str(closest_limit))
    rospy.loginfo("Current joint positions: %s" % str(current_values))
    return closest_limit
def get_closest_limits_left(self, current_values):
    """Return the left arm joint limits closest to the given current positions."""
    return self._get_closest_limits('left_arm', current_values)
def get_closest_limits_right(self, current_values):
    """Return the right arm joint limits closest to the given current positions."""
    return self._get_closest_limits('right_arm', current_values)
def gen_traj_past_limits(self, current_joint_values):
    """
    Generate single-point trajectories that push the upper 4 (hydraulic)
    joints of each arm past their closest joint limit by
    `self._percent_past_limits` of the joint's full range, while holding
    the lower 3 (electric) joints at their current positions with zero effort.

    current_joint_values: dict with 'left_arm'/'right_arm' lists of current
        joint positions (joint order as in self._joint_names).
    Returns a dict mapping each arm name to a JointTrajectory with one point.
    """
    result = dict()
    for arm in ['left_arm', 'right_arm']:
        current_values = current_joint_values[arm]
        arm_limits = self._get_closest_limits(arm, current_values)
        arm_target = list()
        arm_effort = list()
        percentage = self._percent_past_limits
        # Push the upper 4 joints against the limits
        for i in range(0, 4):
            near_limit = 'upper' if self._joint_limits[arm]['upper'][i] == arm_limits[i] else 'lower'
            limit_range = self._joint_limits[arm]['upper'][i] - self._joint_limits[arm]['lower'][i]
            # BUGFIX: was `near_limit is 'upper'` — `is` compares object identity,
            # which is implementation-dependent for strings; use `==` for equality.
            offset_sign = 1 if near_limit == 'upper' else -1
            arm_target.append(arm_limits[i] + offset_sign * percentage * limit_range)
            arm_effort.append(float(offset_sign))
        # "Ignore" the lower 3 joints (electric motor ones)
        for i in range(4, 7):
            arm_target.append(current_values[i])
            arm_effort.append(0.0)  # Zero effort stands for not applying additional force
        trajectory = JointTrajectory()
        trajectory.joint_names = self._joint_names[arm]
        point = JointTrajectoryPoint()
        point.positions = arm_target
        point.velocities = [0.0] * len(arm_target)  # David's controller expects zero velocities
        point.effort = arm_effort
        point.time_from_start = rospy.Duration.from_sec(2.5)
        trajectory.points.append(point)
        # rospy.loginfo("110%% joint positions for %s arm: %s" % (arm, str(arm_target[0:4])))  # Only report the relevant joints
        result[arm] = trajectory
    return result
def gen_traj_back_from_limits(self, input_keys):
    """
    Build the trajectories for moving back to 90% of the joint limits.

    The trajectory points are taken from the pre-limits (90%) trajectories,
    while the efforts are copied from the past-limits trajectories so that
    David's controllers keep pushing in the right direction.

    input_keys -- [traj_pre_limits, traj_past_limits], both dicts keyed by arm.
    Returns a dict mapping arm name to the adjusted JointTrajectory.
    NOTE(review): the pre-limits trajectories are modified in place.
    """
    pre_limits, past_limits = input_keys[0], input_keys[1]
    back_to_90 = dict()
    for arm in ('left_arm', 'right_arm'):
        traj = pre_limits[arm]  # start from the 90%-of-limits points
        first_point = traj.points[0]
        first_point.effort = past_limits[arm].points[0].effort  # efforts as per David's controllers
        first_point.time_from_start = rospy.Duration.from_sec(1.0)
        # David's controller expects zero velocities
        first_point.velocities = [0.0] * len(first_point.positions)
        back_to_90[arm] = traj
    return back_to_90
def store_offsets(self, side, input_keys):
    """
    Record the offsets (limit - measured value) for one arm and iteration,
    and publish them for monitoring.

    input_keys -- [joint_limits, joint_values, offsets, cycle_counter].
    Only the upper 4 (hydraulic) joints are considered.
    Returns the updated offsets dict.
    """
    limits, values, offsets, counter = input_keys[:4]
    relevant_limits = limits[0:4]   # ignore the lower 3 joints
    relevant_values = values[0:4]   # --//--
    offsets[side][counter] = [l - v for l, v in zip(relevant_limits, relevant_values)]
    msg = JointTrajectory()
    msg.joint_names = self._joint_names[side][0:4]  # ignore the lower 3 joints
    point = JointTrajectoryPoint()
    point.positions = relevant_values
    point.velocities = offsets[side][counter]  # offsets travel in the velocities field
    msg.points.append(point)
    self._pub.publish(self._offset_topic, msg)
    Logger.loginfo("Publishing %s arm offsets to %s" % (side, self._offset_topic))
    return offsets
def publish_offsets(self, offsets, arms=None, current_values=None):
    """
    Publish the averaged encoder offsets for the given arms.

    offsets -- dict: offsets[side]['avg'] holds the averaged joint offsets.
    arms -- iterable of arm names; defaults to both arms.
    current_values -- joint positions published in the positions field;
        defaults to an empty list.
    """
    # BUGFIX: use None sentinels instead of mutable default arguments,
    # which are shared across calls in Python.
    if arms is None:
        arms = ['left_arm', 'right_arm']
    if current_values is None:
        current_values = []
    for side in arms:
        msg = JointTrajectory()
        msg.joint_names = self._joint_names[side]
        point = JointTrajectoryPoint()
        point.positions = current_values
        point.velocities = offsets[side]['avg']  # offsets travel in the velocities field
        msg.points.append(point)
        self._pub.publish(self._offset_topic, msg)
        Logger.loginfo("Publishing %s arm offsets to %s" % (side, self._offset_topic))
def store_offsets_left(self, input_keys):
    """Store one iteration of left-arm offsets (see store_offsets)."""
    side = 'left_arm'
    return self.store_offsets(side, input_keys)
def store_offsets_right(self, input_keys):
    """Store one iteration of right-arm offsets (see store_offsets)."""
    side = 'right_arm'
    return self.store_offsets(side, input_keys)
def process_offsets(self, offsets):
    """
    Compute per-joint statistics over all calibration iterations.

    offsets[side] maps iteration counter -> list of per-joint offsets.
    Adds two entries per side:
      'avg'  -- mean offset of each joint over all iterations
      'diff' -- maximum absolute deviation from that mean
    Returns the augmented offsets dict.
    """
    for side in ['left_arm', 'right_arm']:
        # Transpose from [iteration][joint] to [joint][iteration].
        # BUGFIX: materialize the map() result; under Python 3 it is a
        # one-shot iterator, so the 'avg' comprehension below would exhaust
        # it and 'diff' would silently come out empty.
        iteration_values = list(map(list, zip(*offsets[side].values())))
        # Calculate the average offset and the deviation from the average
        offsets[side]['avg'] = [sum(joint_entries) / float(len(joint_entries)) for joint_entries in iteration_values]
        offsets[side]['diff'] = [max(map(lambda x: abs(x - avg), joint_entries)) for joint_entries, avg in zip(iteration_values, offsets[side]['avg'])]
    return offsets
def print_offset_info(self, offsets):
    """Log the averaged offsets per arm and pretty-print the raw offset data."""
    for side in ('left_arm', 'right_arm'):
        Logger.loginfo("Joint order (%s): %s" % (side, str(self._joint_names[side][0:4])))
        # Round due to comms_bridge
        rounded = [round(value, 3) for value in offsets[side]['avg']]
        Logger.loginfo("Offsets (%s): %s" % (side, str(rounded)))
        # Logger.loginfo("Max deviation from average (%s): %s" % (side, str(offsets[side]['diff'])))
    pprint.pprint(offsets)  # Pretty print to the "onboard" terminal
# [/MANUAL_FUNC]
# ---- file: behaviors/vigir_behavior_praying_mantis_calibration/src/vigir_behavior_praying_mantis_calibration/praying_mantis_calibration_sm.py ----
import roslib; roslib.load_manifest('vigir_behavior_praying_mantis_calibration')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, Logger
from vigir_flexbe_states.change_control_mode_action_state import ChangeControlModeActionState
from flexbe_states.calculation_state import CalculationState
from flexbe_states.wait_state import WaitState
from vigir_flexbe_states.execute_trajectory_both_arms_state import ExecuteTrajectoryBothArmsState
from vigir_flexbe_states.current_joint_positions_state import CurrentJointPositionsState
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
from vigir_flexbe_states.moveit_starting_point_state import MoveitStartingPointState
from flexbe_states.decision_state import DecisionState
from flexbe_states.operator_decision_state import OperatorDecisionState
from vigir_flexbe_states.update_joint_calibration_state import UpdateJointCalibrationState
from flexbe_states.log_state import LogState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
import os
import time
import pprint
import rospy
from control_msgs.msg import *
from trajectory_msgs.msg import *
from flexbe_core.proxy import ProxyPublisher
from vigir_flexbe_behaviors.atlas_definitions import AtlasDefinitions
from vigir_flexbe_behaviors.atlas_functions import AtlasFunctions
# [/MANUAL_IMPORT]
'''
Created on Sat Feb 14 2015
@author: <NAME>
'''
class PrayingMantisCalibrationSM(Behavior):
'''
A behavior that moves ATLAS into the "praying mantis" pose upon startup in order to get consistent joint encoder offsets for calibration purposes.
'''
def __init__(self):
    """Set up the offset publisher, joint-limit tables and the 90%-of-limit calibration postures."""
    super(PrayingMantisCalibrationSM, self).__init__()
    self.name = 'Praying Mantis Calibration'
    # parameters of this behavior
    # references to used behaviors
    # Additional initialization code can be added inside the following tags
    # [MANUAL_INIT]
    # Topic on which the measured encoder offsets are published (as JointTrajectory messages).
    self._offset_topic = "/flor/controller/encoder_offsets"
    self._pub = ProxyPublisher({self._offset_topic: JointTrajectory})
    # Hardware joint limits per arm, provided by the robot definition ('upper'/'lower' lists).
    self._joint_limits = AtlasDefinitions.arm_joint_limits
    # Define 90 percent positions for both arms (order of joints same as in _joint_names attribute)
    # atlas_v5
    # - account for fall protection pads
    # - ignore the lower 3 joints, ie, the electric motor ones
    left_calib_upper = [-1.4252, -1.4649, +0.1588, +2.2767, +0.1, +0.1, +0.1]
    left_calib_lower = [+0.5470, +1.2355, +2.9297, +0.1191, -0.1, +1.0, -0.1]
    right_calib_upper = [+1.4914, +1.4296, +0.2118, -2.2899, +0.1, +0.1, +0.1]
    right_calib_lower = [-0.5470, -1.2355, +2.9297, -0.1191, -0.1, -1.0, -0.1]
    # # atlas_v5 (without shoulder pads)
    # left_calib_upper = [+0.5470, +1.2355, +2.9297, +2.1576, +0.1, +0.1, +0.1]
    # left_calib_lower = [-1.1869, -1.4296, +0.2118, +0.1191, -1.3, +1.0, -0.1]
    # right_calib_upper = [-0.5470, -1.2355, +2.9297, -2.1576, +0.1, +0.1, +0.1]
    # right_calib_lower = [+1.1869, +1.4296, +0.2118, -0.1191, -1.3, -1.0, -0.1]
    # Calibration target postures, keyed by arm and then by which limit ('upper'/'lower') to approach.
    self._joint_calib = {'left_arm': {'upper': left_calib_upper, 'lower': left_calib_lower},
                         'right_arm': {'upper': right_calib_upper, 'lower': right_calib_lower}
                         }
    # Joint name lists per arm, same ordering as the calibration lists above.
    self._joint_names = AtlasDefinitions.arm_joint_names
    # [/MANUAL_INIT]
# Behavior comments:
# O 47 211 /Perform_Checks/Manipulate_Limits
# Without this output_key, Check Behavior complains. Because traj_past_limits could in theory be undefined during runtime.
def create(self):
    """Build and return the calibration state machine.

    Top level: enter the initial control mode, then repeatedly run
    Perform_Checks (once against the upper limits, once against the lower
    limits), and finally process/publish the measured offsets and return
    to the stand posture.  The nested sub-state-machines are built first,
    innermost to outermost.
    """
    initial_mode = "stand"
    motion_mode = "manipulate"
    mantis_mode = "manipulate_limits"
    percent_past_limits = 0.10  # before: 0.075
    # x:788 y:72, x:474 y:133
    _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
    _state_machine.userdata.target_limits = 'upper'
    _state_machine.userdata.cycle_counter = 1
    _state_machine.userdata.stand_posture = None  # calculated
    _state_machine.userdata.offsets = {'left_arm': dict(), 'right_arm': dict()}
    # Additional creation code can be added inside the following tags
    # [MANUAL_CREATE]
    self._percent_past_limits = percent_past_limits
    # Create STAND posture trajectory
    _state_machine.userdata.stand_posture = AtlasFunctions.gen_stand_posture_trajectory()
    # [/MANUAL_CREATE]
    # Sub-state-machine: read both arms' joint positions and store the offsets
    # against the closest limits for the current cycle.
    # x:222 y:281, x:349 y:167
    _sm_determine_offsets_0 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['cycle_counter', 'offsets'], output_keys=['offsets'])

    with _sm_determine_offsets_0:
        # x:61 y:53
        OperatableStateMachine.add('Get_Left_Joint_Positions',
                                   CurrentJointPositionsState(planning_group="l_arm_group"),
                                   transitions={'retrieved': 'Determine_Closest_Limits_Left', 'failed': 'failed'},
                                   autonomy={'retrieved': Autonomy.Off, 'failed': Autonomy.Low},
                                   remapping={'joint_positions': 'joint_positions'})

        # x:319 y:54
        OperatableStateMachine.add('Determine_Closest_Limits_Left',
                                   CalculationState(calculation=self.get_closest_limits_left),
                                   transitions={'done': 'Store_Offsets_Left'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'joint_positions', 'output_value': 'joint_limits'})

        # x:598 y:162
        OperatableStateMachine.add('Get_Right_Joint_Positions',
                                   CurrentJointPositionsState(planning_group="r_arm_group"),
                                   transitions={'retrieved': 'Determine_Closest_Limits_Right', 'failed': 'failed'},
                                   autonomy={'retrieved': Autonomy.Off, 'failed': Autonomy.Low},
                                   remapping={'joint_positions': 'joint_positions'})

        # x:584 y:275
        OperatableStateMachine.add('Determine_Closest_Limits_Right',
                                   CalculationState(calculation=self.get_closest_limits_right),
                                   transitions={'done': 'Store_Offsets_Right'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'joint_positions', 'output_value': 'joint_limits'})

        # x:608 y:54
        OperatableStateMachine.add('Store_Offsets_Left',
                                   FlexibleCalculationState(calculation=self.store_offsets_left, input_keys=['limits', 'value', 'offsets', 'counter']),
                                   transitions={'done': 'Get_Right_Joint_Positions'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'limits': 'joint_limits', 'value': 'joint_positions', 'offsets': 'offsets', 'counter': 'cycle_counter', 'output_value': 'offsets'})

        # x:340 y:274
        OperatableStateMachine.add('Store_Offsets_Right',
                                   FlexibleCalculationState(calculation=self.store_offsets_right, input_keys=['limits', 'value', 'offsets', 'counter']),
                                   transitions={'done': 'finished'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'limits': 'joint_limits', 'value': 'joint_positions', 'offsets': 'offsets', 'counter': 'cycle_counter', 'output_value': 'offsets'})

    # Sub-state-machine: switch to the limits control mode, push the arms to
    # 110% of their limits and determine the offsets there.
    # x:528 y:401, x:707 y:282
    _sm_manipulate_limits_1 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['cycle_counter', 'offsets'], output_keys=['offsets', 'traj_past_limits'])

    with _sm_manipulate_limits_1:
        # x:100 y:156
        OperatableStateMachine.add('Prevent_Runtime_Failure',
                                   CalculationState(calculation=lambda x: dict()),
                                   transitions={'done': 'Go_to_MANIPULATE_LIMITS'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'cycle_counter', 'output_value': 'traj_past_limits'})

        # x:387 y:55
        OperatableStateMachine.add('Wait_for_Control_Mode_change',
                                   WaitState(wait_time=1.0),
                                   transitions={'done': 'Get_Left_Joint_Positions'},
                                   autonomy={'done': Autonomy.Low})

        # x:895 y:279
        OperatableStateMachine.add('Gen_Traj_from_90%_to_110%',
                                   CalculationState(calculation=self.gen_traj_past_limits),
                                   transitions={'done': 'Go_to_110%_Joint_Limits'},
                                   autonomy={'done': Autonomy.Low},
                                   remapping={'input_value': 'current_joint_values', 'output_value': 'traj_past_limits'})

        # x:893 y:391
        OperatableStateMachine.add('Go_to_110%_Joint_Limits',
                                   ExecuteTrajectoryBothArmsState(controllers=['left_arm_traj_controller', 'right_arm_traj_controller']),
                                   transitions={'done': 'Determine_Offsets', 'failed': 'Determine_Offsets'},
                                   autonomy={'done': Autonomy.Off, 'failed': Autonomy.High},
                                   remapping={'trajectories': 'traj_past_limits'})

        # x:651 y:385
        OperatableStateMachine.add('Determine_Offsets',
                                   _sm_determine_offsets_0,
                                   transitions={'finished': 'finished', 'failed': 'failed'},
                                   autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                   remapping={'cycle_counter': 'cycle_counter', 'offsets': 'offsets'})

        # x:648 y:54
        OperatableStateMachine.add('Get_Left_Joint_Positions',
                                   CurrentJointPositionsState(planning_group="l_arm_group"),
                                   transitions={'retrieved': 'Get_Right_Joint_Positions', 'failed': 'failed'},
                                   autonomy={'retrieved': Autonomy.Off, 'failed': Autonomy.High},
                                   remapping={'joint_positions': 'joint_positions_left'})

        # x:904 y:53
        OperatableStateMachine.add('Get_Right_Joint_Positions',
                                   CurrentJointPositionsState(planning_group="r_arm_group"),
                                   transitions={'retrieved': 'Generate_Joint_Positions_Struct', 'failed': 'failed'},
                                   autonomy={'retrieved': Autonomy.Off, 'failed': Autonomy.High},
                                   remapping={'joint_positions': 'joint_positions_right'})

        # x:886 y:168
        OperatableStateMachine.add('Generate_Joint_Positions_Struct',
                                   FlexibleCalculationState(calculation=lambda ik: {'left_arm': ik[0], 'right_arm': ik[1]}, input_keys=['left', 'right']),
                                   transitions={'done': 'Gen_Traj_from_90%_to_110%'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'left': 'joint_positions_left', 'right': 'joint_positions_right', 'output_value': 'current_joint_values'})

        # x:92 y:55
        OperatableStateMachine.add('Go_to_MANIPULATE_LIMITS',
                                   ChangeControlModeActionState(target_mode=mantis_mode),
                                   transitions={'changed': 'Wait_for_Control_Mode_change', 'failed': 'failed'},
                                   autonomy={'changed': Autonomy.Off, 'failed': Autonomy.High})

    # Sub-state-machine: average the collected offsets, report them and
    # (after operator confirmation) apply them to the joint calibration.
    # x:574 y:247, x:276 y:549
    _sm_update_calibration_2 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['offsets'])

    with _sm_update_calibration_2:
        # x:46 y:44
        OperatableStateMachine.add('Process_Offsets',
                                   CalculationState(calculation=self.process_offsets),
                                   transitions={'done': 'Print_Offset_Info'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'offsets', 'output_value': 'offsets'})

        # x:227 y:45
        OperatableStateMachine.add('Print_Offset_Info',
                                   CalculationState(calculation=self.print_offset_info),
                                   transitions={'done': 'Publish_Offsets'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'offsets', 'output_value': 'none'})

        # x:390 y:158
        OperatableStateMachine.add('Ask_Perform_Update',
                                   OperatorDecisionState(outcomes=['update', 'no_update'], hint="Do you want to apply the calculated offsets for calibration?", suggestion=None),
                                   transitions={'update': 'Convert_Offset_Data', 'no_update': 'finished'},
                                   autonomy={'update': Autonomy.Full, 'no_update': Autonomy.Full})

        # x:232 y:337
        OperatableStateMachine.add('Update_Calibration',
                                   UpdateJointCalibrationState(joint_names=self._joint_names['left_arm'][0:4] + self._joint_names['right_arm'][0:4]),
                                   transitions={'updated': 'Calibration_Successful', 'failed': 'Calibration_Failed'},
                                   autonomy={'updated': Autonomy.Low, 'failed': Autonomy.High},
                                   remapping={'joint_offsets': 'offset_list'})

        # x:241 y:242
        OperatableStateMachine.add('Convert_Offset_Data',
                                   CalculationState(calculation=lambda o: o['left_arm']['avg'] + o['right_arm']['avg']),
                                   transitions={'done': 'Update_Calibration'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'offsets', 'output_value': 'offset_list'})

        # x:522 y:337
        OperatableStateMachine.add('Calibration_Successful',
                                   LogState(text="Successfully updated calibration offsets.", severity=Logger.REPORT_INFO),
                                   transitions={'done': 'finished'},
                                   autonomy={'done': Autonomy.Off})

        # x:246 y:445
        OperatableStateMachine.add('Calibration_Failed',
                                   LogState(text="Failed to apply calibration offsets!", severity=Logger.REPORT_ERROR),
                                   transitions={'done': 'failed'},
                                   autonomy={'done': Autonomy.Off})

        # x:399 y:44
        OperatableStateMachine.add('Publish_Offsets',
                                   CalculationState(calculation=self.publish_offsets),
                                   transitions={'done': 'Ask_Perform_Update'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'offsets', 'output_value': 'none'})

    # Sub-state-machine: one full measurement cycle — move to 90% of the
    # limits, push past them, measure, and come back.
    # x:978 y:197, x:394 y:80
    _sm_perform_checks_3 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['cycle_counter', 'target_limits', 'offsets'], output_keys=['cycle_counter', 'offsets'])

    with _sm_perform_checks_3:
        # x:105 y:74
        OperatableStateMachine.add('Go_to_Intermediate_Mode',
                                   ChangeControlModeActionState(target_mode=motion_mode),
                                   transitions={'changed': 'Gen_Traj_to_90%_Limits', 'failed': 'failed'},
                                   autonomy={'changed': Autonomy.Off, 'failed': Autonomy.High})

        # x:653 y:274
        OperatableStateMachine.add('Manipulate_Limits',
                                   _sm_manipulate_limits_1,
                                   transitions={'finished': 'Gen_Traj_back_to_90%_Limits', 'failed': 'failed'},
                                   autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                   remapping={'cycle_counter': 'cycle_counter', 'offsets': 'offsets', 'traj_past_limits': 'traj_past_limits'})

        # x:903 y:78
        OperatableStateMachine.add('Increment_Cycle_counter',
                                   CalculationState(calculation=lambda counter: counter + 1),
                                   transitions={'done': 'finished'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'cycle_counter', 'output_value': 'cycle_counter'})

        # x:344 y:277
        OperatableStateMachine.add('Move_to_90%_Joint_Limits',
                                   MoveitStartingPointState(vel_scaling=0.3),
                                   transitions={'reached': 'Manipulate_Limits', 'failed': 'Move_to_90%_Joint_Limits'},
                                   autonomy={'reached': Autonomy.Low, 'failed': Autonomy.Full},
                                   remapping={'trajectories': 'trajectories_90'})

        # x:114 y:276
        OperatableStateMachine.add('Gen_Traj_to_90%_Limits',
                                   CalculationState(calculation=self.gen_traj_pre_limits),
                                   transitions={'done': 'Move_to_90%_Joint_Limits'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'input_value': 'target_limits', 'output_value': 'trajectories_90'})

        # x:636 y:78
        OperatableStateMachine.add('Go_back_to_90%_Joint_Limits',
                                   ExecuteTrajectoryBothArmsState(controllers=['left_arm_traj_controller', 'right_arm_traj_controller']),
                                   transitions={'done': 'Increment_Cycle_counter', 'failed': 'failed'},
                                   autonomy={'done': Autonomy.Off, 'failed': Autonomy.High},
                                   remapping={'trajectories': 'traj_back_to_90'})

        # x:636 y:172
        OperatableStateMachine.add('Gen_Traj_back_to_90%_Limits',
                                   FlexibleCalculationState(calculation=self.gen_traj_back_from_limits, input_keys=['trajectories_90', 'traj_past_limits']),
                                   transitions={'done': 'Go_back_to_90%_Joint_Limits'},
                                   autonomy={'done': Autonomy.Off},
                                   remapping={'trajectories_90': 'trajectories_90', 'traj_past_limits': 'traj_past_limits', 'output_value': 'traj_back_to_90'})

    with _state_machine:
        # x:110 y:52
        OperatableStateMachine.add('Initial_Control_Mode',
                                   ChangeControlModeActionState(target_mode=initial_mode),
                                   transitions={'changed': 'Perform_Checks', 'failed': 'failed'},
                                   autonomy={'changed': Autonomy.High, 'failed': Autonomy.High})

        # x:712 y:317
        OperatableStateMachine.add('Initial_Mode_before_exit',
                                   ChangeControlModeActionState(target_mode=initial_mode),
                                   transitions={'changed': 'Update_Calibration', 'failed': 'failed'},
                                   autonomy={'changed': Autonomy.Off, 'failed': Autonomy.High})

        # x:122 y:302
        OperatableStateMachine.add('Perform_Checks',
                                   _sm_perform_checks_3,
                                   transitions={'finished': 'Are_We_Done_Yet?', 'failed': 'Intermediate_Mode_before_exit'},
                                   autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                   remapping={'cycle_counter': 'cycle_counter', 'target_limits': 'target_limits', 'offsets': 'offsets'})

        # x:126 y:505
        OperatableStateMachine.add('Are_We_Done_Yet?',
                                   DecisionState(outcomes=["done", "more"], conditions=lambda counter: "done" if counter >= 2 else "more"),
                                   transitions={'done': 'Intermediate_Mode_before_exit', 'more': 'Setup_next_Cycle'},
                                   autonomy={'done': Autonomy.Low, 'more': Autonomy.High},
                                   remapping={'input_value': 'cycle_counter'})

        # x:15 y:404
        OperatableStateMachine.add('Setup_next_Cycle',
                                   CalculationState(calculation=lambda lim: 'lower' if lim == 'upper' else 'upper'),
                                   transitions={'done': 'Perform_Checks'},
                                   autonomy={'done': Autonomy.Low},
                                   remapping={'input_value': 'target_limits', 'output_value': 'target_limits'})

        # x:725 y:186
        OperatableStateMachine.add('Update_Calibration',
                                   _sm_update_calibration_2,
                                   transitions={'finished': 'finished', 'failed': 'failed'},
                                   autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                   remapping={'offsets': 'offsets'})

        # x:726 y:427
        OperatableStateMachine.add('Move_to_Stand_Posture',
                                   MoveitStartingPointState(vel_scaling=0.3),
                                   transitions={'reached': 'Initial_Mode_before_exit', 'failed': 'Move_to_Stand_Posture'},
                                   autonomy={'reached': Autonomy.Off, 'failed': Autonomy.Full},
                                   remapping={'trajectories': 'stand_posture'})

        # x:412 y:427
        OperatableStateMachine.add('Intermediate_Mode_before_exit',
                                   ChangeControlModeActionState(target_mode=motion_mode),
                                   transitions={'changed': 'Move_to_Stand_Posture', 'failed': 'failed'},
                                   autonomy={'changed': Autonomy.Off, 'failed': Autonomy.High})

    return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
def gen_traj_pre_limits(self, limits_side):
    """Create trajectories for going to 90 percent of the joint limits.

    limits_side -- 'upper' or 'lower', selecting which calibration posture to use.
    """
    joint_config = {arm: self._joint_calib[arm][limits_side]
                    for arm in ('left_arm', 'right_arm')}
    return AtlasFunctions.gen_arm_trajectory_from_joint_configuration(joint_config)
def _get_closest_limits(self, side, current_values):
    """
    For each joint, pick whichever limit (upper or lower) is closer to the
    joint's current value.  Ties go to the lower limit.
    """
    limits = self._joint_limits[side]
    closest_limit = []
    for value, upper, lower in zip(current_values, limits['upper'], limits['lower']):
        closest_limit.append(upper if abs(upper - value) < abs(lower - value) else lower)
    rospy.loginfo("Limit joint positions: %s" % str(closest_limit))
    rospy.loginfo("Current joint positions: %s" % str(current_values))
    return closest_limit
def get_closest_limits_left(self, current_values):
    """Convenience wrapper: closest joint limits for the left arm."""
    side = 'left_arm'
    return self._get_closest_limits(side, current_values)
def get_closest_limits_right(self, current_values):
    """Convenience wrapper: closest joint limits for the right arm."""
    side = 'right_arm'
    return self._get_closest_limits(side, current_values)
def gen_traj_past_limits(self, current_joint_values):
    """
    Generate trajectories that push the arm joints slightly past their limits.

    For each arm, the upper 4 (hydraulic) joints are commanded to their
    closest limit plus self._percent_past_limits of the joint's full range,
    with an effort of +/-1.0 in the direction of that limit.
    atlas_v5 update: the lower 3 (electric motor) joints keep their current
    positions with zero effort, i.e. they are NOT pushed past their limits.

    current_joint_values -- dict with keys 'left_arm'/'right_arm', each a
        list of 7 joint positions.
    Returns a dict mapping arm name to a single-point JointTrajectory.
    """
    result = dict()
    for arm in ['left_arm', 'right_arm']:
        current_values = current_joint_values[arm]
        arm_limits = self._get_closest_limits(arm, current_values)
        arm_target = list()
        arm_effort = list()
        percentage = self._percent_past_limits
        # Push the upper 4 joints against the limits
        for i in range(0, 4):
            near_limit = 'upper' if self._joint_limits[arm]['upper'][i] == arm_limits[i] else 'lower'
            limit_range = self._joint_limits[arm]['upper'][i] - self._joint_limits[arm]['lower'][i]
            # BUGFIX: compare strings with '==' instead of 'is' (identity),
            # which only worked by accident of CPython string interning.
            offset_sign = 1 if near_limit == 'upper' else -1
            arm_target.append(arm_limits[i] + offset_sign * percentage * limit_range)
            arm_effort.append(float(offset_sign))
        # "Ignore" the lower 3 joints (electric motor ones)
        for i in range(4, 7):
            arm_target.append(current_values[i])
            arm_effort.append(0.0)  # Zero effort stands for not applying additional force
        trajectory = JointTrajectory()
        trajectory.joint_names = self._joint_names[arm]
        point = JointTrajectoryPoint()
        point.positions = arm_target
        point.velocities = [0.0] * len(arm_target)  # David's controller expects zero velocities
        point.effort = arm_effort
        point.time_from_start = rospy.Duration.from_sec(2.5)
        trajectory.points.append(point)
        result[arm] = trajectory
    return result
def gen_traj_back_from_limits(self, input_keys):
    """
    Build the trajectories for moving back to 90% of the joint limits.

    The trajectory points are taken from the pre-limits (90%) trajectories,
    while the efforts are copied from the past-limits trajectories so that
    David's controllers keep pushing in the right direction.

    input_keys -- [traj_pre_limits, traj_past_limits], both dicts keyed by arm.
    Returns a dict mapping arm name to the adjusted JointTrajectory.
    NOTE(review): the pre-limits trajectories are modified in place.
    """
    pre_limits, past_limits = input_keys[0], input_keys[1]
    back_to_90 = dict()
    for arm in ('left_arm', 'right_arm'):
        traj = pre_limits[arm]  # start from the 90%-of-limits points
        first_point = traj.points[0]
        first_point.effort = past_limits[arm].points[0].effort  # efforts as per David's controllers
        first_point.time_from_start = rospy.Duration.from_sec(1.0)
        # David's controller expects zero velocities
        first_point.velocities = [0.0] * len(first_point.positions)
        back_to_90[arm] = traj
    return back_to_90
def store_offsets(self, side, input_keys):
    """
    Record the offsets (limit - measured value) for one arm and iteration,
    and publish them for monitoring.

    input_keys -- [joint_limits, joint_values, offsets, cycle_counter].
    Only the upper 4 (hydraulic) joints are considered.
    Returns the updated offsets dict.
    """
    limits, values, offsets, counter = input_keys[:4]
    relevant_limits = limits[0:4]   # ignore the lower 3 joints
    relevant_values = values[0:4]   # --//--
    offsets[side][counter] = [l - v for l, v in zip(relevant_limits, relevant_values)]
    msg = JointTrajectory()
    msg.joint_names = self._joint_names[side][0:4]  # ignore the lower 3 joints
    point = JointTrajectoryPoint()
    point.positions = relevant_values
    point.velocities = offsets[side][counter]  # offsets travel in the velocities field
    msg.points.append(point)
    self._pub.publish(self._offset_topic, msg)
    Logger.loginfo("Publishing %s arm offsets to %s" % (side, self._offset_topic))
    return offsets
def publish_offsets(self, offsets, arms=None, current_values=None):
    """
    Publish the averaged encoder offsets for the given arms.

    offsets -- dict: offsets[side]['avg'] holds the averaged joint offsets.
    arms -- iterable of arm names; defaults to both arms.
    current_values -- joint positions published in the positions field;
        defaults to an empty list.
    """
    # BUGFIX: use None sentinels instead of mutable default arguments,
    # which are shared across calls in Python.
    if arms is None:
        arms = ['left_arm', 'right_arm']
    if current_values is None:
        current_values = []
    for side in arms:
        msg = JointTrajectory()
        msg.joint_names = self._joint_names[side]
        point = JointTrajectoryPoint()
        point.positions = current_values
        point.velocities = offsets[side]['avg']  # offsets travel in the velocities field
        msg.points.append(point)
        self._pub.publish(self._offset_topic, msg)
        Logger.loginfo("Publishing %s arm offsets to %s" % (side, self._offset_topic))
def store_offsets_left(self, input_keys):
    """Store one iteration of left-arm offsets (see store_offsets)."""
    side = 'left_arm'
    return self.store_offsets(side, input_keys)
def store_offsets_right(self, input_keys):
    """Store one iteration of right-arm offsets (see store_offsets)."""
    side = 'right_arm'
    return self.store_offsets(side, input_keys)
def process_offsets(self, offsets):
    """
    Compute per-joint statistics over all calibration iterations.

    offsets[side] maps iteration counter -> list of per-joint offsets.
    Adds two entries per side:
      'avg'  -- mean offset of each joint over all iterations
      'diff' -- maximum absolute deviation from that mean
    Returns the augmented offsets dict.
    """
    for side in ['left_arm', 'right_arm']:
        # Transpose from [iteration][joint] to [joint][iteration].
        # BUGFIX: materialize the map() result; under Python 3 it is a
        # one-shot iterator, so the 'avg' comprehension below would exhaust
        # it and 'diff' would silently come out empty.
        iteration_values = list(map(list, zip(*offsets[side].values())))
        # Calculate the average offset and the deviation from the average
        offsets[side]['avg'] = [sum(joint_entries) / float(len(joint_entries)) for joint_entries in iteration_values]
        offsets[side]['diff'] = [max(map(lambda x: abs(x - avg), joint_entries)) for joint_entries, avg in zip(iteration_values, offsets[side]['avg'])]
    return offsets
def print_offset_info(self, offsets):
    """Log the averaged offsets per arm and pretty-print the raw offset data."""
    for side in ('left_arm', 'right_arm'):
        Logger.loginfo("Joint order (%s): %s" % (side, str(self._joint_names[side][0:4])))
        # Round due to comms_bridge
        rounded = [round(value, 3) for value in offsets[side]['avg']]
        Logger.loginfo("Offsets (%s): %s" % (side, str(rounded)))
        # Logger.loginfo("Max deviation from average (%s): %s" % (side, str(offsets[side]['diff'])))
    pprint.pprint(offsets)  # Pretty print to the "onboard" terminal
# [/MANUAL_FUNC] | 0.496582 | 0.209268 |
import numpy as np
import anndata as ad
import pandas as pd
from scipy.special import softmax
def generate_normal_uncorrelated(N, D, K, n_total, noise_std_true=1):
    """
    Scenario 1: Normally distributed, independent covariates

    Parameters
    ----------
    N -- int
        Number of samples
    D -- int
        Number of covariates
    K -- int
        Number of cell types
    n_total -- list
        Number of individual cells per sample
    noise_std_true -- float
        noise level. 0: No noise

    Returns
    -------
    data
        Anndata object
    """
    # Random composition parameters: bias (alpha) and weights (beta)
    b_true = np.random.normal(0, 1, size=K).astype(np.float32)
    w_true = np.random.normal(0, 1, size=(D, K)).astype(np.float32)

    # Random covariate matrix and per-sample noise
    x = np.random.normal(0, 1, size=(N, D)).astype(np.float32)
    noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)

    # Draw per-sample cell counts from a multinomial whose concentration is
    # the softmax of the linear model output (sums to 1 for each sample).
    # NOTE(review): noise[i, :] adds the same constant to every logit, which
    # softmax normalizes away -- the noise term has no effect here; confirm
    # whether per-cell-type noise was intended.
    y = np.zeros([N, K], dtype=np.float32)
    for i in range(N):
        logits = x[i, :] @ w_true + b_true + noise[i, :]
        concentration = softmax(logits).astype(np.float32)
        y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)

    covariate_names = ["x_" + str(n) for n in range(x.shape[1])]
    obs = pd.DataFrame(x, columns=covariate_names)
    return ad.AnnData(X=y, obs=obs, uns={"b_true": b_true, "w_true": w_true})
def generate_normal_correlated(N, D, K, n_total, noise_std_true, covariate_mean=None, covariate_var=None):
    """
    Scenario 2: Correlated covariates

    Parameters
    ----------
    N -- int
        Number of samples
    D -- int
        Number of covariates
    K -- int
        Number of cell types
    n_total -- list
        Number of individual cells per sample
    noise_std_true -- float
        noise level. 0: No noise
    covariate_mean -- numpy array [D]
        Mean of each covariate
    covariate_var -- numpy array [DxD]
        Covariance matrix for covariates

    Returns
    -------
    data
        Anndata object
    """
    if covariate_mean is None:
        covariate_mean = np.zeros(shape=D)

    if covariate_var is None:
        # Covariates drawn from MvNormal(0, Cov) with Cov_ij = p^|i-j|, p = 0.4
        # (Tibshirani (1996) construction for correlated covariates)
        p = 0.4
        idx = np.arange(D)
        covariate_var = p ** np.abs(idx[:, None] - idx[None, :])

    # Random composition parameters: bias (alpha) and weights (beta)
    b_true = np.random.normal(0, 1, size=K).astype(np.float32)
    w_true = np.random.normal(0, 1, size=(D, K)).astype(np.float32)

    # Random covariate matrix and per-sample noise
    x = np.random.multivariate_normal(size=N, mean=covariate_mean, cov=covariate_var).astype(np.float32)
    noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)

    # Draw per-sample cell counts from a multinomial whose concentration is
    # the softmax of the linear model output (sums to 1 for each sample).
    # NOTE(review): noise[i, :] adds the same constant to every logit, which
    # softmax normalizes away -- confirm whether that is intended.
    y = np.zeros([N, K], dtype=np.float32)
    for i in range(N):
        logits = x[i, :] @ w_true + b_true + noise[i, :]
        concentration = softmax(logits).astype(np.float32)
        y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)

    covariate_names = ["x_" + str(n) for n in range(x.shape[1])]
    obs = pd.DataFrame(x, columns=covariate_names)
    return ad.AnnData(X=y, obs=obs, uns={"b_true": b_true, "w_true": w_true})
def generate_normal_xy_correlated(N, D, K, n_total, noise_std_true=1,
                                  covariate_mean=None, covariate_var=None, sigma=None):
    """
    Scenario 3: Correlated cell types and covariates

    Parameters
    ----------
    N -- int
        Number of samples
    D -- int
        Number of covariates
    K -- int
        Number of cell types
    n_total -- list
        Number of individual cells per sample
    noise_std_true -- float
        noise level. 0: No noise
    covariate_mean -- numpy array [D]
        Mean of each covariate
    covariate_var -- numpy array [DxD]
        Covariance matrix for all covariates
    sigma -- numpy array [KxK]
        correlation matrix for cell types

    Returns
    -------
    data
        Anndata object
    """
    if covariate_mean is None:
        covariate_mean = np.zeros(shape=D)
    if sigma is None:
        sigma = np.identity(K)

    if covariate_var is None:
        # Covariates drawn from MvNormal(0, Cov) with Cov_ij = p^|i-j|, p = 0.4
        # (Tibshirani (1996) construction for correlated covariates)
        p = 0.4
        idx = np.arange(D)
        covariate_var = p ** np.abs(idx[:, None] - idx[None, :])

    # Random composition parameters: bias (alpha) and weights (beta)
    b_true = np.random.normal(0, 1, size=K).astype(np.float32)
    w_true = np.random.normal(0, 1, size=(D, K)).astype(np.float32)

    # Random covariate matrix and per-sample noise
    x = np.random.multivariate_normal(size=N, mean=covariate_mean, cov=covariate_var).astype(np.float32)
    noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)

    # Cell-type correlation enters through the covariance of the logits.
    # NOTE(review): noise[i, :] can be negative (randn), which scales sigma
    # into a non-positive-semidefinite "covariance" -- confirm intended.
    y = np.zeros([N, K], dtype=np.float32)
    for i in range(N):
        alpha = np.random.multivariate_normal(mean=x[i, :] @ w_true + b_true, cov=sigma * noise[i, :]).astype(np.float32)
        concentration = softmax(alpha).astype(np.float32)
        y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)

    covariate_names = ["x_" + str(n) for n in range(x.shape[1])]
    obs = pd.DataFrame(x, columns=covariate_names)
    return ad.AnnData(X=y, obs=obs, uns={"b_true": b_true, "w_true": w_true})
def sparse_effect_matrix(D, K, n_d, n_k):
    """
    Generates a sparse effect matrix.

    A random subset of n_d covariates and n_k cell types is selected; every
    (covariate, cell type) pair in that subset gets an effect drawn
    uniformly from {0.3, 0.5, 1}; all other entries are 0.

    Parameters
    ----------
    D -- int
        Number of covariates
    K -- int
        Number of cell types
    n_d -- int
        Number of covariates that effect a cell type
    n_k -- int
        Number of cell types that are affected by any covariate

    Returns
    -------
    w_true -- numpy array [D, K]
        Effect matrix
    """
    # Choose indices of affected cell types and covariates randomly
    d_eff = np.random.choice(range(D), size=n_d, replace=False)
    k_eff = np.random.choice(range(K), size=n_k, replace=False)
    # Possible entries of w_true
    w_choice = [0.3, 0.5, 1]
    w_true = np.zeros((D, K))
    # Fill in w_true
    for i in d_eff:
        for j in k_eff:
            # BUGFIX: draw a scalar index; np.random.choice(3, 1) returns a
            # length-1 array, and indexing a Python list with a numpy array
            # raises TypeError on current numpy versions.
            c = np.random.choice(3)
            w_true[i, j] = w_choice[c]
    return w_true
def generate_sparse_xy_correlated(N, D, K, n_total, noise_std_true=1,
                                  covariate_mean=None, covariate_var=None,
                                  sigma=None,
                                  b_true=None, w_true=None):
    """
    Scenario 4: Sparse true parameters

    Parameters
    ----------
    N -- int
        Number of samples
    D -- int
        Number of covariates
    K -- int
        Number of cell types
    n_total -- list
        Number of individual cells per sample
    noise_std_true -- float
        noise level. 0: No noise
    covariate_mean -- numpy array [D]
        Mean of each covariate
    covariate_var -- numpy array [DxD]
        Covariance matrix for all covariates
    sigma -- numpy array [KxK]
        correlation matrix for cell types
    b_true -- numpy array [K]
        bias coefficients
    w_true -- numpy array [DxK]
        Effect matrix

    Returns
    -------
    data
        Anndata object
    """
    if covariate_mean is None:
        covariate_mean = np.zeros(shape=D)
    if sigma is None:
        sigma = np.identity(K)

    if covariate_var is None:
        # Covariates drawn from MvNormal(0, Cov) with Cov_ij = p^|i-j|, p = 0.4
        # (Tibshirani (1996) construction for correlated covariates)
        p = 0.4
        idx = np.arange(D)
        covariate_var = p ** np.abs(idx[:, None] - idx[None, :])

    # Uniform intercepts if none are specified
    if b_true is None:
        b_true = np.random.uniform(-3, 3, size=K).astype(np.float32)

    # Randomly select covariates that should correlate if none are specified
    if w_true is None:
        n_d = np.random.choice(range(D), size=1)
        n_k = np.random.choice(range(K), size=1)
        w_true = sparse_effect_matrix(D, K, n_d, n_k)

    # Random covariate matrix and per-sample noise
    x = np.random.multivariate_normal(size=N, mean=covariate_mean, cov=covariate_var).astype(np.float32)
    noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)

    # Cell-type correlation enters through the covariance of the logits.
    y = np.zeros([N, K], dtype=np.float32)
    for i in range(N):
        alpha = np.random.multivariate_normal(mean=x[i, :] @ w_true + b_true, cov=sigma * noise[i, :]).astype(np.float32)
        concentration = softmax(alpha).astype(np.float32)
        y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)

    covariate_names = ["x_" + str(n) for n in range(x.shape[1])]
    obs = pd.DataFrame(x, columns=covariate_names)
    return ad.AnnData(X=y, obs=obs, uns={"b_true": b_true, "w_true": w_true})
def generate_case_control(cases=1, K=5, n_total=1000, n_samples=None, noise_std_true=0,
                          sigma=None, b_true=None, w_true=None):
    """
    Generates compositional data with binary (case/control) covariates

    Parameters
    ----------
    cases -- int
        number of binary covariates
    K -- int
        Number of cell types
    n_total -- int
        number of cells per sample
    n_samples -- list
        Number of samples per case combination as array[2**cases]; defaults to [5, 5]
    noise_std_true -- float
        noise level. 0: No noise - Not in use atm!!!
    sigma -- numpy array [KxK]
        correlation matrix for cell types
    b_true -- numpy array [K]
        bias coefficients
    w_true -- numpy array [DxK]
        Effect matrix

    Returns
    -------
    Anndata object with cell counts in X, binary covariates in obs, and the
    true parameters (b_true, w_true) in uns
    """
    # Avoid a shared mutable default argument.
    if n_samples is None:
        n_samples = [5, 5]
    # x has one binary column per case, so the effect matrix needs `cases`
    # rows. The previous `cases**2` broke the `x @ w_true` product below for
    # cases >= 2 (both expressions equal 1 for the default cases=1).
    D = cases
    # Uniform intercepts if none are specified
    if b_true is None:
        b_true = np.random.uniform(-3, 3, size=K).astype(np.float32)  # bias (alpha)
    # Randomly select covariates that should correlate if none are specified
    if w_true is None:
        n_d = np.random.choice(range(D), size=1)
        n_k = np.random.choice(range(K), size=1)
        w_true = sparse_effect_matrix(D, K, n_d, n_k)
    # Sigma is (scaled) identity if not specified
    if sigma is None:
        sigma = np.identity(K) * 0.05
    # Initialize x, y
    x = np.zeros((sum(n_samples), cases))
    y = np.zeros((sum(n_samples), K))
    c = 0

    # Binary representation of a combination index as a fixed-length 0/1 list
    def binary(num, length):
        return [int(bit) for bit in bin(num)[2:].zfill(length)]

    # For all combinations of cases
    for i in range(2**cases):
        # For each sample with this combination
        for j in range(n_samples[i]):
            # row of x is the binary representation of the combination
            x[c+j] = binary(i, cases)
            # Generate y: latent abundances drawn around x @ w + b, then counts
            alpha = np.random.multivariate_normal(mean=x[c+j, :].T @ w_true + b_true, cov=sigma).astype(
                np.float32)
            concentration = softmax(alpha).astype(np.float32)
            z = np.random.multinomial(n_total, concentration)
            y[c+j] = z
        c = c+n_samples[i]
    x = x.astype(np.float32)
    y = y.astype(np.float32)
    x_names = ["x_" + str(n) for n in range(x.shape[1])]
    x_df = pd.DataFrame(x, columns=x_names)
    data = ad.AnnData(X=y, obs=x_df, uns={"b_true": b_true, "w_true": w_true})
    return data
def b_w_from_abs_change(counts_before=np.array([200, 200, 200, 200, 200]), abs_change=50, n_total=1000):
    """
    Derive intercepts and slopes from control counts and an absolute change
    of the first cell type

    Parameters
    ----------
    counts_before -- numpy array
        cell counts for control samples
    abs_change -- int
        change of first cell type in terms of cell counts
    n_total -- int
        number of cells per sample. This stays constant over all samples!!!

    Returns
    -------
    intercepts -- numpy array
        intercept parameters (log relative abundances of the control counts)
    slopes -- numpy array
        slope parameters, shifted so the last (reference) cell type is zero
    """
    n_types = counts_before.shape[0]
    # Intercepts are log-relative abundances of the control counts.
    intercepts = np.log(counts_before / n_total)
    # After the effect: the first type gains `abs_change` cells and the
    # remainder of the constant total is spread evenly over the other types.
    first_after = counts_before[0] + abs_change
    rest_after = (n_total - first_after) / (n_types - 1)
    counts_after = np.full(n_types, rest_after)
    counts_after[0] = first_after
    # Slopes are the change in log relative abundance, re-referenced so the
    # last entry becomes zero.
    slopes = np.log(counts_after / n_total) - intercepts
    slopes = slopes - slopes[n_types - 1]
    return intercepts, slopes
def counts_from_first(b_0=200, n_total=1000, K=5):
    """
    Build a count vector where the first cell type has `b_0` cells and the
    remaining n_total - b_0 cells are split evenly over the other K - 1 types.

    Parameters
    ----------
    b_0 -- int
        cell count of the first cell type
    n_total -- int
        total number of cells per sample
    K -- int
        number of cell types

    Returns
    -------
    numpy array [K] of cell counts summing to n_total
    """
    # (An extraction artifact previously fused table residue onto the return
    # line of this function; the clean definition is restored here.)
    b = np.repeat((n_total-b_0)/(K-1), K)
    b[0] = b_0
    return b


import numpy as np
import anndata as ad
import pandas as pd
from scipy.special import softmax
def generate_normal_uncorrelated(N, D, K, n_total, noise_std_true=1):
    """
    Scenario 1: Normally distributed, independent covariates

    Draws i.i.d. standard-normal covariates and composition parameters, then
    samples per-sample cell counts from a multinomial whose concentration is
    softmax(x @ w + b + noise). Uses the global numpy RNG (no seed), so the
    output depends on the exact sequence of draws below.

    Parameters
    ----------
    N -- int
        Number of samples
    D -- int
        Number of covariates
    K -- int
        Number of cell types
    n_total -- list
        Number of individual cells per sample (one entry per sample)
    noise_std_true -- float
        noise level. 0: No noise

    Returns
    -------
    data
        Anndata object with cell counts in X, covariates in obs, and the true
        parameters (b_true, w_true) in uns
    """
    # Generate random composition parameters
    b_true = np.random.normal(0, 1, size=K).astype(np.float32)  # bias (alpha)
    w_true = np.random.normal(0, 1, size=(D, K)).astype(np.float32)  # weights (beta)
    # Generate random covariate matrix
    x = np.random.normal(0, 1, size=(N, D)).astype(np.float32)
    # One shared noise draw per sample, added to every cell type's logit
    noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)
    # Generate y
    y = np.zeros([N, K], dtype=np.float32)
    for i in range(N):
        # Concentration should sum to 1 for each sample
        concentration = softmax(x[i, :].T@w_true + b_true + noise[i, :]).astype(np.float32)
        y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)
    x_names = ["x_" + str(n) for n in range(x.shape[1])]
    x_df = pd.DataFrame(x, columns=x_names)
    data = ad.AnnData(X=y, obs=x_df, uns={"b_true": b_true, "w_true": w_true})
    return data
def generate_normal_correlated(N, D, K, n_total, noise_std_true, covariate_mean=None, covariate_var=None):
    """
    Scenario 2: Correlated covariates

    Like Scenario 1, but covariates are drawn from a multivariate normal with
    an AR(1)-style covariance Cov_ij = 0.4^|i-j| (unless one is supplied).
    Uses the global numpy RNG, so output depends on the exact draw order.

    Parameters
    ----------
    N -- int
        Number of samples
    D -- int
        Number of covariates
    K -- int
        Number of cell types
    n_total -- list
        Number of individual cells per sample (one entry per sample)
    noise_std_true -- float
        noise level. 0: No noise
    covariate_mean -- numpy array [D]
        Mean of each covariate
    covariate_var -- numpy array [DxD]
        Covariance matrix for covariates

    Returns
    -------
    data
        Anndata object with cell counts in X, covariates in obs, and the true
        parameters (b_true, w_true) in uns
    """
    if covariate_mean is None:
        covariate_mean = np.zeros(shape=D)
    # Generate randomized covariate covariance matrix if none is specified
    if covariate_var is None:
        # Covariates drawn from MvNormal(0, Cov), Cov_ij = p ^|i-j| , p=0.4
        # Tibshirani for correlated covariates: Tibshirani (1996)
        p = 0.4
        covariate_var = np.zeros((D, D))
        for i in range(D):
            for j in range(D):
                covariate_var[i, j] = p**np.abs(i-j)
    # Generate random composition parameters
    b_true = np.random.normal(0, 1, size=K).astype(np.float32)  # bias (alpha)
    w_true = np.random.normal(0, 1, size=(D, K)).astype(np.float32)  # weights (beta)
    # Generate random covariate matrix
    x = np.random.multivariate_normal(size=N, mean=covariate_mean, cov=covariate_var).astype(np.float32)
    # One shared noise draw per sample, added to every cell type's logit
    noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)
    # Generate y
    y = np.zeros([N, K], dtype=np.float32)
    for i in range(N):
        # Concentration should sum to 1 for each sample
        concentration = softmax(x[i, :].T @ w_true + b_true + noise[i, :]).astype(np.float32)
        y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)
    x_names = ["x_" + str(n) for n in range(x.shape[1])]
    x_df = pd.DataFrame(x, columns=x_names)
    data = ad.AnnData(X=y, obs=x_df, uns={"b_true": b_true, "w_true": w_true})
    return data
def generate_normal_xy_correlated(N, D, K, n_total, noise_std_true=1,
                                  covariate_mean=None, covariate_var=None, sigma=None):
    """
    Scenario 3: Correlated cell types and covariates

    Like Scenario 2, but the per-sample logits are themselves drawn from a
    multivariate normal with covariance sigma scaled by the sample's noise
    draw, correlating cell types. Uses the global numpy RNG.

    Parameters
    ----------
    N -- int
        Number of samples
    D -- int
        Number of covariates
    K -- int
        Number of cell types
    n_total -- list
        Number of individual cells per sample (one entry per sample)
    noise_std_true -- float
        noise level. 0: No noise
    covariate_mean -- numpy array [D]
        Mean of each covariate
    covariate_var -- numpy array [DxD]
        Covariance matrix for all covariates
    sigma -- numpy array [KxK]
        correlation matrix for cell types

    Returns
    -------
    data
        Anndata object with cell counts in X, covariates in obs, and the true
        parameters (b_true, w_true) in uns
    """
    if covariate_mean is None:
        covariate_mean = np.zeros(shape=D)
    if sigma is None:
        sigma = np.identity(K)
    # Generate randomized covariate covariance matrix if none is specified
    if covariate_var is None:
        # Covariates drawn from MvNormal(0, Cov), Cov_ij = p ^|i-j| , p=0.4
        # Tibshirani for correlated covariates: Tibshirani (1996)
        p = 0.4
        covariate_var = np.zeros((D, D))
        for i in range(D):
            for j in range(D):
                covariate_var[i, j] = p**np.abs(i-j)
    # Generate random composition parameters
    b_true = np.random.normal(0, 1, size=K).astype(np.float32)  # bias (alpha)
    w_true = np.random.normal(0, 1, size=(D, K)).astype(np.float32)  # weights (beta)
    # Generate random covariate matrix
    x = np.random.multivariate_normal(size=N, mean=covariate_mean, cov=covariate_var).astype(np.float32)
    noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)
    # Generate y
    y = np.zeros([N, K], dtype=np.float32)
    for i in range(N):
        # Each row of y is now influenced by sigma.
        # NOTE(review): noise[i, :] is a single N(0, std^2) draw and can be
        # negative, making `sigma * noise[i, :]` an invalid (non-PSD)
        # covariance; numpy does not strictly validate this -- confirm intent.
        alpha = np.random.multivariate_normal(mean=x[i, :].T@w_true + b_true, cov=sigma*noise[i, :]).astype(np.float32)
        concentration = softmax(alpha).astype(np.float32)
        y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)
    x_names = ["x_" + str(n) for n in range(x.shape[1])]
    x_df = pd.DataFrame(x, columns=x_names)
    data = ad.AnnData(X=y, obs=x_df, uns={"b_true": b_true, "w_true": w_true})
    return data
def sparse_effect_matrix(D, K, n_d, n_k):
"""
Generates a sparse effect matrix
Parameters
----------
D -- int
Number of covariates
K -- int
Number of cell types
n_d -- int
Number of covariates that effect a cell type
n_k -- int
Number of cell types that are affected by any covariate
Returns
-------
w_true
Effect matrix
"""
# Choose indices of affected cell types and covariates randomly
d_eff = np.random.choice(range(D), size=n_d, replace=False)
k_eff = np.random.choice(range(K), size=n_k, replace=False)
# Possible entries of w_true
w_choice = [0.3, 0.5, 1]
w_true = np.zeros((D, K))
# Fill in w_true
for i in d_eff:
for j in k_eff:
c = np.random.choice(3, 1)
w_true[i, j] = w_choice[c]
return w_true
def generate_sparse_xy_correlated(N, D, K, n_total, noise_std_true=1,
                                  covariate_mean=None, covariate_var=None,
                                  sigma=None,
                                  b_true=None, w_true=None):
    """
    Scenario 4: Sparse true parameters

    Like Scenario 3, but intercepts default to Uniform(-3, 3) and the effect
    matrix defaults to a sparse one from sparse_effect_matrix(). Uses the
    global numpy RNG, so output depends on the exact draw order below.

    Parameters
    ----------
    N -- int
        Number of samples
    D -- int
        Number of covariates
    K -- int
        Number of cell types
    n_total -- list
        Number of individual cells per sample (one entry per sample)
    noise_std_true -- float
        noise level. 0: No noise
    covariate_mean -- numpy array [D]
        Mean of each covariate
    covariate_var -- numpy array [DxD]
        Covariance matrix for all covariates
    sigma -- numpy array [KxK]
        correlation matrix for cell types
    b_true -- numpy array [K]
        bias coefficients
    w_true -- numpy array [DxK]
        Effect matrix

    Returns
    -------
    data
        Anndata object with cell counts in X, covariates in obs, and the true
        parameters (b_true, w_true) in uns
    """
    if covariate_mean is None:
        covariate_mean = np.zeros(shape=D)
    if sigma is None:
        sigma = np.identity(K)
    # Generate randomized covariate covariance matrix if none is specified
    if covariate_var is None:
        # Covariates drawn from MvNormal(0, Cov), Cov_ij = p ^|i-j| , p=0.4
        # Tibshirani for correlated covariates: Tibshirani (1996)
        p = 0.4
        covariate_var = np.zeros((D, D))
        for i in range(D):
            for j in range(D):
                covariate_var[i, j] = p ** np.abs(i - j)
    # Uniform intercepts if none are specified
    if b_true is None:
        b_true = np.random.uniform(-3,3, size=K).astype(np.float32)  # bias (alpha)
    # Randomly select covariates that should correlate if none are specified
    if w_true is None:
        n_d = np.random.choice(range(D), size=1)
        n_k = np.random.choice(range(K), size=1)
        w_true = sparse_effect_matrix(D, K, n_d, n_k)
    # Generate random covariate matrix
    x = np.random.multivariate_normal(size=N, mean=covariate_mean, cov=covariate_var).astype(np.float32)
    noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)
    # Generate y
    y = np.zeros([N, K], dtype=np.float32)
    for i in range(N):
        # Each row of y is now influenced by sigma.
        # NOTE(review): noise[i, :] can be negative, making the covariance
        # `sigma * noise[i, :]` non-PSD -- same concern as Scenario 3.
        alpha = np.random.multivariate_normal(mean=x[i, :].T @ w_true + b_true, cov=sigma * noise[i, :]).astype(
            np.float32)
        concentration = softmax(alpha).astype(np.float32)
        y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)
    x_names = ["x_" + str(n) for n in range(x.shape[1])]
    x_df = pd.DataFrame(x, columns=x_names)
    data = ad.AnnData(X=y, obs=x_df, uns={"b_true": b_true, "w_true": w_true})
    return data
def generate_case_control(cases=1, K=5, n_total=1000, n_samples=None, noise_std_true=0,
                          sigma=None, b_true=None, w_true=None):
    """
    Generates compositional data with binary (case/control) covariates

    Parameters
    ----------
    cases -- int
        number of binary covariates
    K -- int
        Number of cell types
    n_total -- int
        number of cells per sample
    n_samples -- list
        Number of samples per case combination as array[2**cases]; defaults to [5, 5]
    noise_std_true -- float
        noise level. 0: No noise - Not in use atm!!!
    sigma -- numpy array [KxK]
        correlation matrix for cell types
    b_true -- numpy array [K]
        bias coefficients
    w_true -- numpy array [DxK]
        Effect matrix

    Returns
    -------
    Anndata object with cell counts in X, binary covariates in obs, and the
    true parameters (b_true, w_true) in uns
    """
    # Avoid a shared mutable default argument.
    if n_samples is None:
        n_samples = [5, 5]
    # x has one binary column per case, so the effect matrix needs `cases`
    # rows. The previous `cases**2` broke the `x @ w_true` product below for
    # cases >= 2 (both expressions equal 1 for the default cases=1).
    D = cases
    # Uniform intercepts if none are specified
    if b_true is None:
        b_true = np.random.uniform(-3, 3, size=K).astype(np.float32)  # bias (alpha)
    # Randomly select covariates that should correlate if none are specified
    if w_true is None:
        n_d = np.random.choice(range(D), size=1)
        n_k = np.random.choice(range(K), size=1)
        w_true = sparse_effect_matrix(D, K, n_d, n_k)
    # Sigma is (scaled) identity if not specified
    if sigma is None:
        sigma = np.identity(K) * 0.05
    # Initialize x, y
    x = np.zeros((sum(n_samples), cases))
    y = np.zeros((sum(n_samples), K))
    c = 0

    # Binary representation of a combination index as a fixed-length 0/1 list
    def binary(num, length):
        return [int(bit) for bit in bin(num)[2:].zfill(length)]

    # For all combinations of cases
    for i in range(2**cases):
        # For each sample with this combination
        for j in range(n_samples[i]):
            # row of x is the binary representation of the combination
            x[c+j] = binary(i, cases)
            # Generate y: latent abundances drawn around x @ w + b, then counts
            alpha = np.random.multivariate_normal(mean=x[c+j, :].T @ w_true + b_true, cov=sigma).astype(
                np.float32)
            concentration = softmax(alpha).astype(np.float32)
            z = np.random.multinomial(n_total, concentration)
            y[c+j] = z
        c = c+n_samples[i]
    x = x.astype(np.float32)
    y = y.astype(np.float32)
    x_names = ["x_" + str(n) for n in range(x.shape[1])]
    x_df = pd.DataFrame(x, columns=x_names)
    data = ad.AnnData(X=y, obs=x_df, uns={"b_true": b_true, "w_true": w_true})
    return data
def b_w_from_abs_change(counts_before=np.array([200, 200, 200, 200, 200]), abs_change=50, n_total=1000):
    """
    Derive intercepts and slopes from control counts and an absolute change
    of the first cell type

    Parameters
    ----------
    counts_before -- numpy array
        cell counts for control samples
    abs_change -- int
        change of first cell type in terms of cell counts
    n_total -- int
        number of cells per sample. This stays constant over all samples!!!

    Returns
    -------
    intercepts -- numpy array
        intercept parameters (log relative abundances of the control counts)
    slopes -- numpy array
        slope parameters, shifted so the last (reference) cell type is zero
    """
    n_types = counts_before.shape[0]
    # Intercepts are log-relative abundances of the control counts.
    intercepts = np.log(counts_before / n_total)
    # After the effect: the first type gains `abs_change` cells and the
    # remainder of the constant total is spread evenly over the other types.
    first_after = counts_before[0] + abs_change
    rest_after = (n_total - first_after) / (n_types - 1)
    counts_after = np.full(n_types, rest_after)
    counts_after[0] = first_after
    # Slopes are the change in log relative abundance, re-referenced so the
    # last entry becomes zero.
    slopes = np.log(counts_after / n_total) - intercepts
    slopes = slopes - slopes[n_types - 1]
    return intercepts, slopes
def counts_from_first(b_0=200, n_total=1000, K=5):
    """
    Build a count vector where the first cell type has `b_0` cells and the
    remaining n_total - b_0 cells are split evenly over the other K - 1 types.

    Parameters
    ----------
    b_0 -- int
        cell count of the first cell type
    n_total -- int
        total number of cells per sample
    K -- int
        number of cell types

    Returns
    -------
    numpy array [K] of cell counts summing to n_total
    """
    # (An extraction artifact previously fused numeric table residue onto the
    # return line of this function; the clean definition is restored here.)
    b = np.repeat((n_total-b_0)/(K-1), K)
    b[0] = b_0
    return b
from enum import Enum
from numpy import asarray
from PIL import Image
from face_embedding_engine import FaceEmbeddingModelEnum
# We use descriptive variable and function names so
# disable the pylint warning for long lines
# pylint: disable=line-too-long
# Path to the Edge-TPU-compiled, quantized SSD MobileNet V2 face-detection
# model used by the SSD_MOBILENET_V2 detection method below.
SSD_MOBILENET_V2_FACE_MODEL = 'models/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite' # trained with Celebrity imageset
class FaceDetectionMethodEnum(Enum):
    ''' enum DetectionMethod
    Enumerates all methods supported for detecting faces.
    Which member is usable depends on the platform the code runs on.
    '''
    MTCNN = 1 # currently only supported on Ubuntu
    SSD_MOBILENET_V2 = 2 # currently only supported on Coral dev board
class FaceDetectionEngine:
    ''' class FaceDetectionEngine
    Purpose: detect faces in an image
    '''

    def __init__(self, detection_method):
        ''' function constructor
        Constructor for FaceDetectionEngine
        Args:
            detection_method (FaceDetectionMethodEnum): Method to use for detection
        Raises:
            Exception: if detection_method is not a supported method
        Returns:
            None
        '''
        # We only want to import these modules at run-time since
        # they will only be installed on certain platforms.
        # pylint: disable=import-outside-toplevel, import-error
        self.detection_method = detection_method
        if self.detection_method == FaceDetectionMethodEnum.MTCNN:
            # create the MTCNN detector, using default weights
            print("Using MTCNN for face detection")
            from mtcnn.mtcnn import MTCNN
            self.face_detection_engine = MTCNN()
        elif self.detection_method == FaceDetectionMethodEnum.SSD_MOBILENET_V2:
            # load the MobileNet V2 SSD Face model
            print("Using SSD MobileNet V2 for face detection")
            from edgetpu.detection.engine import DetectionEngine
            self.face_detection_engine = DetectionEngine(SSD_MOBILENET_V2_FACE_MODEL)
        else:
            raise Exception("Invalid detection method: {}".format(detection_method))

    def detect_faces(self, rgb_array):
        ''' function detect_faces
        Detect any faces that are present in the given image.
        Args:
            rgb_array (numpy.ndarray): An image that may or may not contain faces
        Returns:
            A list of bounding boxes (top_left_x, top_left_y, width, height),
            one per detected face; empty when no face is found
        '''
        results = []  # assume no faces are detected
        if self.detection_method == FaceDetectionMethodEnum.MTCNN:
            # MTCNN already reports boxes as (x, y, width, height)
            for detected_face in self.face_detection_engine.detect_faces(rgb_array):
                results.append(tuple(detected_face['box']))
        else:  # FaceDetectionMethodEnum.SSD_MOBILENET_V2
            frame_as_image = Image.fromarray(rgb_array)
            detected_faces = self.face_detection_engine.detect_with_image(
                frame_as_image,
                threshold=0.5,
                keep_aspect_ratio=True,
                relative_coord=False,
                top_k=5,
                resample=Image.BOX)
            for detected_face in detected_faces:
                # convert the (x1, y1, x2, y2) corners to (x, y, width, height)
                x_1, y_1, x_2, y_2 = detected_face.bounding_box.flatten().astype("int")
                results.append((x_1, y_1, abs(x_2 - x_1), abs(y_2 - y_1)))
        return results

    def extract_face(self, rgb_array, embedding_model):
        ''' function extract_face
        Extract a single face (the first one detected) from the given frame
        Args:
            rgb_array (numpy.ndarray): The image that may or may not contain
                one or more faces
            embedding_model (FaceEmbeddingModelEnum): The model being used for generating
                embeddings for face images
        Returns:
            If a face is detected, an RGB numpy.ndarray of the face cropped from
            the given frame and resized to 160x160 pixels.
            Otherwise an empty list.
        '''
        detected_faces = self.detect_faces(rgb_array)
        if len(detected_faces) == 0:
            return []
        # a zero-width box cannot be cropped/resized
        if detected_faces[0][2] == 0:
            return []
        x_1, y_1, width, height = tuple(detected_faces[0])
        # guard against negative coordinates from the detector
        x_1, y_1 = abs(x_1), abs(y_1)
        x_2, y_2 = x_1 + width, y_1 + height
        # extract a cropped image of the detected face
        face = rgb_array[y_1:y_2, x_1:x_2]
        # NOTE(review): the resize is hard-coded to 160x160 and `embedding_model`
        # is never consulted, although the docstring originally promised
        # model-specific dimensions -- confirm 160x160 suits every
        # FaceEmbeddingModelEnum value before relying on this.
        image = Image.fromarray(face)
        image = image.resize((160, 160))
        # convert image to numpy array
        face_rgb_array = asarray(image)
        return face_rgb_array


from enum import Enum
from numpy import asarray
from PIL import Image
from face_embedding_engine import FaceEmbeddingModelEnum
# We use descriptive variable and function names so
# disable the pylint warning for long lines
# pylint: disable=line-too-long
# Path to the Edge-TPU-compiled, quantized SSD MobileNet V2 face-detection
# model used by the SSD_MOBILENET_V2 detection method below.
SSD_MOBILENET_V2_FACE_MODEL = 'models/ssd_mobilenet_v2_face_quant_postprocess_edgetpu.tflite' # trained with Celebrity imageset
class FaceDetectionMethodEnum(Enum):
    ''' enum DetectionMethod
    Enumerates all methods supported for detecting faces.
    Which member is usable depends on the platform the code runs on.
    '''
    MTCNN = 1 # currently only supported on Ubuntu
    SSD_MOBILENET_V2 = 2 # currently only supported on Coral dev board
class FaceDetectionEngine:
    ''' class FaceDetectionEngine
    Purpose: detect faces in an image
    '''

    def __init__(self, detection_method):
        ''' function constructor
        Constructor for FaceDetectionEngine
        Args:
            detection_method (FaceDetectionMethodEnum): Method to use for detection
        Raises:
            Exception: if detection_method is not a supported method
        Returns:
            None
        '''
        # We only want to import these modules at run-time since
        # they will only be installed on certain platforms.
        # pylint: disable=import-outside-toplevel, import-error
        self.detection_method = detection_method
        if self.detection_method == FaceDetectionMethodEnum.MTCNN:
            # create the MTCNN detector, using default weights
            print("Using MTCNN for face detection")
            from mtcnn.mtcnn import MTCNN
            self.face_detection_engine = MTCNN()
        elif self.detection_method == FaceDetectionMethodEnum.SSD_MOBILENET_V2:
            # load the MobileNet V2 SSD Face model
            print("Using SSD MobileNet V2 for face detection")
            from edgetpu.detection.engine import DetectionEngine
            self.face_detection_engine = DetectionEngine(SSD_MOBILENET_V2_FACE_MODEL)
        else:
            raise Exception("Invalid detection method: {}".format(detection_method))

    def detect_faces(self, rgb_array):
        ''' function detect_faces
        Detect any faces that are present in the given image.
        Args:
            rgb_array (numpy.ndarray): An image that may or may not contain faces
        Returns:
            A list of bounding boxes (top_left_x, top_left_y, width, height),
            one per detected face; empty when no face is found
        '''
        results = []  # assume no faces are detected
        if self.detection_method == FaceDetectionMethodEnum.MTCNN:
            # MTCNN already reports boxes as (x, y, width, height)
            for detected_face in self.face_detection_engine.detect_faces(rgb_array):
                results.append(tuple(detected_face['box']))
        else:  # FaceDetectionMethodEnum.SSD_MOBILENET_V2
            frame_as_image = Image.fromarray(rgb_array)
            detected_faces = self.face_detection_engine.detect_with_image(
                frame_as_image,
                threshold=0.5,
                keep_aspect_ratio=True,
                relative_coord=False,
                top_k=5,
                resample=Image.BOX)
            for detected_face in detected_faces:
                # convert the (x1, y1, x2, y2) corners to (x, y, width, height)
                x_1, y_1, x_2, y_2 = detected_face.bounding_box.flatten().astype("int")
                results.append((x_1, y_1, abs(x_2 - x_1), abs(y_2 - y_1)))
        return results

    def extract_face(self, rgb_array, embedding_model):
        ''' function extract_face
        Extract a single face (the first one detected) from the given frame
        Args:
            rgb_array (numpy.ndarray): The image that may or may not contain
                one or more faces
            embedding_model (FaceEmbeddingModelEnum): The model being used for generating
                embeddings for face images
        Returns:
            If a face is detected, an RGB numpy.ndarray of the face cropped from
            the given frame and resized to 160x160 pixels.
            Otherwise an empty list.
        '''
        detected_faces = self.detect_faces(rgb_array)
        if len(detected_faces) == 0:
            return []
        # a zero-width box cannot be cropped/resized
        if detected_faces[0][2] == 0:
            return []
        x_1, y_1, width, height = tuple(detected_faces[0])
        # guard against negative coordinates from the detector
        x_1, y_1 = abs(x_1), abs(y_1)
        x_2, y_2 = x_1 + width, y_1 + height
        # extract a cropped image of the detected face
        face = rgb_array[y_1:y_2, x_1:x_2]
        # NOTE(review): the resize is hard-coded to 160x160 and `embedding_model`
        # is never consulted, although the docstring originally promised
        # model-specific dimensions -- confirm 160x160 suits every
        # FaceEmbeddingModelEnum value before relying on this.
        image = Image.fromarray(face)
        image = image.resize((160, 160))
        # convert image to numpy array
        face_rgb_array = asarray(image)
        return face_rgb_array
from __future__ import division
import torch
import torch.nn as nn
from torch.nn import init
import numbers
import torch.nn.functional as F
from logging import getLogger
from libcity.model.abstract_traffic_state_model import AbstractTrafficStateModel
from libcity.model import loss
class NConv(nn.Module):
    """Static graph convolution.

    Mixes node features with a fixed adjacency:
    out[n, c, v, l] = sum_w adj[v, w] * x[n, c, w, l].
    """

    def __init__(self):
        super(NConv, self).__init__()

    def forward(self, x, adj):
        # Aggregate neighbour features along the node axis.
        mixed = torch.einsum('ncwl,vw->ncvl', (x, adj))
        return mixed.contiguous()
class DyNconv(nn.Module):
    """Dynamic graph convolution with a per-sample, per-step adjacency.

    out[n, c, w, l] = sum_v x[n, c, v, l] * adj[n, v, w, l].
    """

    def __init__(self):
        super(DyNconv, self).__init__()

    def forward(self, x, adj):
        # Mix node features using the (batched, time-varying) adjacency.
        mixed = torch.einsum('ncvl,nvwl->ncwl', (x, adj))
        return mixed.contiguous()
class Linear(nn.Module):
    """Channel-wise linear map implemented as a 1x1 Conv2d.

    Applies the same c_in -> c_out linear transform at every (node, time)
    position of a [batch, channels, nodes, time] tensor.
    """

    def __init__(self, c_in, c_out, bias=True):
        super(Linear, self).__init__()
        self.mlp = nn.Conv2d(c_in, c_out, kernel_size=(1, 1), padding=(0, 0), stride=(1, 1), bias=bias)

    def forward(self, x):
        out = self.mlp(x)
        return out
class Prop(nn.Module):
    """Propagation layer: gdep rounds of graph smoothing, then a 1x1 conv.

    Each round mixes a residual term alpha * x with the neighbour
    aggregation (1 - alpha) * A_norm @ h, where A_norm is the row-normalised
    adjacency with self-loops.
    """

    def __init__(self, c_in, c_out, gdep, dropout, alpha):
        super(Prop, self).__init__()
        self.nconv = NConv()
        self.mlp = Linear(c_in, c_out)
        self.gdep = gdep
        self.dropout = dropout  # NOTE(review): stored but never applied in forward
        self.alpha = alpha

    def forward(self, x, adj):
        # Add self-loops and row-normalise the adjacency.
        adj = adj + torch.eye(adj.size(0)).to(x.device)
        row_sum = adj.sum(1)
        norm_adj = adj / row_sum.view(-1, 1)
        h = x
        for _ in range(self.gdep):
            h = self.alpha * x + (1 - self.alpha) * self.nconv(h, norm_adj)
        return self.mlp(h)
class MixProp(nn.Module):
    """Mix-hop propagation layer (MTGNN).

    Collects the input plus gdep propagated versions of it, concatenates all
    gdep + 1 hops on the channel axis, and projects them with a 1x1 conv.
    """

    def __init__(self, c_in, c_out, gdep, dropout, alpha):
        super(MixProp, self).__init__()
        self.nconv = NConv()
        self.mlp = Linear((gdep+1)*c_in, c_out)
        self.gdep = gdep
        self.dropout = dropout  # NOTE(review): stored but never applied in forward
        self.alpha = alpha

    def forward(self, x, adj):
        # Add self-loops and row-normalise the adjacency.
        adj = adj + torch.eye(adj.size(0)).to(x.device)
        norm_adj = adj / adj.sum(1).view(-1, 1)
        h = x
        hops = [h]
        for _ in range(self.gdep):
            h = self.alpha * x + (1 - self.alpha) * self.nconv(h, norm_adj)
            hops.append(h)
        return self.mlp(torch.cat(hops, dim=1))
class DyMixprop(nn.Module):
    """Mix-hop propagation over a dynamic adjacency learned from the input.

    Builds a node-similarity adjacency from two linear projections of x and
    runs mix-hop propagation along both edge directions (the adjacency and
    its transpose), summing the two projected results.
    """

    def __init__(self, c_in, c_out, gdep, dropout, alpha):
        super(DyMixprop, self).__init__()
        self.nconv = DyNconv()
        self.mlp1 = Linear((gdep+1)*c_in, c_out)
        self.mlp2 = Linear((gdep+1)*c_in, c_out)
        self.gdep = gdep
        self.dropout = dropout  # NOTE(review): stored but never applied in forward
        self.alpha = alpha
        self.lin1 = Linear(c_in, c_in)
        self.lin2 = Linear(c_in, c_in)

    def _hops(self, x, adj):
        # Collect x plus gdep propagated versions; concatenate on channels.
        h = x
        collected = [h]
        for _ in range(self.gdep):
            h = self.alpha * x + (1 - self.alpha) * self.nconv(h, adj)
            collected.append(h)
        return torch.cat(collected, dim=1)

    def forward(self, x):
        proj1 = torch.tanh(self.lin1(x))
        proj2 = torch.tanh(self.lin2(x))
        # Dynamic adjacency from pairwise feature products, normalised per row
        # (forward direction) and per column (backward direction).
        adj = self.nconv(proj1.transpose(2, 1), proj2)
        adj_fwd = torch.softmax(adj, dim=2)
        adj_bwd = torch.softmax(adj.transpose(2, 1), dim=2)
        out_fwd = self.mlp1(self._hops(x, adj_fwd))
        out_bwd = self.mlp2(self._hops(x, adj_bwd))
        return out_fwd + out_bwd
class Dilated1D(nn.Module):
    """Single dilated temporal convolution (kernel width 7) along the last axis.

    Output length shrinks by 6 * dilation_factor.
    """

    def __init__(self, cin, cout, dilation_factor=2):
        super(Dilated1D, self).__init__()
        # Kept for parity with DilatedInception; only the width-7 kernel is used.
        self.kernel_set = [2, 3, 6, 7]
        # (The previous `self.tconv = nn.ModuleList()` was dead code: it was
        # immediately overwritten by the Conv2d below.)
        self.tconv = nn.Conv2d(cin, cout, (1, 7), dilation=(1, dilation_factor))

    def forward(self, inputs):
        x = self.tconv(inputs)
        return x
class DilatedInception(nn.Module):
    """Inception-style dilated temporal convolution.

    Splits cout evenly over kernel widths {2, 3, 6, 7}, applies each dilated
    conv along the last axis, crops every branch to the shortest (width-7)
    output length, and concatenates the branches on the channel axis.
    """

    def __init__(self, cin, cout, dilation_factor=2):
        super(DilatedInception, self).__init__()
        self.tconv = nn.ModuleList()
        self.kernel_set = [2, 3, 6, 7]
        cout = int(cout/len(self.kernel_set))
        for kern in self.kernel_set:
            self.tconv.append(nn.Conv2d(cin, cout, (1, kern), dilation=(1, dilation_factor)))

    def forward(self, input):
        branches = [conv(input) for conv in self.tconv]
        # The largest kernel yields the shortest output; align all branches to it.
        target_len = branches[-1].size(3)
        branches = [b[..., -target_len:] for b in branches]
        return torch.cat(branches, dim=1)
class GraphConstructor(nn.Module):
    """MTGNN graph-learning layer: learns a directed adjacency over nodes.

    forward() returns the adjacency sparsified to the k strongest outgoing
    edges per node; fulla() returns the dense version.
    """

    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(GraphConstructor, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            xd = static_feat.shape[1]
            self.lin1 = nn.Linear(xd, dim)
            self.lin2 = nn.Linear(xd, dim)
        else:
            self.emb1 = nn.Embedding(nnodes, dim)
            self.emb2 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim, dim)
            self.lin2 = nn.Linear(dim, dim)
        self.device = device
        self.k = k
        self.dim = dim
        self.alpha = alpha
        self.static_feat = static_feat

    def _dense_adj(self, idx):
        # Node representations: learned embeddings, or the given static features.
        if self.static_feat is None:
            vec1 = self.emb1(idx)
            vec2 = self.emb2(idx)
        else:
            vec1 = self.static_feat[idx, :]
            vec2 = vec1
        vec1 = torch.tanh(self.alpha * self.lin1(vec1))
        vec2 = torch.tanh(self.alpha * self.lin2(vec2))
        # Antisymmetric score followed by relu -> directed graph.
        scores = torch.mm(vec1, vec2.transpose(1, 0)) - torch.mm(vec2, vec1.transpose(1, 0))
        return F.relu(torch.tanh(self.alpha * scores))

    def forward(self, idx):
        adj = self._dense_adj(idx)
        # Keep only the k strongest outgoing edges per node.
        mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
        vals, cols = adj.topk(self.k, 1)
        mask.scatter_(1, cols, torch.ones_like(vals))
        return adj * mask

    def fulla(self, idx):
        return self._dense_adj(idx)
class GraphGlobal(nn.Module):
    """Fully learnable global adjacency: one free parameter per node pair.

    k, dim, alpha and static_feat are accepted only for interface parity with
    the other graph constructors and are ignored; idx is also ignored and the
    full relu'd parameter matrix is always returned.
    """

    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(GraphGlobal, self).__init__()
        self.nnodes = nnodes
        # Create the parameter directly on the target device. The previous
        # `nn.Parameter(...).to(device)` could return a plain non-leaf tensor
        # when it actually copies across devices, silently leaving the
        # parameter unregistered with the module.
        self.A = nn.Parameter(torch.randn(nnodes, nnodes, device=torch.device(device)), requires_grad=True)

    def forward(self, idx):
        # Non-negative adjacency; idx is intentionally unused.
        return F.relu(self.A)
class GraphUndirected(nn.Module):
    """Learns an undirected graph: both endpoints share one embedding/projection,
    so the pre-sparsification score matrix is symmetric. forward() returns the
    adjacency sparsified to the k strongest edges per row.
    """

    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(GraphUndirected, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            xd = static_feat.shape[1]
            self.lin1 = nn.Linear(xd, dim)
        else:
            self.emb1 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim, dim)
        self.device = device
        self.k = k
        self.dim = dim
        self.alpha = alpha
        self.static_feat = static_feat

    def forward(self, idx):
        # Both endpoints use the same embedding/projection -> symmetric scores.
        if self.static_feat is None:
            vec1 = self.emb1(idx)
            vec2 = self.emb1(idx)
        else:
            vec1 = self.static_feat[idx, :]
            vec2 = vec1
        vec1 = torch.tanh(self.alpha * self.lin1(vec1))
        vec2 = torch.tanh(self.alpha * self.lin1(vec2))
        adj = F.relu(torch.tanh(self.alpha * torch.mm(vec1, vec2.transpose(1, 0))))
        # Keep the k strongest edges per row.
        mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
        vals, cols = adj.topk(self.k, 1)
        mask.scatter_(1, cols, torch.ones_like(vals))
        return adj * mask
class GraphDirected(nn.Module):
    """Learns a directed graph from two separate node embeddings.

    Unlike GraphConstructor there is no antisymmetric term: the score is the
    one-sided product of the two projections. forward() returns the adjacency
    sparsified to the k strongest outgoing edges per node.
    """

    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(GraphDirected, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            xd = static_feat.shape[1]
            self.lin1 = nn.Linear(xd, dim)
            self.lin2 = nn.Linear(xd, dim)
        else:
            self.emb1 = nn.Embedding(nnodes, dim)
            self.emb2 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim, dim)
            self.lin2 = nn.Linear(dim, dim)
        self.device = device
        self.k = k
        self.dim = dim
        self.alpha = alpha
        self.static_feat = static_feat

    def forward(self, idx):
        # Source and target node representations.
        if self.static_feat is None:
            vec1 = self.emb1(idx)
            vec2 = self.emb2(idx)
        else:
            vec1 = self.static_feat[idx, :]
            vec2 = vec1
        vec1 = torch.tanh(self.alpha * self.lin1(vec1))
        vec2 = torch.tanh(self.alpha * self.lin2(vec2))
        adj = F.relu(torch.tanh(self.alpha * torch.mm(vec1, vec2.transpose(1, 0))))
        # Keep the k strongest outgoing edges per node.
        mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
        vals, cols = adj.topk(self.k, 1)
        mask.scatter_(1, cols, torch.ones_like(vals))
        return adj * mask
class LayerNorm(nn.Module):
    """Layer normalisation with node-indexed affine parameters.

    Unlike nn.LayerNorm, the learned weight/bias are allocated for all nodes
    and sliced by the node-index tensor `idx` in forward, so a subgraph batch
    is normalised with its own nodes' affine parameters.
    """
    __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine']

    def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
        super(LayerNorm, self).__init__()
        # Accept a single int as shorthand for a 1-tuple shape.
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = tuple(normalized_shape)
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = nn.Parameter(torch.Tensor(*normalized_shape))
            self.bias = nn.Parameter(torch.Tensor(*normalized_shape))
        else:
            # Register None so state_dict/repr stay consistent without affine.
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Initialise to the identity transform: weight = 1, bias = 0.
        if self.elementwise_affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def forward(self, inputs, idx):
        # NOTE(review): the affine branch indexes weight[:, idx, :], which
        # assumes a 3-d normalized_shape of (channels, num_nodes, time) --
        # confirm against the caller before reusing this module elsewhere.
        if self.elementwise_affine:
            return F.layer_norm(inputs, tuple(inputs.shape[1:]),
                                self.weight[:, idx, :], self.bias[:, idx, :], self.eps)
        else:
            # weight/bias are None here; F.layer_norm treats None as no affine.
            return F.layer_norm(inputs, tuple(inputs.shape[1:]),
                                self.weight, self.bias, self.eps)

    def extra_repr(self):
        return '{normalized_shape}, eps={eps}, ' \
            'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
class MTGNN(AbstractTrafficStateModel):
def __init__(self, config, data_feature):
super().__init__(config, data_feature)
self.adj_mx = self.data_feature.get('adj_mx')
self.num_nodes = self.data_feature.get('num_nodes', 1)
self.feature_dim = self.data_feature.get('feature_dim', 1)
self.num_batches = self.data_feature.get('num_batches', 1)
self._logger = getLogger()
self._scaler = self.data_feature.get('scaler')
self.input_window = config.get('input_window', 1)
self.output_window = config.get('output_window', 1)
self.output_dim = config.get('output_dim', 1)
self.device = config.get('device', torch.device('cpu'))
self.gcn_true = config.get('gcn_true', True)
self.buildA_true = config.get('buildA_true', True)
self.gcn_depth = config.get('gcn_depth', 2)
self.dropout = config.get('dropout', 0.3)
self.subgraph_size = config.get('subgraph_size', 20)
self.node_dim = config.get('node_dim', 40)
self.dilation_exponential = config.get('dilation_exponential', 1)
self.conv_channels = config.get('conv_channels', 32)
self.residual_channels = config.get('residual_channels', 32)
self.skip_channels = config.get('skip_channels', 64)
self.end_channels = config.get('end_channels', 128)
self.layers = config.get('layers', 3)
self.propalpha = config.get('propalpha', 0.05)
self.tanhalpha = config.get('tanhalpha', 3)
self.layer_norm_affline = config.get('layer_norm_affline', True)
self.use_curriculum_learning = config.get('use_curriculum_learning', False)
self.step_size = config.get('step_size1', 2500)
self.max_epoch = config.get('max_epoch', 100)
if self.max_epoch * self.num_batches < self.step_size * self.output_window:
self._logger.warning('Parameter `step_size1` is too big with {} epochs and '
'the model cannot be trained for all time steps.'.format(self.max_epoch))
self.task_level = config.get('task_level', 0)
self.idx = torch.arange(self.num_nodes).to(self.device)
self.predefined_A = torch.tensor(self.adj_mx) - torch.eye(self.num_nodes)
self.predefined_A = self.predefined_A.to(self.device)
self.static_feat = None
self.filter_convs = nn.ModuleList()
self.gate_convs = nn.ModuleList()
self.residual_convs = nn.ModuleList()
self.skip_convs = nn.ModuleList()
self.gconv1 = nn.ModuleList()
self.gconv2 = nn.ModuleList()
self.norm = nn.ModuleList()
self.start_conv = nn.Conv2d(in_channels=self.feature_dim,
out_channels=self.residual_channels,
kernel_size=(1, 1))
self.gc = GraphConstructor(self.num_nodes, self.subgraph_size, self.node_dim,
self.device, alpha=self.tanhalpha, static_feat=self.static_feat)
kernel_size = 7
if self.dilation_exponential > 1:
self.receptive_field = int(self.output_dim + (kernel_size-1) * (self.dilation_exponential**self.layers-1)
/ (self.dilation_exponential - 1))
else:
self.receptive_field = self.layers * (kernel_size-1) + self.output_dim
for i in range(1):
if self.dilation_exponential > 1:
rf_size_i = int(1 + i * (kernel_size-1) * (self.dilation_exponential**self.layers-1)
/ (self.dilation_exponential - 1))
else:
rf_size_i = i * self.layers * (kernel_size - 1) + 1
new_dilation = 1
for j in range(1, self.layers+1):
if self.dilation_exponential > 1:
rf_size_j = int(rf_size_i + (kernel_size-1) * (self.dilation_exponential**j - 1)
/ (self.dilation_exponential - 1))
else:
rf_size_j = rf_size_i+j*(kernel_size-1)
self.filter_convs.append(DilatedInception(self.residual_channels,
self.conv_channels, dilation_factor=new_dilation))
self.gate_convs.append(DilatedInception(self.residual_channels,
self.conv_channels, dilation_factor=new_dilation))
self.residual_convs.append(nn.Conv2d(in_channels=self.conv_channels,
out_channels=self.residual_channels, kernel_size=(1, 1)))
if self.input_window > self.receptive_field:
self.skip_convs.append(nn.Conv2d(in_channels=self.conv_channels, out_channels=self.skip_channels,
kernel_size=(1, self.input_window-rf_size_j+1)))
else:
self.skip_convs.append(nn.Conv2d(in_channels=self.conv_channels, out_channels=self.skip_channels,
kernel_size=(1, self.receptive_field-rf_size_j+1)))
if self.gcn_true:
self.gconv1.append(MixProp(self.conv_channels, self.residual_channels,
self.gcn_depth, self.dropout, self.propalpha))
self.gconv2.append(MixProp(self.conv_channels, self.residual_channels,
self.gcn_depth, self.dropout, self.propalpha))
if self.input_window > self.receptive_field:
self.norm.append(LayerNorm((self.residual_channels, self.num_nodes,
self.input_window - rf_size_j + 1),
elementwise_affine=self.layer_norm_affline))
else:
self.norm.append(LayerNorm((self.residual_channels, self.num_nodes,
self.receptive_field - rf_size_j + 1),
elementwise_affine=self.layer_norm_affline))
new_dilation *= self.dilation_exponential
self.end_conv_1 = nn.Conv2d(in_channels=self.skip_channels,
out_channels=self.end_channels, kernel_size=(1, 1), bias=True)
self.end_conv_2 = nn.Conv2d(in_channels=self.end_channels,
out_channels=self.output_window, kernel_size=(1, 1), bias=True)
if self.input_window > self.receptive_field:
self.skip0 = nn.Conv2d(in_channels=self.feature_dim,
out_channels=self.skip_channels,
kernel_size=(1, self.input_window), bias=True)
self.skipE = nn.Conv2d(in_channels=self.residual_channels,
out_channels=self.skip_channels,
kernel_size=(1, self.input_window-self.receptive_field+1), bias=True)
else:
self.skip0 = nn.Conv2d(in_channels=self.feature_dim,
out_channels=self.skip_channels, kernel_size=(1, self.receptive_field), bias=True)
self.skipE = nn.Conv2d(in_channels=self.residual_channels,
out_channels=self.skip_channels, kernel_size=(1, 1), bias=True)
self._logger.info('receptive_field: ' + str(self.receptive_field))
def forward(self, batch, idx=None):
inputs = batch['X'] # (batch_size, input_window, num_nodes, feature_dim)
inputs = inputs.transpose(1, 3) # (batch_size, feature_dim, num_nodes, input_window)
assert inputs.size(3) == self.input_window, 'input sequence length not equal to preset sequence length'
if self.input_window < self.receptive_field:
inputs = nn.functional.pad(inputs, (self.receptive_field-self.input_window, 0, 0, 0))
if self.gcn_true:
if self.buildA_true:
if idx is None:
adp = self.gc(self.idx)
else:
adp = self.gc(idx)
else:
adp = self.predefined_A
x = self.start_conv(inputs)
skip = self.skip0(F.dropout(inputs, self.dropout, training=self.training))
for i in range(self.layers):
residual = x
filters = self.filter_convs[i](x)
filters = torch.tanh(filters)
gate = self.gate_convs[i](x)
gate = torch.sigmoid(gate)
x = filters * gate
x = F.dropout(x, self.dropout, training=self.training)
s = x
s = self.skip_convs[i](s)
skip = s + skip
if self.gcn_true:
x = self.gconv1[i](x, adp)+self.gconv2[i](x, adp.transpose(1, 0))
else:
x = self.residual_convs[i](x)
x = x + residual[:, :, :, -x.size(3):]
if idx is None:
x = self.norm[i](x, self.idx)
else:
x = self.norm[i](x, idx)
skip = self.skipE(x) + skip
x = F.relu(skip)
x = F.relu(self.end_conv_1(x))
x = self.end_conv_2(x)
return x
def calculate_loss(self, batch, idx=None, batches_seen=None):
if idx is not None:
idx = torch.tensor(idx).to(self.device)
tx = batch['X'][:, :, idx, :].clone() # 避免batch[X]被修改 下一次idx索引就不对了
y_true = batch['y'][:, :, idx, :]
batch_new = {'X': tx}
y_predicted = self.predict(batch_new, idx)
else:
y_true = batch['y']
y_predicted = self.predict(batch)
# print('y_true', y_true.shape)
# print('y_predicted', y_predicted.shape)
y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])
y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim])
if self.training:
if batches_seen % self.step_size == 0 and self.task_level < self.output_window:
self.task_level += 1
self._logger.info('Training: task_level increase from {} to {}'.format(
self.task_level-1, self.task_level))
self._logger.info('Current batches_seen is {}'.format(batches_seen))
if self.use_curriculum_learning:
return loss.masked_mae_torch(y_predicted[:, :self.task_level, :, :],
y_true[:, :self.task_level, :, :], 0)
else:
return loss.masked_mae_torch(y_predicted, y_true, 0)
else:
return loss.masked_mae_torch(y_predicted, y_true, 0)
def predict(self, batch, idx=None):
return self.forward(batch, idx) | libcity/model/traffic_speed_prediction/MTGNN.py | from __future__ import division
import torch
import torch.nn as nn
from torch.nn import init
import numbers
import torch.nn.functional as F
from logging import getLogger
from libcity.model.abstract_traffic_state_model import AbstractTrafficStateModel
from libcity.model import loss
class NConv(nn.Module):
def __init__(self):
super(NConv, self).__init__()
def forward(self, x, adj):
x = torch.einsum('ncwl,vw->ncvl', (x, adj))
return x.contiguous()
class DyNconv(nn.Module):
def __init__(self):
super(DyNconv, self).__init__()
def forward(self, x, adj):
x = torch.einsum('ncvl,nvwl->ncwl', (x, adj))
return x.contiguous()
class Linear(nn.Module):
def __init__(self, c_in, c_out, bias=True):
super(Linear, self).__init__()
self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(1, 1), padding=(0, 0), stride=(1, 1), bias=bias)
def forward(self, x):
return self.mlp(x)
class Prop(nn.Module):
def __init__(self, c_in, c_out, gdep, dropout, alpha):
super(Prop, self).__init__()
self.nconv = NConv()
self.mlp = Linear(c_in, c_out)
self.gdep = gdep
self.dropout = dropout
self.alpha = alpha
def forward(self, x, adj):
adj = adj + torch.eye(adj.size(0)).to(x.device)
d = adj.sum(1)
h = x
dv = d
a = adj / dv.view(-1, 1)
for i in range(self.gdep):
h = self.alpha*x + (1-self.alpha)*self.nconv(h, a)
ho = self.mlp(h)
return ho
class MixProp(nn.Module):
def __init__(self, c_in, c_out, gdep, dropout, alpha):
super(MixProp, self).__init__()
self.nconv = NConv()
self.mlp = Linear((gdep+1)*c_in, c_out)
self.gdep = gdep
self.dropout = dropout
self.alpha = alpha
def forward(self, x, adj):
adj = adj + torch.eye(adj.size(0)).to(x.device)
d = adj.sum(1)
h = x
out = [h]
a = adj / d.view(-1, 1)
for i in range(self.gdep):
h = self.alpha*x + (1-self.alpha)*self.nconv(h, a)
out.append(h)
ho = torch.cat(out, dim=1)
ho = self.mlp(ho)
return ho
class DyMixprop(nn.Module):
def __init__(self, c_in, c_out, gdep, dropout, alpha):
super(DyMixprop, self).__init__()
self.nconv = DyNconv()
self.mlp1 = Linear((gdep+1)*c_in, c_out)
self.mlp2 = Linear((gdep+1)*c_in, c_out)
self.gdep = gdep
self.dropout = dropout
self.alpha = alpha
self.lin1 = Linear(c_in, c_in)
self.lin2 = Linear(c_in, c_in)
def forward(self, x):
x1 = torch.tanh(self.lin1(x))
x2 = torch.tanh(self.lin2(x))
adj = self.nconv(x1.transpose(2, 1), x2)
adj0 = torch.softmax(adj, dim=2)
adj1 = torch.softmax(adj.transpose(2, 1), dim=2)
h = x
out = [h]
for i in range(self.gdep):
h = self.alpha*x + (1-self.alpha)*self.nconv(h, adj0)
out.append(h)
ho = torch.cat(out, dim=1)
ho1 = self.mlp1(ho)
h = x
out = [h]
for i in range(self.gdep):
h = self.alpha * x + (1 - self.alpha) * self.nconv(h, adj1)
out.append(h)
ho = torch.cat(out, dim=1)
ho2 = self.mlp2(ho)
return ho1+ho2
class Dilated1D(nn.Module):
def __init__(self, cin, cout, dilation_factor=2):
super(Dilated1D, self).__init__()
self.tconv = nn.ModuleList()
self.kernel_set = [2, 3, 6, 7]
self.tconv = nn.Conv2d(cin, cout, (1, 7), dilation=(1, dilation_factor))
def forward(self, inputs):
x = self.tconv(inputs)
return x
class DilatedInception(nn.Module):
def __init__(self, cin, cout, dilation_factor=2):
super(DilatedInception, self).__init__()
self.tconv = nn.ModuleList()
self.kernel_set = [2, 3, 6, 7]
cout = int(cout/len(self.kernel_set))
for kern in self.kernel_set:
self.tconv.append(nn.Conv2d(cin, cout, (1, kern), dilation=(1, dilation_factor)))
def forward(self, input):
x = []
for i in range(len(self.kernel_set)):
x.append(self.tconv[i](input))
for i in range(len(self.kernel_set)):
x[i] = x[i][..., -x[-1].size(3):]
x = torch.cat(x, dim=1)
return x
class GraphConstructor(nn.Module):
def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
super(GraphConstructor, self).__init__()
self.nnodes = nnodes
if static_feat is not None:
xd = static_feat.shape[1]
self.lin1 = nn.Linear(xd, dim)
self.lin2 = nn.Linear(xd, dim)
else:
self.emb1 = nn.Embedding(nnodes, dim)
self.emb2 = nn.Embedding(nnodes, dim)
self.lin1 = nn.Linear(dim, dim)
self.lin2 = nn.Linear(dim, dim)
self.device = device
self.k = k
self.dim = dim
self.alpha = alpha
self.static_feat = static_feat
def forward(self, idx):
if self.static_feat is None:
nodevec1 = self.emb1(idx)
nodevec2 = self.emb2(idx)
else:
nodevec1 = self.static_feat[idx, :]
nodevec2 = nodevec1
nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1))
nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2))
a = torch.mm(nodevec1, nodevec2.transpose(1, 0))-torch.mm(nodevec2, nodevec1.transpose(1, 0))
adj = F.relu(torch.tanh(self.alpha*a))
mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
mask.fill_(float('0'))
s1, t1 = adj.topk(self.k, 1)
mask.scatter_(1, t1, s1.fill_(1))
adj = adj*mask
return adj
def fulla(self, idx):
if self.static_feat is None:
nodevec1 = self.emb1(idx)
nodevec2 = self.emb2(idx)
else:
nodevec1 = self.static_feat[idx, :]
nodevec2 = nodevec1
nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1))
nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2))
a = torch.mm(nodevec1, nodevec2.transpose(1, 0))-torch.mm(nodevec2, nodevec1.transpose(1, 0))
adj = F.relu(torch.tanh(self.alpha*a))
return adj
class GraphGlobal(nn.Module):
def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
super(GraphGlobal, self).__init__()
self.nnodes = nnodes
self.A = nn.Parameter(torch.randn(nnodes, nnodes).to(device), requires_grad=True).to(device)
def forward(self, idx):
return F.relu(self.A)
class GraphUndirected(nn.Module):
def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
super(GraphUndirected, self).__init__()
self.nnodes = nnodes
if static_feat is not None:
xd = static_feat.shape[1]
self.lin1 = nn.Linear(xd, dim)
else:
self.emb1 = nn.Embedding(nnodes, dim)
self.lin1 = nn.Linear(dim, dim)
self.device = device
self.k = k
self.dim = dim
self.alpha = alpha
self.static_feat = static_feat
def forward(self, idx):
if self.static_feat is None:
nodevec1 = self.emb1(idx)
nodevec2 = self.emb1(idx)
else:
nodevec1 = self.static_feat[idx, :]
nodevec2 = nodevec1
nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1))
nodevec2 = torch.tanh(self.alpha*self.lin1(nodevec2))
a = torch.mm(nodevec1, nodevec2.transpose(1, 0))
adj = F.relu(torch.tanh(self.alpha*a))
mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
mask.fill_(float('0'))
s1, t1 = adj.topk(self.k, 1)
mask.scatter_(1, t1, s1.fill_(1))
adj = adj*mask
return adj
class GraphDirected(nn.Module):
def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
super(GraphDirected, self).__init__()
self.nnodes = nnodes
if static_feat is not None:
xd = static_feat.shape[1]
self.lin1 = nn.Linear(xd, dim)
self.lin2 = nn.Linear(xd, dim)
else:
self.emb1 = nn.Embedding(nnodes, dim)
self.emb2 = nn.Embedding(nnodes, dim)
self.lin1 = nn.Linear(dim, dim)
self.lin2 = nn.Linear(dim, dim)
self.device = device
self.k = k
self.dim = dim
self.alpha = alpha
self.static_feat = static_feat
def forward(self, idx):
if self.static_feat is None:
nodevec1 = self.emb1(idx)
nodevec2 = self.emb2(idx)
else:
nodevec1 = self.static_feat[idx, :]
nodevec2 = nodevec1
nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1))
nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2))
a = torch.mm(nodevec1, nodevec2.transpose(1, 0))
adj = F.relu(torch.tanh(self.alpha*a))
mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
mask.fill_(float('0'))
s1, t1 = adj.topk(self.k, 1)
mask.scatter_(1, t1, s1.fill_(1))
adj = adj*mask
return adj
class LayerNorm(nn.Module):
__constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine']
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = tuple(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.Tensor(*normalized_shape))
self.bias = nn.Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, inputs, idx):
if self.elementwise_affine:
return F.layer_norm(inputs, tuple(inputs.shape[1:]),
self.weight[:, idx, :], self.bias[:, idx, :], self.eps)
else:
return F.layer_norm(inputs, tuple(inputs.shape[1:]),
self.weight, self.bias, self.eps)
def extra_repr(self):
return '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
class MTGNN(AbstractTrafficStateModel):
def __init__(self, config, data_feature):
super().__init__(config, data_feature)
self.adj_mx = self.data_feature.get('adj_mx')
self.num_nodes = self.data_feature.get('num_nodes', 1)
self.feature_dim = self.data_feature.get('feature_dim', 1)
self.num_batches = self.data_feature.get('num_batches', 1)
self._logger = getLogger()
self._scaler = self.data_feature.get('scaler')
self.input_window = config.get('input_window', 1)
self.output_window = config.get('output_window', 1)
self.output_dim = config.get('output_dim', 1)
self.device = config.get('device', torch.device('cpu'))
self.gcn_true = config.get('gcn_true', True)
self.buildA_true = config.get('buildA_true', True)
self.gcn_depth = config.get('gcn_depth', 2)
self.dropout = config.get('dropout', 0.3)
self.subgraph_size = config.get('subgraph_size', 20)
self.node_dim = config.get('node_dim', 40)
self.dilation_exponential = config.get('dilation_exponential', 1)
self.conv_channels = config.get('conv_channels', 32)
self.residual_channels = config.get('residual_channels', 32)
self.skip_channels = config.get('skip_channels', 64)
self.end_channels = config.get('end_channels', 128)
self.layers = config.get('layers', 3)
self.propalpha = config.get('propalpha', 0.05)
self.tanhalpha = config.get('tanhalpha', 3)
self.layer_norm_affline = config.get('layer_norm_affline', True)
self.use_curriculum_learning = config.get('use_curriculum_learning', False)
self.step_size = config.get('step_size1', 2500)
self.max_epoch = config.get('max_epoch', 100)
if self.max_epoch * self.num_batches < self.step_size * self.output_window:
self._logger.warning('Parameter `step_size1` is too big with {} epochs and '
'the model cannot be trained for all time steps.'.format(self.max_epoch))
self.task_level = config.get('task_level', 0)
self.idx = torch.arange(self.num_nodes).to(self.device)
self.predefined_A = torch.tensor(self.adj_mx) - torch.eye(self.num_nodes)
self.predefined_A = self.predefined_A.to(self.device)
self.static_feat = None
self.filter_convs = nn.ModuleList()
self.gate_convs = nn.ModuleList()
self.residual_convs = nn.ModuleList()
self.skip_convs = nn.ModuleList()
self.gconv1 = nn.ModuleList()
self.gconv2 = nn.ModuleList()
self.norm = nn.ModuleList()
self.start_conv = nn.Conv2d(in_channels=self.feature_dim,
out_channels=self.residual_channels,
kernel_size=(1, 1))
self.gc = GraphConstructor(self.num_nodes, self.subgraph_size, self.node_dim,
self.device, alpha=self.tanhalpha, static_feat=self.static_feat)
kernel_size = 7
if self.dilation_exponential > 1:
self.receptive_field = int(self.output_dim + (kernel_size-1) * (self.dilation_exponential**self.layers-1)
/ (self.dilation_exponential - 1))
else:
self.receptive_field = self.layers * (kernel_size-1) + self.output_dim
for i in range(1):
if self.dilation_exponential > 1:
rf_size_i = int(1 + i * (kernel_size-1) * (self.dilation_exponential**self.layers-1)
/ (self.dilation_exponential - 1))
else:
rf_size_i = i * self.layers * (kernel_size - 1) + 1
new_dilation = 1
for j in range(1, self.layers+1):
if self.dilation_exponential > 1:
rf_size_j = int(rf_size_i + (kernel_size-1) * (self.dilation_exponential**j - 1)
/ (self.dilation_exponential - 1))
else:
rf_size_j = rf_size_i+j*(kernel_size-1)
self.filter_convs.append(DilatedInception(self.residual_channels,
self.conv_channels, dilation_factor=new_dilation))
self.gate_convs.append(DilatedInception(self.residual_channels,
self.conv_channels, dilation_factor=new_dilation))
self.residual_convs.append(nn.Conv2d(in_channels=self.conv_channels,
out_channels=self.residual_channels, kernel_size=(1, 1)))
if self.input_window > self.receptive_field:
self.skip_convs.append(nn.Conv2d(in_channels=self.conv_channels, out_channels=self.skip_channels,
kernel_size=(1, self.input_window-rf_size_j+1)))
else:
self.skip_convs.append(nn.Conv2d(in_channels=self.conv_channels, out_channels=self.skip_channels,
kernel_size=(1, self.receptive_field-rf_size_j+1)))
if self.gcn_true:
self.gconv1.append(MixProp(self.conv_channels, self.residual_channels,
self.gcn_depth, self.dropout, self.propalpha))
self.gconv2.append(MixProp(self.conv_channels, self.residual_channels,
self.gcn_depth, self.dropout, self.propalpha))
if self.input_window > self.receptive_field:
self.norm.append(LayerNorm((self.residual_channels, self.num_nodes,
self.input_window - rf_size_j + 1),
elementwise_affine=self.layer_norm_affline))
else:
self.norm.append(LayerNorm((self.residual_channels, self.num_nodes,
self.receptive_field - rf_size_j + 1),
elementwise_affine=self.layer_norm_affline))
new_dilation *= self.dilation_exponential
self.end_conv_1 = nn.Conv2d(in_channels=self.skip_channels,
out_channels=self.end_channels, kernel_size=(1, 1), bias=True)
self.end_conv_2 = nn.Conv2d(in_channels=self.end_channels,
out_channels=self.output_window, kernel_size=(1, 1), bias=True)
if self.input_window > self.receptive_field:
self.skip0 = nn.Conv2d(in_channels=self.feature_dim,
out_channels=self.skip_channels,
kernel_size=(1, self.input_window), bias=True)
self.skipE = nn.Conv2d(in_channels=self.residual_channels,
out_channels=self.skip_channels,
kernel_size=(1, self.input_window-self.receptive_field+1), bias=True)
else:
self.skip0 = nn.Conv2d(in_channels=self.feature_dim,
out_channels=self.skip_channels, kernel_size=(1, self.receptive_field), bias=True)
self.skipE = nn.Conv2d(in_channels=self.residual_channels,
out_channels=self.skip_channels, kernel_size=(1, 1), bias=True)
self._logger.info('receptive_field: ' + str(self.receptive_field))
def forward(self, batch, idx=None):
inputs = batch['X'] # (batch_size, input_window, num_nodes, feature_dim)
inputs = inputs.transpose(1, 3) # (batch_size, feature_dim, num_nodes, input_window)
assert inputs.size(3) == self.input_window, 'input sequence length not equal to preset sequence length'
if self.input_window < self.receptive_field:
inputs = nn.functional.pad(inputs, (self.receptive_field-self.input_window, 0, 0, 0))
if self.gcn_true:
if self.buildA_true:
if idx is None:
adp = self.gc(self.idx)
else:
adp = self.gc(idx)
else:
adp = self.predefined_A
x = self.start_conv(inputs)
skip = self.skip0(F.dropout(inputs, self.dropout, training=self.training))
for i in range(self.layers):
residual = x
filters = self.filter_convs[i](x)
filters = torch.tanh(filters)
gate = self.gate_convs[i](x)
gate = torch.sigmoid(gate)
x = filters * gate
x = F.dropout(x, self.dropout, training=self.training)
s = x
s = self.skip_convs[i](s)
skip = s + skip
if self.gcn_true:
x = self.gconv1[i](x, adp)+self.gconv2[i](x, adp.transpose(1, 0))
else:
x = self.residual_convs[i](x)
x = x + residual[:, :, :, -x.size(3):]
if idx is None:
x = self.norm[i](x, self.idx)
else:
x = self.norm[i](x, idx)
skip = self.skipE(x) + skip
x = F.relu(skip)
x = F.relu(self.end_conv_1(x))
x = self.end_conv_2(x)
return x
def calculate_loss(self, batch, idx=None, batches_seen=None):
if idx is not None:
idx = torch.tensor(idx).to(self.device)
tx = batch['X'][:, :, idx, :].clone() # 避免batch[X]被修改 下一次idx索引就不对了
y_true = batch['y'][:, :, idx, :]
batch_new = {'X': tx}
y_predicted = self.predict(batch_new, idx)
else:
y_true = batch['y']
y_predicted = self.predict(batch)
# print('y_true', y_true.shape)
# print('y_predicted', y_predicted.shape)
y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])
y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim])
if self.training:
if batches_seen % self.step_size == 0 and self.task_level < self.output_window:
self.task_level += 1
self._logger.info('Training: task_level increase from {} to {}'.format(
self.task_level-1, self.task_level))
self._logger.info('Current batches_seen is {}'.format(batches_seen))
if self.use_curriculum_learning:
return loss.masked_mae_torch(y_predicted[:, :self.task_level, :, :],
y_true[:, :self.task_level, :, :], 0)
else:
return loss.masked_mae_torch(y_predicted, y_true, 0)
else:
return loss.masked_mae_torch(y_predicted, y_true, 0)
def predict(self, batch, idx=None):
return self.forward(batch, idx) | 0.934567 | 0.318538 |
import cv2
import sqlite3
import numpy as np
import os
import threading
import time
import PIL.Image
import PIL.ExifTags
import datetime
from shutil import copyfile
import subprocess
from upload_video import upload_video
# --- Filesystem layout --------------------------------------------------
imagePath = '/timelapse/'                      # raw bracketed captures
hdrPath = '/timelapse/hdr/'                    # merged/tonemapped HDR frames
weekTemp = '/timelapse/tmp/week/'              # ffmpeg staging directories
monthTemp = '/timelapse/tmp/month/'
everythingTemp = '/timelapse/tmp/everything/'
weekVid = '/timelapse/video/week/'             # archived rendered videos
monthVid = '/timelapse/video/month/'
everythingVid = '/timelapse/video/everything/'
vccDb = '/home/timelapse/VCC-Timelapse/vccTimelapse.db'  # sqlite database
# Flag polled by all worker loops; never set to False anywhere in this file,
# so the workers run until the process is killed.
running = True
# Filename suffixes of the bracketed exposures merged into each HDR frame.
evs = ['_ev_-10','_ev_-5','','_ev_5','_ev_10']
# ffmpeg command templates: 60 fps numbered-image sequence -> 2874x2160 h264.
# NOTE(review): '-format rgb32' is not a standard ffmpeg option (perhaps
# '-pix_fmt' was intended) — confirm against the ffmpeg build in use.
ffmpegBegin ="ffmpeg -y -r 60 -i \""
ffmpegEnd = "image%08d.jpg\" -format rgb32 -s 2874x2160 -vcodec libx264 "
ffmpegWeek = ffmpegBegin+weekTemp+ffmpegEnd
ffmpegMonth = ffmpegBegin+monthTemp+ffmpegEnd
ffmpegEverything = ffmpegBegin+everythingTemp+ffmpegEnd
# First day of recording; dayRec/week counters are measured from this date.
day0 = datetime.date(2018,3,8)
def firstGenDb(db_path=None):
    """Create the sqlite schema: the ``images`` and ``video`` tables.

    Args:
        db_path: Path of the database file to initialise. Defaults to the
            module-level ``vccDb`` path when None (backward compatible with
            the original zero-argument call).
    """
    db_path = vccDb if db_path is None else db_path
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        # IF NOT EXISTS makes re-running this safe (e.g. against a
        # partially initialised database file).
        c.execute('''CREATE TABLE IF NOT EXISTS images (year integer, month integer,
                 day integer, hours integer, minutes integer,week integer,weekday integer,dayRec integer)''')
        c.execute('''CREATE TABLE IF NOT EXISTS video (youtube text, duration text,
                 year integer, month integer,
                 day integer,week integer)''')
        conn.commit()
    finally:
        # Close even if table creation raises, so the file isn't left locked.
        conn.close()
def pather(path, expId):
    """Ensure ``path`` and its ``expId`` subdirectory exist.

    Args:
        path: Base directory (expected to end with '/').
        expId: Name of the subdirectory to create under ``path``.

    Returns:
        The combined path ``path + expId + '/'``.
    """
    full = path + expId + '/'
    # exist_ok=True makes the call idempotent and creates the base
    # directory too, replacing the original's two exists-then-create steps.
    os.makedirs(full, exist_ok=True)
    return full
def fileNamer(year, month, day, hours, minutes, base=None):
    """Build the HDR frame path '<base>YYYY-MM-DD_HHMM.jpg'.

    Args:
        year, month, day, hours, minutes: Timestamp components; int or str
            accepted. month/day/hours/minutes are zero-padded to 2 digits.
        base: Directory prefix; defaults to the module-level ``hdrPath``
            (backward compatible with the original five-argument call).

    Returns:
        The full file path as a string.
    """
    if base is None:
        base = hdrPath
    return '{0}{1}-{2}-{3}_{4}{5}.jpg'.format(
        base, year, str(month).zfill(2), str(day).zfill(2),
        str(hours).zfill(2), str(minutes).zfill(2))
def dbFiller(today: bool = False, tSleep: int = 7*60*60*24):
    """Background worker: merge bracketed exposures into HDR frames.

    Scans ``imagePath`` for raw captures named 'YYYY-MM-DD_HHMM<ev>.jpg',
    merges each timestamp's exposure bracket into a single tonemapped HDR
    image saved under ``hdrPath`` (via ``fileNamer``), and records the
    frame in the ``images`` table of the sqlite database.

    Args:
        today: When True, only process frames captured today; when False,
            only process frames from previous days.
        tSleep: Seconds to sleep between scans (default: one week).
    """
    print( ' -- Image Cropper Started -- ')
    while running:
        # Collect the unique 'YYYY-MM-DD_HHMM' prefixes (15 chars) of all
        # files in the capture directory.
        files = os.listdir(imagePath)
        fileDate = []
        for file in files:
            fileDate.append(file[0:15])
        fileDate = np.unique(fileDate)
        for date in fileDate:
            # Cheap sanity filter: full-length prefix starting with '2'
            # (i.e. a 2xxx year), skipping non-capture files.
            if len(date) == 15 and date[0] == '2':
                year = date[0:4]
                month = date[5:7]
                day = date[8:10]
                todayDate = datetime.date.today()
                day1 = datetime.date(int(year),int(month),int(day))
                # Process either only today's captures or only past days,
                # depending on the `today` flag.
                if (day1 == todayDate and today) or (day1!=todayDate and not today):
                    hours = date[11:13]
                    minutes = date[13:15]
                    conn = sqlite3.connect(vccDb)
                    c = conn.cursor()
                    # Skip timestamps already merged and recorded.
                    c.execute("Select * from images where year = ? and month = ? and day = ? and hours = ? and minutes = ?",(int(year),int(month),int(day),int(hours),int(minutes),))
                    F = c.fetchall()
                    if len(F) == 0:
                        images = []
                        times = []
                        # Load every exposure of the bracket together with
                        # its exposure time read from EXIF.
                        for ev in evs:
                            imName = imagePath+year+'-'+month+'-'+day+'_'+hours+minutes+ev+'.jpg'
                            image = cv2.imread(imName)
                            # Pixel-sum threshold discards frames that are
                            # mostly black (failed/dark captures).
                            # NOTE(review): threshold presumably tuned to the
                            # 3280x2464 sensor frame — confirm if resolution
                            # changes.
                            if image is not None and np.sum(image)>2500000000 :
                                img = PIL.Image.open(imName)
                                exif = {
                                    PIL.ExifTags.TAGS[k]: v
                                    for k, v in img._getexif().items()
                                    if k in PIL.ExifTags.TAGS
                                }
                                images.append(image)
                                # ExposureTime is a (numerator, denominator)
                                # rational; assumes Python 3 true division —
                                # under Python 2 this would truncate to 0.
                                times.append(exif['ExposureTime'][0]/exif['ExposureTime'][1])
                        if len(images)>0:
                            times = np.array(times).astype(np.float32)
                            # Align the bracket (camera drift between shots),
                            # then merge with Debevec's algorithm.
                            alignMTB = cv2.createAlignMTB()
                            alignMTB.process(images, images)
                            calibrateDebevec = cv2.createCalibrateDebevec()
                            responseDebevec = calibrateDebevec.process(images,times)
                            # Merge images into an HDR linear image
                            mergeDebevec = cv2.createMergeDebevec()
                            hdrDebevec = mergeDebevec.process(images, times, responseDebevec)
                            # NOTE(review): createTonemapDurand exists only in
                            # older OpenCV builds (later moved to contrib) —
                            # confirm the installed version provides it.
                            tonemap1 = cv2.createTonemapDurand(gamma=2.2)
                            res_debevec = tonemap1.process(hdrDebevec.copy())
                            # Save HDR image.
                            res_debevec_8bit = np.clip(res_debevec*255, 0, 255).astype('uint8')
                            # Downscale from the 3280x2464 capture to the
                            # 2874x2160 video frame used by ffmpeg.
                            final_image = cv2.resize(res_debevec_8bit,None,fx=2874.0/3280.0,fy=2160.0/2464.0)
                            cv2.imwrite(fileNamer(year,month,day,hours,minutes), final_image)
                            iYear,week,weekday = datetime.date(int(year),int(month),int(day)).isocalendar()
                            dayRec = (day1-day0).days
                            # The isocalendar() week is discarded: weeks are
                            # counted from day0 instead.
                            week = np.floor((day1-day0).days/7.0).astype(int)
                            values = [year,month,day,hours,minutes,int(week),weekday,dayRec]
                            c.execute("INSERT INTO images VALUES (?,?,?,?,?,?,?,?)",values)
                            print(year+' '+month+' '+day+' '+hours+':'+minutes + ' week : '+str(week) + ' day : '+str(weekday) +' dayRec : '+str(dayRec))
                    conn.commit()
                    conn.close()
        time.sleep(tSleep)
    # Unreachable while `running` stays True; presumably a debug leftover.
    print(fileDate)
def weeklyVideo():
    """Background worker: render and upload one timelapse video per week.

    For every completed week (counted from ``day0``) that has images in the
    database but no 'week' row in the ``video`` table, copies that week's
    HDR frames into ``weekTemp`` as a numbered image sequence, renders an
    mp4 with ffmpeg, archives it under ``weekVid``, uploads it to YouTube
    and records the upload in the ``video`` table.
    """
    print( ' -- Weekly Video Started -- ')
    while running:
        currentWeek = np.floor((datetime.date.today()-day0).days/7.0).astype(int)
        conn = sqlite3.connect(vccDb)
        c = conn.cursor()
        c.execute("Select week from images")
        F = c.fetchall()
        weeks = np.unique(F)
        for week in weeks:
            c.execute("Select * from video where week = ? and duration = ?",(int(week),'week'))
            F = c.fetchall()
            # Only render weeks that are finished and not yet in the db.
            if len(F) == 0 and week<currentWeek:
                step = 0
                # Start from an empty staging directory.
                for f in os.listdir(weekTemp):
                    os.remove(os.path.join(weekTemp, f))
                c.execute("Select dayRec from images where week = ?",(int(week),))
                F = c.fetchall()
                days = np.sort(np.unique(F))
                for day in days:
                    c.execute("Select hours from images where dayRec = ?",(int(day),))
                    F = c.fetchall()
                    hours = np.sort(np.unique(F))
                    c.execute("Select year,month,day from images where dayRec = ?",(int(day),))
                    year,month,dayPic = c.fetchall()[0]
                    for hour in hours:
                        c.execute("Select minutes from images where dayRec = ? and hours = ?",(int(day),int(hour)))
                        F = c.fetchall()
                        minutes = np.sort(np.unique(F))
                        for minute in minutes:
                            # Copy frames as image00000000.jpg, ... — the
                            # sequence name ffmpeg expects.
                            path = fileNamer(year,month,dayPic,hour,minute)
                            copyfile(path, weekTemp + 'image'+str(step).zfill(8)+'.jpg')
                            step = step+1
                videoName = 'week'+str(week).zfill(5)+'.mp4'
                videoLine = ffmpegWeek + weekTemp+videoName
                print(videoLine)
                subprocess.call(videoLine,shell=True)
                copyfile(weekTemp+videoName,pather(weekVid,str(week).zfill(5))+videoName)
                print(weekTemp+videoName)
                videoId = upload_video(weekTemp+videoName,title = "Week "+str(week))
                # Bug fix: the returned YouTube id was previously overwritten
                # with '' before the INSERT, so the db row never linked to
                # the uploaded video. Store the real id.
                # NOTE(review): year/month/day here come from the last
                # iteration of the days loop — raises NameError if a week
                # row exists with no matching days; confirm that cannot
                # happen.
                values = [videoId,"week",year,month,int(day),int(week)]
                c.execute("INSERT INTO video VALUES (?,?,?,?,?,?)",values)
                conn.commit()
                for f in os.listdir(weekTemp):
                    os.remove(os.path.join(weekTemp, f))
        conn.close()
        # Sleep until 01:00 the next day.
        tSleep = 25-datetime.datetime.now().hour
        print('sleeping for '+str(tSleep)+' hours')
        # Best-effort cleanup of leftovers. (The original called
        # os.remove(weekTemp+'*.jpg'), but os.remove does not expand
        # wildcards, so it always raised and removed nothing.)
        try:
            for f in os.listdir(weekTemp):
                if f.endswith('.jpg') or f.endswith('.mp4'):
                    os.remove(os.path.join(weekTemp, f))
        except OSError:
            pass
        time.sleep(3600*tSleep)
def monthlyVideo():
    """Background worker: render and upload one timelapse video per month.

    For every (year, month) with images in the database but no 'month' row
    in the ``video`` table, copies every ``stepMonth``-th HDR frame of that
    month into ``monthTemp`` as a numbered image sequence, renders an mp4
    with ffmpeg, archives it under ``monthVid``, uploads it to YouTube and
    records the upload in the ``video`` table.
    """
    print( ' -- Monthly Video Started -- ')
    # Keep every 2nd frame so monthly videos stay a manageable length.
    stepMonth = 2
    while running:
        currentMonth = datetime.date.today().month
        currentYear = datetime.date.today().year
        conn = sqlite3.connect(vccDb)
        c = conn.cursor()
        c.execute("Select year,month from images")
        F = c.fetchall()
        months = list(set(F))
        print(months)
        for month in months:
            c.execute("Select * from video where year = ? and month = ? and duration = ?",(int(month[0]),int(month[1]),'month'))
            F = c.fetchall()
            # Only render months that are over and not yet in the db.
            if len(F) == 0 and month != (currentYear,currentMonth):
                step = 0
                image = 0
                # Start from an empty staging directory.
                for f in os.listdir(monthTemp):
                    os.remove(os.path.join(monthTemp, f))
                c.execute("Select dayRec from images where year = ? and month = ?",month)
                F = c.fetchall()
                days = np.sort(np.unique(F))
                for day in days:
                    c.execute("Select hours from images where dayRec = ?",(int(day),))
                    F = c.fetchall()
                    hours = np.sort(np.unique(F))
                    c.execute("Select day from images where dayRec = ?",(int(day),))
                    dayPic = c.fetchall()[0][0]
                    for hour in hours:
                        c.execute("Select minutes from images where dayRec = ? and hours = ?",(int(day),int(hour)))
                        F = c.fetchall()
                        minutes = np.sort(np.unique(F))
                        for minute in minutes:
                            # Subsample: copy every stepMonth-th frame only.
                            if image%stepMonth == 0:
                                path = fileNamer(int(month[0]),int(month[1]),dayPic,hour,minute)
                                copyfile(path, monthTemp + 'image'+str(step).zfill(8)+'.jpg')
                                step = step+1
                            image=image+1
                videoName = 'month_'+str(month[0])+'_'+str(month[1]).zfill(2)+'.mp4'
                videoLine = ffmpegMonth + monthTemp+videoName
                print(videoLine)
                subprocess.call(videoLine,shell=True)
                copyfile(monthTemp+videoName,pather(monthVid,str(month[0])+'_'+str(month[1]).zfill(2))+videoName)
                videoId = upload_video(monthTemp+videoName,title = "Month "+str(month))
                # NOTE(review): `day` here is the last dayRec index from the
                # loop above (a recording-day counter measured from day0),
                # not a calendar day — confirm that is what the `video`
                # table's day column should hold.
                values = [videoId,"month",int(month[0]),int(month[1]),day,0]
                c.execute("INSERT INTO video VALUES (?,?,?,?,?,?)",values)
                conn.commit()
                for f in os.listdir(monthTemp):
                    os.remove(os.path.join(monthTemp, f))
        conn.close()
        # NOTE(review): os.remove does not expand wildcards, so both calls
        # below always raise and are swallowed — effectively a no-op.
        try:
            os.remove(monthTemp+'*.jpg')
            os.remove(monthTemp+'*.mp4')
        except:
            pass
        # Sleep until 02:00 the next day.
        tSleep = 26-datetime.datetime.now().hour
        time.sleep(3600*tSleep)
def everythingVideo():
#tSleep = 27-dt.datetime.now().hour
#print('sleeping for '+str(tSleep)+' hours')
print( ' -- Everything Video Started -- ')
while running:
image = 0
step = 0
todayDate = datetime.date.today()
dayRecToday = (todayDate-day0).days
conn = sqlite3.connect(vccDb)
c = conn.cursor()
c.execute("Select dayRec from images")
F = c.fetchall()
days = np.sort(np.unique(F))
for f in os.listdir(everythingTemp):
os.remove(os.path.join(everythingTemp, f))
stepEverything = int(np.ceil(len(days)/30.0))
for day in days:
if day !=dayRecToday:
c.execute("Select hours from images where dayRec = ?",(int(day),))
F = c.fetchall()
hours = np.sort(np.unique(F))
c.execute("Select year,month,day from images where dayRec = ?",(int(day),))
year,month,dayPic = c.fetchall()[0]
for hour in hours:
c.execute("Select minutes from images where dayRec = ? and hours = ?",(int(day),int(hour)))
F = c.fetchall()
minutes = np.sort(np.unique(F))
for minute in minutes:
if image%stepEverything == 0:
path = fileNamer(year,month,dayPic,hour,minute)
copyfile(path, everythingTemp + 'image'+str(step).zfill(8)+'.jpg')
step = step+1
image=image+1
videoName = 'everything_'+str(todayDate.year)+'-'+str(todayDate.month).zfill(4)+'-'+str(todayDate.day).zfill(4)+'.mp4'
videoLine = ffmpegEverything + everythingTemp+videoName
print(videoLine)
subprocess.call(videoLine,shell = True)
copyfile(everythingTemp+videoName,pather(everythingVid,str(todayDate.year)+'-'+str(todayDate.month).zfill(4)+'-'+str(todayDate.day).zfill(4))+videoName)
videoId = upload_video(everythingTemp+videoName,title = "Everything up to "+str(todayDate))
values = [videoId,"everything",todayDate.year,todayDate.month,todayDate.day,0]
c.execute("INSERT INTO video VALUES (?,?,?,?,?,?)",values)
conn.commit()
conn.close()
for f in os.listdir(everythingTemp):
os.remove(os.path.join(everythingTemp, f))
tSleep = 27-datetime.datetime.now().hour+2*24
print('sleeping for '+str(tSleep)+' hours')
time.sleep(tSleep*3600)
def main():
if not os.path.isfile(vccDb):
firstGenDb()
checkFilesThread = threading.Thread(target=dbFiller,args = (False,7*24*60*60))
checkFilesThread.daemon = True
checkFilesThread.start()
checkFilesThread = threading.Thread(target=dbFiller,args = (True,5*60))
checkFilesThread.daemon = True
checkFilesThread.start()
weekThread = threading.Thread(target=weeklyVideo)
weekThread.daemon = True
weekThread.start()
monthThread = threading.Thread(target=monthlyVideo)
monthThread.daemon = True
monthThread.start()
everythingThread = threading.Thread(target=everythingVideo)
everythingThread.daemon = True
everythingThread.start()
t0 =time.time()
while running:
time.sleep(60*60)
print(' -----> Making Timelapses since '+str(int((time.time()-t0)/3600)) +' hours')
if __name__ == '__main__':
main() | timeLapser.py | import cv2
import sqlite3
import numpy as np
import os
import threading
import time
import PIL.Image
import PIL.ExifTags
import datetime
from shutil import copyfile
import subprocess
from upload_video import upload_video
imagePath = '/timelapse/'
hdrPath = '/timelapse/hdr/'
weekTemp = '/timelapse/tmp/week/'
monthTemp = '/timelapse/tmp/month/'
everythingTemp = '/timelapse/tmp/everything/'
weekVid = '/timelapse/video/week/'
monthVid = '/timelapse/video/month/'
everythingVid = '/timelapse/video/everything/'
vccDb = '/home/timelapse/VCC-Timelapse/vccTimelapse.db'
running = True
evs = ['_ev_-10','_ev_-5','','_ev_5','_ev_10']
ffmpegBegin ="ffmpeg -y -r 60 -i \""
ffmpegEnd = "image%08d.jpg\" -format rgb32 -s 2874x2160 -vcodec libx264 "
ffmpegWeek = ffmpegBegin+weekTemp+ffmpegEnd
ffmpegMonth = ffmpegBegin+monthTemp+ffmpegEnd
ffmpegEverything = ffmpegBegin+everythingTemp+ffmpegEnd
day0 = datetime.date(2018,3,8)
def firstGenDb():
conn = sqlite3.connect(vccDb)
c = conn.cursor()
c.execute('''CREATE TABLE images (year integer, month integer,
day integer, hours integer, minutes integer,week integer,weekday integer,dayRec integer)''')
c.execute('''CREATE TABLE video (youtube text, duration text,
year integer, month integer,
day integer,week integer)''')
conn.commit()
conn.close()
def pather(path,expId):
if not os.path.exists(path):
os.makedirs(path)
path = path + expId+ '/'
if not os.path.exists(path):
os.makedirs(path)
return path
def fileNamer(year,month,day,hours,minutes):
year = str(year)
month = str(month).zfill(2)
day = str(day).zfill(2)
hours = str(hours).zfill(2)
minutes = str(minutes).zfill(2)
return hdrPath+year+'-'+month+'-'+day+'_'+hours+minutes+'.jpg'
def dbFiller(today = False,tSleep = 7*60*60*24):
print( ' -- Image Cropper Started -- ')
while running:
files = os.listdir(imagePath)
fileDate = []
for file in files:
fileDate.append(file[0:15])
fileDate = np.unique(fileDate)
for date in fileDate:
if len(date) == 15 and date[0] == '2':
year = date[0:4]
month = date[5:7]
day = date[8:10]
todayDate = datetime.date.today()
day1 = datetime.date(int(year),int(month),int(day))
if (day1 == todayDate and today) or (day1!=todayDate and not today):
hours = date[11:13]
minutes = date[13:15]
conn = sqlite3.connect(vccDb)
c = conn.cursor()
c.execute("Select * from images where year = ? and month = ? and day = ? and hours = ? and minutes = ?",(int(year),int(month),int(day),int(hours),int(minutes),))
F = c.fetchall()
if len(F) == 0:
images = []
times = []
for ev in evs:
imName = imagePath+year+'-'+month+'-'+day+'_'+hours+minutes+ev+'.jpg'
image = cv2.imread(imName)
if image is not None and np.sum(image)>2500000000 :
img = PIL.Image.open(imName)
exif = {
PIL.ExifTags.TAGS[k]: v
for k, v in img._getexif().items()
if k in PIL.ExifTags.TAGS
}
images.append(image)
times.append(exif['ExposureTime'][0]/exif['ExposureTime'][1])
if len(images)>0:
times = np.array(times).astype(np.float32)
alignMTB = cv2.createAlignMTB()
alignMTB.process(images, images)
calibrateDebevec = cv2.createCalibrateDebevec()
responseDebevec = calibrateDebevec.process(images,times)
# Merge images into an HDR linear image
mergeDebevec = cv2.createMergeDebevec()
hdrDebevec = mergeDebevec.process(images, times, responseDebevec)
tonemap1 = cv2.createTonemapDurand(gamma=2.2)
res_debevec = tonemap1.process(hdrDebevec.copy())
# Save HDR image.
res_debevec_8bit = np.clip(res_debevec*255, 0, 255).astype('uint8')
final_image = cv2.resize(res_debevec_8bit,None,fx=2874.0/3280.0,fy=2160.0/2464.0)
cv2.imwrite(fileNamer(year,month,day,hours,minutes), final_image)
iYear,week,weekday = datetime.date(int(year),int(month),int(day)).isocalendar()
dayRec = (day1-day0).days
week = np.floor((day1-day0).days/7.0).astype(int)
values = [year,month,day,hours,minutes,int(week),weekday,dayRec]
c.execute("INSERT INTO images VALUES (?,?,?,?,?,?,?,?)",values)
print(year+' '+month+' '+day+' '+hours+':'+minutes + ' week : '+str(week) + ' day : '+str(weekday) +' dayRec : '+str(dayRec))
conn.commit()
conn.close()
time.sleep(tSleep)
print(fileDate)
def weeklyVideo():
print( ' -- Weekly Video Started -- ')
while running:
currentWeek = np.floor((datetime.date.today()-day0).days/7.0).astype(int)
conn = sqlite3.connect(vccDb)
c = conn.cursor()
c.execute("Select week from images")
F = c.fetchall()
weeks = np.unique(F)
for week in weeks:
c.execute("Select * from video where week = ? and duration = ?",(int(week),'week'))
F = c.fetchall()
if len(F) == 0 and week<currentWeek:
step = 0
for f in os.listdir(weekTemp):
os.remove(os.path.join(weekTemp, f))
c.execute("Select dayRec from images where week = ?",(int(week),))
F = c.fetchall()
days = np.sort(np.unique(F))
for day in days:
c.execute("Select hours from images where dayRec = ?",(int(day),))
F = c.fetchall()
hours = np.sort(np.unique(F))
c.execute("Select year,month,day from images where dayRec = ?",(int(day),))
year,month,dayPic = c.fetchall()[0]
for hour in hours:
c.execute("Select minutes from images where dayRec = ? and hours = ?",(int(day),int(hour)))
F = c.fetchall()
minutes = np.sort(np.unique(F))
for minute in minutes:
path = fileNamer(year,month,dayPic,hour,minute)
copyfile(path, weekTemp + 'image'+str(step).zfill(8)+'.jpg')
step = step+1
videoName = 'week'+str(week).zfill(5)+'.mp4'
videoLine = ffmpegWeek + weekTemp+videoName
print(videoLine)
subprocess.call(videoLine,shell=True)
copyfile(weekTemp+videoName,pather(weekVid,str(week).zfill(5))+videoName)
print(weekTemp+videoName)
videoId = upload_video(weekTemp+videoName,title = "Week "+str(week))
videoId =''
values = [videoId,"week",year,month,int(day),int(week)]
c.execute("INSERT INTO video VALUES (?,?,?,?,?,?)",values)
conn.commit()
for f in os.listdir(weekTemp):
os.remove(os.path.join(weekTemp, f))
conn.close()
tSleep = 25-datetime.datetime.now().hour
print('sleeping for '+str(tSleep)+' hours')
try:
os.remove(weekTemp+'*.jpg')
os.remove(weekTemp+'*.mp4')
except:
pass
time.sleep(3600*tSleep)
def monthlyVideo():
print( ' -- Monthly Video Started -- ')
stepMonth = 2
while running:
currentMonth = datetime.date.today().month
currentYear = datetime.date.today().year
conn = sqlite3.connect(vccDb)
c = conn.cursor()
c.execute("Select year,month from images")
F = c.fetchall()
months = list(set(F))
print(months)
for month in months:
c.execute("Select * from video where year = ? and month = ? and duration = ?",(int(month[0]),int(month[1]),'month'))
F = c.fetchall()
if len(F) == 0 and month != (currentYear,currentMonth):
step = 0
image = 0
for f in os.listdir(monthTemp):
os.remove(os.path.join(monthTemp, f))
c.execute("Select dayRec from images where year = ? and month = ?",month)
F = c.fetchall()
days = np.sort(np.unique(F))
for day in days:
c.execute("Select hours from images where dayRec = ?",(int(day),))
F = c.fetchall()
hours = np.sort(np.unique(F))
c.execute("Select day from images where dayRec = ?",(int(day),))
dayPic = c.fetchall()[0][0]
for hour in hours:
c.execute("Select minutes from images where dayRec = ? and hours = ?",(int(day),int(hour)))
F = c.fetchall()
minutes = np.sort(np.unique(F))
for minute in minutes:
if image%stepMonth == 0:
path = fileNamer(int(month[0]),int(month[1]),dayPic,hour,minute)
copyfile(path, monthTemp + 'image'+str(step).zfill(8)+'.jpg')
step = step+1
image=image+1
videoName = 'month_'+str(month[0])+'_'+str(month[1]).zfill(2)+'.mp4'
videoLine = ffmpegMonth + monthTemp+videoName
print(videoLine)
subprocess.call(videoLine,shell=True)
copyfile(monthTemp+videoName,pather(monthVid,str(month[0])+'_'+str(month[1]).zfill(2))+videoName)
videoId = upload_video(monthTemp+videoName,title = "Month "+str(month))
values = [videoId,"month",int(month[0]),int(month[1]),day,0]
c.execute("INSERT INTO video VALUES (?,?,?,?,?,?)",values)
conn.commit()
for f in os.listdir(monthTemp):
os.remove(os.path.join(monthTemp, f))
conn.close()
try:
os.remove(monthTemp+'*.jpg')
os.remove(monthTemp+'*.mp4')
except:
pass
tSleep = 26-datetime.datetime.now().hour
time.sleep(3600*tSleep)
def everythingVideo():
#tSleep = 27-dt.datetime.now().hour
#print('sleeping for '+str(tSleep)+' hours')
print( ' -- Everything Video Started -- ')
while running:
image = 0
step = 0
todayDate = datetime.date.today()
dayRecToday = (todayDate-day0).days
conn = sqlite3.connect(vccDb)
c = conn.cursor()
c.execute("Select dayRec from images")
F = c.fetchall()
days = np.sort(np.unique(F))
for f in os.listdir(everythingTemp):
os.remove(os.path.join(everythingTemp, f))
stepEverything = int(np.ceil(len(days)/30.0))
for day in days:
if day !=dayRecToday:
c.execute("Select hours from images where dayRec = ?",(int(day),))
F = c.fetchall()
hours = np.sort(np.unique(F))
c.execute("Select year,month,day from images where dayRec = ?",(int(day),))
year,month,dayPic = c.fetchall()[0]
for hour in hours:
c.execute("Select minutes from images where dayRec = ? and hours = ?",(int(day),int(hour)))
F = c.fetchall()
minutes = np.sort(np.unique(F))
for minute in minutes:
if image%stepEverything == 0:
path = fileNamer(year,month,dayPic,hour,minute)
copyfile(path, everythingTemp + 'image'+str(step).zfill(8)+'.jpg')
step = step+1
image=image+1
videoName = 'everything_'+str(todayDate.year)+'-'+str(todayDate.month).zfill(4)+'-'+str(todayDate.day).zfill(4)+'.mp4'
videoLine = ffmpegEverything + everythingTemp+videoName
print(videoLine)
subprocess.call(videoLine,shell = True)
copyfile(everythingTemp+videoName,pather(everythingVid,str(todayDate.year)+'-'+str(todayDate.month).zfill(4)+'-'+str(todayDate.day).zfill(4))+videoName)
videoId = upload_video(everythingTemp+videoName,title = "Everything up to "+str(todayDate))
values = [videoId,"everything",todayDate.year,todayDate.month,todayDate.day,0]
c.execute("INSERT INTO video VALUES (?,?,?,?,?,?)",values)
conn.commit()
conn.close()
for f in os.listdir(everythingTemp):
os.remove(os.path.join(everythingTemp, f))
tSleep = 27-datetime.datetime.now().hour+2*24
print('sleeping for '+str(tSleep)+' hours')
time.sleep(tSleep*3600)
def main():
if not os.path.isfile(vccDb):
firstGenDb()
checkFilesThread = threading.Thread(target=dbFiller,args = (False,7*24*60*60))
checkFilesThread.daemon = True
checkFilesThread.start()
checkFilesThread = threading.Thread(target=dbFiller,args = (True,5*60))
checkFilesThread.daemon = True
checkFilesThread.start()
weekThread = threading.Thread(target=weeklyVideo)
weekThread.daemon = True
weekThread.start()
monthThread = threading.Thread(target=monthlyVideo)
monthThread.daemon = True
monthThread.start()
everythingThread = threading.Thread(target=everythingVideo)
everythingThread.daemon = True
everythingThread.start()
t0 =time.time()
while running:
time.sleep(60*60)
print(' -----> Making Timelapses since '+str(int((time.time()-t0)/3600)) +' hours')
if __name__ == '__main__':
main() | 0.105326 | 0.087486 |
from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = [
"zero",
"um",
"dois",
"três",
"tres",
"quatro",
"cinco",
"seis",
"sete",
"oito",
"nove",
"dez",
"onze",
"doze",
"dúzia",
"dúzias",
"duzia",
"duzias",
"treze",
"catorze",
"quinze",
"dezasseis",
"dezassete",
"dezoito",
"dezanove",
"vinte",
"trinta",
"quarenta",
"cinquenta",
"sessenta",
"setenta",
"oitenta",
"noventa",
"cem",
"cento",
"duzentos",
"trezentos",
"quatrocentos",
"quinhentos",
"seicentos",
"setecentos",
"oitocentos",
"novecentos",
"mil",
"milhão",
"milhao",
"milhões",
"milhoes",
"bilhão",
"bilhao",
"bilhões",
"bilhoes",
"trilhão",
"trilhao",
"trilhões",
"trilhoes",
"quadrilhão",
"quadrilhao",
"quadrilhões",
"quadrilhoes",
]
_ordinal_words = [
"primeiro",
"segundo",
"terceiro",
"quarto",
"quinto",
"sexto",
"sétimo",
"oitavo",
"nono",
"décimo",
"vigésimo",
"trigésimo",
"quadragésimo",
"quinquagésimo",
"sexagésimo",
"septuagésimo",
"octogésimo",
"nonagésimo",
"centésimo",
"ducentésimo",
"trecentésimo",
"quadringentésimo",
"quingentésimo",
"sexcentésimo",
"septingentésimo",
"octingentésimo",
"nongentésimo",
"milésimo",
"milionésimo",
"bilionésimo",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "").replace("º", "").replace("ª", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
if text.lower() in _ordinal_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num} | spacy/lang/pt/lex_attrs.py | from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = [
"zero",
"um",
"dois",
"três",
"tres",
"quatro",
"cinco",
"seis",
"sete",
"oito",
"nove",
"dez",
"onze",
"doze",
"dúzia",
"dúzias",
"duzia",
"duzias",
"treze",
"catorze",
"quinze",
"dezasseis",
"dezassete",
"dezoito",
"dezanove",
"vinte",
"trinta",
"quarenta",
"cinquenta",
"sessenta",
"setenta",
"oitenta",
"noventa",
"cem",
"cento",
"duzentos",
"trezentos",
"quatrocentos",
"quinhentos",
"seicentos",
"setecentos",
"oitocentos",
"novecentos",
"mil",
"milhão",
"milhao",
"milhões",
"milhoes",
"bilhão",
"bilhao",
"bilhões",
"bilhoes",
"trilhão",
"trilhao",
"trilhões",
"trilhoes",
"quadrilhão",
"quadrilhao",
"quadrilhões",
"quadrilhoes",
]
_ordinal_words = [
"primeiro",
"segundo",
"terceiro",
"quarto",
"quinto",
"sexto",
"sétimo",
"oitavo",
"nono",
"décimo",
"vigésimo",
"trigésimo",
"quadragésimo",
"quinquagésimo",
"sexagésimo",
"septuagésimo",
"octogésimo",
"nonagésimo",
"centésimo",
"ducentésimo",
"trecentésimo",
"quadringentésimo",
"quingentésimo",
"sexcentésimo",
"septingentésimo",
"octingentésimo",
"nongentésimo",
"milésimo",
"milionésimo",
"bilionésimo",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "").replace("º", "").replace("ª", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
if text.lower() in _ordinal_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num} | 0.435061 | 0.339691 |
from litex.build.generic_platform import *
from litex.build.altera import AlteraPlatform
from litex.build.altera.programmer import USBBlaster
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk
("clk50", 0, Pins("T2"), IOStandard("3.3-V LVTTL")),
# Button
("key", 0, Pins("Y13"), IOStandard("3.3-V LVTTL")),
("key", 1, Pins("W13"), IOStandard("3.3-V LVTTL")),
# SPIFlash (W25Q64)
("spiflash", 0,
# clk
Subsignal("cs_n", Pins("E2")),
Subsignal("clk", Pins("K2")),
Subsignal("mosi", Pins("D1")),
Subsignal("miso", Pins("E2")),
IOStandard("3.3-V LVTTL"),
),
# SDR SDRAM
("sdram_clock", 0, Pins("Y6"), IOStandard("3.3-V LVTTL")),
("sdram", 0,
Subsignal("a", Pins(
"V2 V1 U2 U1 V3 V4 Y2 AA1",
"Y3 V5 W1 Y4 V6")),
Subsignal("ba", Pins("Y1 W2")),
Subsignal("cs_n", Pins("AA3")),
Subsignal("cke", Pins("W6")),
Subsignal("ras_n", Pins("AB3")),
Subsignal("cas_n", Pins("AA4")),
Subsignal("we_n", Pins("AB4")),
Subsignal("dq", Pins(
"AA10 AB9 AA9 AB8 AA8 AB7 AA7 AB5",
"Y7 W8 Y8 V9 V10 Y10 W10 V11")),
Subsignal("dm", Pins("AA5 W7")),
IOStandard("3.3-V LVTTL")
),
]
# The connectors are named after the daughterboard, not the core board
# because on the different core boards the names vary, but on the
# daughterboard they stay the same, which we need to connect the
# daughterboard peripherals to the core board.
# On this board J2 is U7 and J3 is U8
_connectors = [
("J2", {
# odd row even row
7: "R1", 8: "R2",
9: "P1", 10: "P2",
11: "N1", 12: "N2",
13: "M1", 14: "M2",
15: "J1", 16: "J2",
17: "H1", 18: "H2",
19: "F1", 20: "F2",
21: "E1", 22: "D2",
23: "C1", 24: "C2",
25: "B1", 26: "B2",
27: "B3", 28: "A3",
29: "B4", 30: "A4",
31: "C4", 32: "C3",
33: "B5", 34: "A5",
35: "B6", 36: "A6",
37: "B7", 38: "A7",
39: "B8", 40: "A8",
41: "B9", 42: "A9",
43: "B10", 44: "A10",
45: "B13", 46: "A13",
47: "B14", 48: "A14",
49: "B15", 50: "A15",
51: "B16", 52: "A16",
53: "B17", 54: "A17",
55: "B18", 56: "A18",
57: "B19", 58: "A19",
59: "B20", 60: "A20",
}),
("J3", {
# odd row even row
7: "AA13", 8: "AB13",
9: "AA14", 10: "AB14",
11: "AA15", 12: "AB15",
13: "AA16", 14: "AB16",
15: "AA17", 16: "AB17",
17: "AA18", 18: "AB18",
19: "AA19", 20: "AB19",
21: "AA20", 22: "AB20",
23: "Y22", 24: "Y21",
25: "W22", 26: "W21",
27: "V22", 28: "V21",
29: "U22", 30: "U21",
31: "R22", 32: "R21",
33: "P22", 34: "P21",
35: "N22", 36: "N21",
37: "M22", 38: "M21",
39: "L22", 40: "L21",
41: "K22", 42: "K21",
43: "J22", 44: "J21",
45: "H22", 46: "H21",
47: "F22", 48: "F21",
49: "E22", 50: "E21",
51: "D22", 52: "D21",
53: "C22", 54: "C21",
55: "B22", 56: "B21",
57: "N20", 58: "N19",
59: "M20", 60: "M19",
})
]
# Platform -----------------------------------------------------------------------------------------
class Platform(AlteraPlatform):
default_clk_name = "clk50"
default_clk_period = 1e9/50e6
core_resources = [
("user_led", 0, Pins("E4"), IOStandard("3.3-V LVTTL")),
("serial", 0,
Subsignal("tx", Pins("J3:7"), IOStandard("3.3-V LVTTL")),
Subsignal("rx", Pins("J3:8"), IOStandard("3.3-V LVTTL"))
),
]
def __init__(self, variant="ep4ce15", toolchain="quartus", with_daughterboard=False):
device = {
"ep4ce15": "EP4CE15F23C8",
"ep4ce55": "EP4CE55F23C8"
}[variant]
io = _io
connectors = _connectors
if with_daughterboard:
from litex_boards.platforms.qmtech_daughterboard import QMTechDaughterboard
daughterboard = QMTechDaughterboard(IOStandard("3.3-V LVTTL"))
io += daughterboard.io
connectors += daughterboard.connectors
else:
io += self.core_resources
AlteraPlatform.__init__(self, device, io, connectors, toolchain=toolchain)
if with_daughterboard:
# an ethernet pin takes K22, so make it available
self.add_platform_command("set_global_assignment -name CYCLONEII_RESERVE_NCEO_AFTER_CONFIGURATION \"USE AS REGULAR IO\"")
def create_programmer(self):
return USBBlaster()
def do_finalize(self, fragment):
AlteraPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk50", loose=True), 1e9/50e6) | litex_boards/platforms/qmtech_ep4cex5.py |
from litex.build.generic_platform import *
from litex.build.altera import AlteraPlatform
from litex.build.altera.programmer import USBBlaster
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk
("clk50", 0, Pins("T2"), IOStandard("3.3-V LVTTL")),
# Button
("key", 0, Pins("Y13"), IOStandard("3.3-V LVTTL")),
("key", 1, Pins("W13"), IOStandard("3.3-V LVTTL")),
# SPIFlash (W25Q64)
("spiflash", 0,
# clk
Subsignal("cs_n", Pins("E2")),
Subsignal("clk", Pins("K2")),
Subsignal("mosi", Pins("D1")),
Subsignal("miso", Pins("E2")),
IOStandard("3.3-V LVTTL"),
),
# SDR SDRAM
("sdram_clock", 0, Pins("Y6"), IOStandard("3.3-V LVTTL")),
("sdram", 0,
Subsignal("a", Pins(
"V2 V1 U2 U1 V3 V4 Y2 AA1",
"Y3 V5 W1 Y4 V6")),
Subsignal("ba", Pins("Y1 W2")),
Subsignal("cs_n", Pins("AA3")),
Subsignal("cke", Pins("W6")),
Subsignal("ras_n", Pins("AB3")),
Subsignal("cas_n", Pins("AA4")),
Subsignal("we_n", Pins("AB4")),
Subsignal("dq", Pins(
"AA10 AB9 AA9 AB8 AA8 AB7 AA7 AB5",
"Y7 W8 Y8 V9 V10 Y10 W10 V11")),
Subsignal("dm", Pins("AA5 W7")),
IOStandard("3.3-V LVTTL")
),
]
# The connectors are named after the daughterboard, not the core board
# because on the different core boards the names vary, but on the
# daughterboard they stay the same, which we need to connect the
# daughterboard peripherals to the core board.
# On this board J2 is U7 and J3 is U8
_connectors = [
("J2", {
# odd row even row
7: "R1", 8: "R2",
9: "P1", 10: "P2",
11: "N1", 12: "N2",
13: "M1", 14: "M2",
15: "J1", 16: "J2",
17: "H1", 18: "H2",
19: "F1", 20: "F2",
21: "E1", 22: "D2",
23: "C1", 24: "C2",
25: "B1", 26: "B2",
27: "B3", 28: "A3",
29: "B4", 30: "A4",
31: "C4", 32: "C3",
33: "B5", 34: "A5",
35: "B6", 36: "A6",
37: "B7", 38: "A7",
39: "B8", 40: "A8",
41: "B9", 42: "A9",
43: "B10", 44: "A10",
45: "B13", 46: "A13",
47: "B14", 48: "A14",
49: "B15", 50: "A15",
51: "B16", 52: "A16",
53: "B17", 54: "A17",
55: "B18", 56: "A18",
57: "B19", 58: "A19",
59: "B20", 60: "A20",
}),
("J3", {
# odd row even row
7: "AA13", 8: "AB13",
9: "AA14", 10: "AB14",
11: "AA15", 12: "AB15",
13: "AA16", 14: "AB16",
15: "AA17", 16: "AB17",
17: "AA18", 18: "AB18",
19: "AA19", 20: "AB19",
21: "AA20", 22: "AB20",
23: "Y22", 24: "Y21",
25: "W22", 26: "W21",
27: "V22", 28: "V21",
29: "U22", 30: "U21",
31: "R22", 32: "R21",
33: "P22", 34: "P21",
35: "N22", 36: "N21",
37: "M22", 38: "M21",
39: "L22", 40: "L21",
41: "K22", 42: "K21",
43: "J22", 44: "J21",
45: "H22", 46: "H21",
47: "F22", 48: "F21",
49: "E22", 50: "E21",
51: "D22", 52: "D21",
53: "C22", 54: "C21",
55: "B22", 56: "B21",
57: "N20", 58: "N19",
59: "M20", 60: "M19",
})
]
# Platform -----------------------------------------------------------------------------------------
class Platform(AlteraPlatform):
default_clk_name = "clk50"
default_clk_period = 1e9/50e6
core_resources = [
("user_led", 0, Pins("E4"), IOStandard("3.3-V LVTTL")),
("serial", 0,
Subsignal("tx", Pins("J3:7"), IOStandard("3.3-V LVTTL")),
Subsignal("rx", Pins("J3:8"), IOStandard("3.3-V LVTTL"))
),
]
def __init__(self, variant="ep4ce15", toolchain="quartus", with_daughterboard=False):
device = {
"ep4ce15": "EP4CE15F23C8",
"ep4ce55": "EP4CE55F23C8"
}[variant]
io = _io
connectors = _connectors
if with_daughterboard:
from litex_boards.platforms.qmtech_daughterboard import QMTechDaughterboard
daughterboard = QMTechDaughterboard(IOStandard("3.3-V LVTTL"))
io += daughterboard.io
connectors += daughterboard.connectors
else:
io += self.core_resources
AlteraPlatform.__init__(self, device, io, connectors, toolchain=toolchain)
if with_daughterboard:
# an ethernet pin takes K22, so make it available
self.add_platform_command("set_global_assignment -name CYCLONEII_RESERVE_NCEO_AFTER_CONFIGURATION \"USE AS REGULAR IO\"")
def create_programmer(self):
return USBBlaster()
def do_finalize(self, fragment):
AlteraPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk50", loose=True), 1e9/50e6) | 0.391522 | 0.270598 |
import sys, os
import argparse
from collections import OrderedDict, defaultdict
import gffutils as gff
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from io import StringIO
import numpy as np
from random import sample
from dendropy.simulate import treesim
from dendropy.model import reconcile
from dendropy import TaxonNamespace
import copy
import math
codons = [
'ATA', 'ATC', 'ATT', 'ATG', 'ACA', 'ACC', 'ACG', 'ACT', 'AAC', 'AAT',
'AAA', 'AAG', 'AGC', 'AGT', 'AGA', 'AGG', 'CTA', 'CTC', 'CTG', 'CTT',
'CCA', 'CCC', 'CCG', 'CCT', 'CAC', 'CAT', 'CAA', 'CAG', 'CGA', 'CGC',
'CGG', 'CGT', 'GTA', 'GTC', 'GTG', 'GTT', 'GCA', 'GCC', 'GCG', 'GCT',
'GAC', 'GAT', 'GAA', 'GAG', 'GGA', 'GGC', 'GGG', 'GGT', 'TCA', 'TCC',
'TCG', 'TCT', 'TTC', 'TTT', 'TTA', 'TTG', 'TAC', 'TAT', 'TGC', 'TGT', 'TGG'
]
codons = [Seq(c) for c in codons]
translation_table = np.array([[[b'K', b'N', b'K', b'N', b'X'],
[b'T', b'T', b'T', b'T', b'T'],
[b'R', b'S', b'R', b'S', b'X'],
[b'I', b'I', b'M', b'I', b'X'],
[b'X', b'X', b'X', b'X', b'X']],
[[b'Q', b'H', b'Q', b'H', b'X'],
[b'P', b'P', b'P', b'P', b'P'],
[b'R', b'R', b'R', b'R', b'R'],
[b'L', b'L', b'L', b'L', b'L'],
[b'X', b'X', b'X', b'X', b'X']],
[[b'E', b'D', b'E', b'D', b'X'],
[b'A', b'A', b'A', b'A', b'A'],
[b'G', b'G', b'G', b'G', b'G'],
[b'V', b'V', b'V', b'V', b'V'],
[b'X', b'X', b'X', b'X', b'X']],
[[b'*', b'Y', b'*', b'Y', b'X'],
[b'S', b'S', b'S', b'S', b'S'],
[b'*', b'C', b'W', b'C', b'X'],
[b'L', b'F', b'L', b'F', b'X'],
[b'X', b'X', b'X', b'X', b'X']],
[[b'X', b'X', b'X', b'X', b'X'],
[b'X', b'X', b'X', b'X', b'X'],
[b'X', b'X', b'X', b'X', b'X'],
[b'X', b'X', b'X', b'X', b'X'],
[b'X', b'X', b'X', b'X', b'X']]])
reduce_array = np.full(200, 4)
reduce_array[[65, 97]] = 0
reduce_array[[67, 99]] = 1
reduce_array[[71, 103]] = 2
reduce_array[[84, 116]] = 3
def translate(seq):
indices = reduce_array[np.fromstring(seq, dtype=np.int8)]
return translation_table[
indices[np.arange(0, len(seq), 3)], indices[np.arange(1, len(seq), 3)],
indices[np.arange(2, len(seq), 3)]].tostring().decode('ascii')
def get_codon(index, strand="+"):
codon = codons[index]
if strand == "-":
codon = codon.reverse_complement()
return np.array(list(str(codon)))
def clean_gff_string(gff_string):
splitlines = gff_string.splitlines()
lines_to_delete = []
for index in range(len(splitlines)):
if '##sequence-region' in splitlines[index]:
lines_to_delete.append(index)
for index in sorted(lines_to_delete, reverse=True):
del splitlines[index]
cleaned_gff = "\n".join(splitlines)
return cleaned_gff
def simulate_img_with_mutation(in_tree,
gain_rate,
loss_rate,
mutation_rate,
ngenes=100,
min_ncore=10,
max_ncore=99999999):
# simulate accessory p/a using infintely many genes model
n_additions = 0
for node in in_tree.preorder_node_iter():
node.acc_genes = []
if node.parent_node is not None:
# simulate loss of genes from previous node
node.acc_genes = [
g for g in node.acc_genes
if np.random.poisson(lam=node.edge.length * loss_rate / 2.0,
size=1) > 0
]
# simulate new genes with lengths sampled uniformly.
n_new = np.random.poisson(lam=node.edge.length * gain_rate / 2.0,
size=1)[0]
lengths = np.random.uniform(low=0.0,
high=node.edge.length,
size=n_new)
for l in lengths:
# simulate loss using this length
if np.random.poisson(lam=l * loss_rate / 2.0, size=1)[0] > 0:
n_new -= 1
# add new genes to node
node.acc_genes = node.parent_node.acc_genes + list(
range(n_additions, n_additions + n_new))
n_additions += n_new
print("accessory size: ", n_additions)
# Now add core
ncore = ngenes - n_additions
if ncore < min_ncore:
ncore = min_ncore
if ncore > max_ncore:
ncore = max_ncore
core_genes = list(range(n_additions, n_additions + ncore))
for node in in_tree.preorder_node_iter():
node.acc_genes += core_genes
# Now add mutations
n_condons = len(codons)
for node in in_tree.preorder_node_iter():
node.gene_mutations = defaultdict(list)
if node.parent_node is not None:
# copy mutations from parent
for g in node.acc_genes:
if g in node.parent_node.gene_mutations:
node.gene_mutations[g] = node.parent_node.gene_mutations[
g].copy()
# add mutations
for g in node.acc_genes:
n_new = np.random.poisson(lam=node.edge.length *
mutation_rate / 2.0,
size=1)[0]
locations = list(np.random.uniform(low=0.0, high=1,
size=n_new))
mutations = [(sample(range(0, n_condons), 1)[0], l)
for l in locations]
node.gene_mutations[g] += mutations
return in_tree
def simulate_pangenome(ngenes, nisolates, effective_pop_size, gain_rate,
loss_rate, mutation_rate, max_core):
# simulate a phylogeny using the coalscent
sim_tree = treesim.pure_kingman_tree(taxon_namespace=TaxonNamespace(
[str(i) for i in range(1, 1 + nisolates)]),
pop_size=effective_pop_size)
basic_tree = copy.deepcopy(sim_tree)
# simulate gene p/a and mutation
sim_tree = simulate_img_with_mutation(sim_tree,
gain_rate=gain_rate,
loss_rate=loss_rate,
mutation_rate=mutation_rate,
ngenes=ngenes,
max_ncore=max_core)
# get genes and mutations for each isolate
gene_mutations = []
for leaf in sim_tree.leaf_node_iter():
gene_mutations.append([[g, leaf.gene_mutations[g]]
for g in leaf.acc_genes])
return (gene_mutations, basic_tree)
def add_diversity(gfffile, nisolates, effective_pop_size, gain_rate, loss_rate,
mutation_rate, n_sim_genes, prefix, max_core):
with open(gfffile, 'r') as infile:
lines = infile.read().replace(',','')
split = lines.split('##FASTA')
if len(split) != 2:
print("Problem reading GFF3 file: ", gfffile)
raise RuntimeError("Error reading GFF3 input!")
with StringIO(split[1]) as temp_fasta:
sequences = list(SeqIO.parse(temp_fasta, 'fasta'))
seq_dict = OrderedDict()
for seq in sequences:
seq_dict[seq.id] = np.array(list(str(seq.seq)))
parsed_gff = gff.create_db(clean_gff_string(split[0]),
dbfn=":memory:",
force=True,
keep_order=False,
merge_strategy="create_unique",
sort_attribute_values=True,
from_string=True)
#Get gene entries to modify
all_gene_locations = []
gene_locations = []
prev_end = -1
gene_seqs = []
for entry in parsed_gff.all_features(featuretype=()):
if "CDS" not in entry.featuretype: continue
left = entry.start - 1
right = entry.stop
gene_sequence = Seq(''.join(seq_dict[entry.seqid][left:right]))
if entry.strand == "-":
gene_sequence = gene_sequence.reverse_complement()
gene_sequence = gene_sequence.translate()
gene_seqs.append(SeqRecord(gene_sequence, id=entry.id, description=""))
all_gene_locations.append(entry)
if entry.start < prev_end:
prev_end = entry.end
gene_locations = gene_locations[0:-1]
continue
prev_end = entry.end
gene_locations.append(entry)
# sub-sample genes so that some are conserved
gene_locations = sample(gene_locations, n_sim_genes)
# simulate presence/absence matrix and gene mutations (only swap codons)
pan_sim, sim_tree = simulate_pangenome(
ngenes=len(gene_locations),
nisolates=nisolates,
effective_pop_size=effective_pop_size,
gain_rate=gain_rate,
loss_rate=loss_rate,
mutation_rate=mutation_rate,
max_core=max_core)
# write out tree
sim_tree.write(path=prefix + "_sim_tree.nwk", schema="newick")
#Modify each gene
for i, pan in enumerate(pan_sim):
temp_seq_dict = copy.deepcopy(seq_dict)
included_genes = set()
n_mutations = 0
for gene in pan:
entry = gene_locations[gene[0]]
included_genes.add(gene[0])
left = entry.start - 1
right = entry.stop
if right < left: raise RuntimeError("Error issue with left/right!")
start_sites = list(range(left, right, 3))[1:-1]
n_mutations += len(gene[1])
# swap codons at chosen start sites
for mutation in gene[1]:
# find start site of codon swap
start = start_sites[math.floor(mutation[1] * len(start_sites))]
cod = get_codon(index=mutation[0], strand=entry.strand)
if (start < left) or ((start + 3) > (right)):
raise RuntimeError("Error issue with start!")
temp_seq_dict[entry.seqid][start:(start + 3)] = cod
# remove genes not in the accessory
deleted_genes = 0
d_index = defaultdict(lambda: np.array([]))
for g, entry in enumerate(gene_locations):
left = entry.start - 1
right = entry.stop
if right < left: raise RuntimeError("Error issue with left/right!")
if g not in included_genes:
deleted_genes += 1
d_index[entry.seqid] = np.append(d_index[entry.seqid],
np.arange(left, right))
gene_sequence = Seq(''.join(
temp_seq_dict[entry.seqid][left:right]))
if entry.strand == "-":
gene_sequence = gene_sequence.reverse_complement()
gene_sequence = gene_sequence.translate()
gene_seqs.append(
SeqRecord(gene_sequence, id=entry.id, description=""))
for entryid in d_index:
temp_seq_dict[entryid] = np.delete(temp_seq_dict[entry.seqid],
d_index[entryid])
print("mutations in genome: ", n_mutations)
print("genes deleted: ", deleted_genes)
# write out sequences
out_name = prefix + "_iso_" + str(i) + ".fasta"
outfile = open(out_name, 'w')
sequences = [
SeqRecord(Seq(''.join(temp_seq_dict[s])), id=s, description="")
for s in temp_seq_dict
]
SeqIO.write(sequences, outfile, 'fasta')
# close file
outfile.close()
# write out database for prokka
prokka_db_name = prefix + "_prokka_DB.fasta"
with open(prokka_db_name, 'w') as dboutfile:
SeqIO.write(gene_seqs, dboutfile, 'fasta')
# write presence/absence file
pa_by_iso = []
for i, pan in enumerate(pan_sim):
pa = set()
for gene in pan:
pa.add(gene[0])
pa_by_iso.append(pa)
out_name = prefix + "_presence_absence.csv"
seen = set()
with open(out_name, 'w') as outfile:
outfile.write("\t".join(
["Gene"] + ["iso" + str(i)
for i in range(1, nisolates + 1)]) + "\n")
for g, entry in enumerate(gene_locations):
seen.add(entry.id)
outfile.write("\t".join(
[entry.id] +
["1" if g in pa_by_iso[i] else "0"
for i in range(nisolates)]) + "\n")
for g, entry in enumerate(all_gene_locations):
if entry.id in seen: continue
outfile.write("\t".join([entry.id] +
["1" for i in range(nisolates)]) + "\n")
return
def main():
parser = argparse.ArgumentParser(description=(
'Simulates a pangenome using the infinitely many genes ' +
'model and adds mutational variation to genes. Takes a gff3 file as input.'
))
parser.add_argument('-g',
'--gff',
dest='gff',
type=str,
required=True,
help='input gff file name')
parser.add_argument('--nisolates',
dest='nisolates',
type=int,
required=True,
help='number of genomes to simulate')
parser.add_argument('--mutation_rate',
dest='mutation_rate',
type=float,
required=True,
help='mutation rate of genes')
parser.add_argument('--gain_rate',
dest='gain_rate',
type=float,
required=True,
help='gain rate of accessory genes')
parser.add_argument('--loss_rate',
dest='loss_rate',
type=float,
required=True,
help='loss rate of accessory genes')
parser.add_argument('--pop_size',
dest='pop_size',
type=float,
required=True,
help='effective population size')
parser.add_argument(
'--n_sim_genes',
dest='n_sim_genes',
type=int,
required=True,
help=('max number of genes that may be ' +
'affected by the simulation. The rest' + ' will be left as is.'))
parser.add_argument('--max_core',
dest='max_core',
type=int,
default=99999999,
help=('max number of core genes' +
'default=n_sim-accessory'))
parser.add_argument('-o',
'--out',
dest='output_dir',
type=str,
required=True,
help='output directory')
args = parser.parse_args()
args.pop_size = math.floor(args.pop_size)
args.output_dir = os.path.join(args.output_dir, "")
prefix = (args.output_dir + "pan_sim_gr_" + str(args.gain_rate) + "_lr_" +
str(args.loss_rate) + "_mu_" + str(args.mutation_rate))
# adjust rates for popsize
args.gain_rate = 2.0 * args.gain_rate * args.pop_size
args.loss_rate = 2.0 * args.loss_rate * args.pop_size
args.mutation_rate = 2.0 * args.mutation_rate * args.pop_size
add_diversity(gfffile=args.gff,
nisolates=args.nisolates,
effective_pop_size=args.pop_size,
gain_rate=args.gain_rate,
loss_rate=args.loss_rate,
mutation_rate=args.mutation_rate,
n_sim_genes=args.n_sim_genes,
prefix=prefix,
max_core=args.max_core)
return
if __name__ == '__main__':
main() | scripts/pseudo_full_pangenome.py | import sys, os
import argparse
from collections import OrderedDict, defaultdict
import gffutils as gff
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from io import StringIO
import numpy as np
from random import sample
from dendropy.simulate import treesim
from dendropy.model import reconcile
from dendropy import TaxonNamespace
import copy
import math
codons = [
'ATA', 'ATC', 'ATT', 'ATG', 'ACA', 'ACC', 'ACG', 'ACT', 'AAC', 'AAT',
'AAA', 'AAG', 'AGC', 'AGT', 'AGA', 'AGG', 'CTA', 'CTC', 'CTG', 'CTT',
'CCA', 'CCC', 'CCG', 'CCT', 'CAC', 'CAT', 'CAA', 'CAG', 'CGA', 'CGC',
'CGG', 'CGT', 'GTA', 'GTC', 'GTG', 'GTT', 'GCA', 'GCC', 'GCG', 'GCT',
'GAC', 'GAT', 'GAA', 'GAG', 'GGA', 'GGC', 'GGG', 'GGT', 'TCA', 'TCC',
'TCG', 'TCT', 'TTC', 'TTT', 'TTA', 'TTG', 'TAC', 'TAT', 'TGC', 'TGT', 'TGG'
]
codons = [Seq(c) for c in codons]
translation_table = np.array([[[b'K', b'N', b'K', b'N', b'X'],
[b'T', b'T', b'T', b'T', b'T'],
[b'R', b'S', b'R', b'S', b'X'],
[b'I', b'I', b'M', b'I', b'X'],
[b'X', b'X', b'X', b'X', b'X']],
[[b'Q', b'H', b'Q', b'H', b'X'],
[b'P', b'P', b'P', b'P', b'P'],
[b'R', b'R', b'R', b'R', b'R'],
[b'L', b'L', b'L', b'L', b'L'],
[b'X', b'X', b'X', b'X', b'X']],
[[b'E', b'D', b'E', b'D', b'X'],
[b'A', b'A', b'A', b'A', b'A'],
[b'G', b'G', b'G', b'G', b'G'],
[b'V', b'V', b'V', b'V', b'V'],
[b'X', b'X', b'X', b'X', b'X']],
[[b'*', b'Y', b'*', b'Y', b'X'],
[b'S', b'S', b'S', b'S', b'S'],
[b'*', b'C', b'W', b'C', b'X'],
[b'L', b'F', b'L', b'F', b'X'],
[b'X', b'X', b'X', b'X', b'X']],
[[b'X', b'X', b'X', b'X', b'X'],
[b'X', b'X', b'X', b'X', b'X'],
[b'X', b'X', b'X', b'X', b'X'],
[b'X', b'X', b'X', b'X', b'X'],
[b'X', b'X', b'X', b'X', b'X']]])
reduce_array = np.full(200, 4)
reduce_array[[65, 97]] = 0
reduce_array[[67, 99]] = 1
reduce_array[[71, 103]] = 2
reduce_array[[84, 116]] = 3
def translate(seq):
indices = reduce_array[np.fromstring(seq, dtype=np.int8)]
return translation_table[
indices[np.arange(0, len(seq), 3)], indices[np.arange(1, len(seq), 3)],
indices[np.arange(2, len(seq), 3)]].tostring().decode('ascii')
def get_codon(index, strand="+"):
codon = codons[index]
if strand == "-":
codon = codon.reverse_complement()
return np.array(list(str(codon)))
def clean_gff_string(gff_string):
splitlines = gff_string.splitlines()
lines_to_delete = []
for index in range(len(splitlines)):
if '##sequence-region' in splitlines[index]:
lines_to_delete.append(index)
for index in sorted(lines_to_delete, reverse=True):
del splitlines[index]
cleaned_gff = "\n".join(splitlines)
return cleaned_gff
def simulate_img_with_mutation(in_tree,
gain_rate,
loss_rate,
mutation_rate,
ngenes=100,
min_ncore=10,
max_ncore=99999999):
# simulate accessory p/a using infintely many genes model
n_additions = 0
for node in in_tree.preorder_node_iter():
node.acc_genes = []
if node.parent_node is not None:
# simulate loss of genes from previous node
node.acc_genes = [
g for g in node.acc_genes
if np.random.poisson(lam=node.edge.length * loss_rate / 2.0,
size=1) > 0
]
# simulate new genes with lengths sampled uniformly.
n_new = np.random.poisson(lam=node.edge.length * gain_rate / 2.0,
size=1)[0]
lengths = np.random.uniform(low=0.0,
high=node.edge.length,
size=n_new)
for l in lengths:
# simulate loss using this length
if np.random.poisson(lam=l * loss_rate / 2.0, size=1)[0] > 0:
n_new -= 1
# add new genes to node
node.acc_genes = node.parent_node.acc_genes + list(
range(n_additions, n_additions + n_new))
n_additions += n_new
print("accessory size: ", n_additions)
# Now add core
ncore = ngenes - n_additions
if ncore < min_ncore:
ncore = min_ncore
if ncore > max_ncore:
ncore = max_ncore
core_genes = list(range(n_additions, n_additions + ncore))
for node in in_tree.preorder_node_iter():
node.acc_genes += core_genes
# Now add mutations
n_condons = len(codons)
for node in in_tree.preorder_node_iter():
node.gene_mutations = defaultdict(list)
if node.parent_node is not None:
# copy mutations from parent
for g in node.acc_genes:
if g in node.parent_node.gene_mutations:
node.gene_mutations[g] = node.parent_node.gene_mutations[
g].copy()
# add mutations
for g in node.acc_genes:
n_new = np.random.poisson(lam=node.edge.length *
mutation_rate / 2.0,
size=1)[0]
locations = list(np.random.uniform(low=0.0, high=1,
size=n_new))
mutations = [(sample(range(0, n_condons), 1)[0], l)
for l in locations]
node.gene_mutations[g] += mutations
return in_tree
def simulate_pangenome(ngenes, nisolates, effective_pop_size, gain_rate,
loss_rate, mutation_rate, max_core):
# simulate a phylogeny using the coalscent
sim_tree = treesim.pure_kingman_tree(taxon_namespace=TaxonNamespace(
[str(i) for i in range(1, 1 + nisolates)]),
pop_size=effective_pop_size)
basic_tree = copy.deepcopy(sim_tree)
# simulate gene p/a and mutation
sim_tree = simulate_img_with_mutation(sim_tree,
gain_rate=gain_rate,
loss_rate=loss_rate,
mutation_rate=mutation_rate,
ngenes=ngenes,
max_ncore=max_core)
# get genes and mutations for each isolate
gene_mutations = []
for leaf in sim_tree.leaf_node_iter():
gene_mutations.append([[g, leaf.gene_mutations[g]]
for g in leaf.acc_genes])
return (gene_mutations, basic_tree)
def add_diversity(gfffile, nisolates, effective_pop_size, gain_rate, loss_rate,
mutation_rate, n_sim_genes, prefix, max_core):
with open(gfffile, 'r') as infile:
lines = infile.read().replace(',','')
split = lines.split('##FASTA')
if len(split) != 2:
print("Problem reading GFF3 file: ", gfffile)
raise RuntimeError("Error reading GFF3 input!")
with StringIO(split[1]) as temp_fasta:
sequences = list(SeqIO.parse(temp_fasta, 'fasta'))
seq_dict = OrderedDict()
for seq in sequences:
seq_dict[seq.id] = np.array(list(str(seq.seq)))
parsed_gff = gff.create_db(clean_gff_string(split[0]),
dbfn=":memory:",
force=True,
keep_order=False,
merge_strategy="create_unique",
sort_attribute_values=True,
from_string=True)
#Get gene entries to modify
all_gene_locations = []
gene_locations = []
prev_end = -1
gene_seqs = []
for entry in parsed_gff.all_features(featuretype=()):
if "CDS" not in entry.featuretype: continue
left = entry.start - 1
right = entry.stop
gene_sequence = Seq(''.join(seq_dict[entry.seqid][left:right]))
if entry.strand == "-":
gene_sequence = gene_sequence.reverse_complement()
gene_sequence = gene_sequence.translate()
gene_seqs.append(SeqRecord(gene_sequence, id=entry.id, description=""))
all_gene_locations.append(entry)
if entry.start < prev_end:
prev_end = entry.end
gene_locations = gene_locations[0:-1]
continue
prev_end = entry.end
gene_locations.append(entry)
# sub-sample genes so that some are conserved
gene_locations = sample(gene_locations, n_sim_genes)
# simulate presence/absence matrix and gene mutations (only swap codons)
pan_sim, sim_tree = simulate_pangenome(
ngenes=len(gene_locations),
nisolates=nisolates,
effective_pop_size=effective_pop_size,
gain_rate=gain_rate,
loss_rate=loss_rate,
mutation_rate=mutation_rate,
max_core=max_core)
# write out tree
sim_tree.write(path=prefix + "_sim_tree.nwk", schema="newick")
#Modify each gene
for i, pan in enumerate(pan_sim):
temp_seq_dict = copy.deepcopy(seq_dict)
included_genes = set()
n_mutations = 0
for gene in pan:
entry = gene_locations[gene[0]]
included_genes.add(gene[0])
left = entry.start - 1
right = entry.stop
if right < left: raise RuntimeError("Error issue with left/right!")
start_sites = list(range(left, right, 3))[1:-1]
n_mutations += len(gene[1])
# swap codons at chosen start sites
for mutation in gene[1]:
# find start site of codon swap
start = start_sites[math.floor(mutation[1] * len(start_sites))]
cod = get_codon(index=mutation[0], strand=entry.strand)
if (start < left) or ((start + 3) > (right)):
raise RuntimeError("Error issue with start!")
temp_seq_dict[entry.seqid][start:(start + 3)] = cod
# remove genes not in the accessory
deleted_genes = 0
d_index = defaultdict(lambda: np.array([]))
for g, entry in enumerate(gene_locations):
left = entry.start - 1
right = entry.stop
if right < left: raise RuntimeError("Error issue with left/right!")
if g not in included_genes:
deleted_genes += 1
d_index[entry.seqid] = np.append(d_index[entry.seqid],
np.arange(left, right))
gene_sequence = Seq(''.join(
temp_seq_dict[entry.seqid][left:right]))
if entry.strand == "-":
gene_sequence = gene_sequence.reverse_complement()
gene_sequence = gene_sequence.translate()
gene_seqs.append(
SeqRecord(gene_sequence, id=entry.id, description=""))
for entryid in d_index:
temp_seq_dict[entryid] = np.delete(temp_seq_dict[entry.seqid],
d_index[entryid])
print("mutations in genome: ", n_mutations)
print("genes deleted: ", deleted_genes)
# write out sequences
out_name = prefix + "_iso_" + str(i) + ".fasta"
outfile = open(out_name, 'w')
sequences = [
SeqRecord(Seq(''.join(temp_seq_dict[s])), id=s, description="")
for s in temp_seq_dict
]
SeqIO.write(sequences, outfile, 'fasta')
# close file
outfile.close()
# write out database for prokka
prokka_db_name = prefix + "_prokka_DB.fasta"
with open(prokka_db_name, 'w') as dboutfile:
SeqIO.write(gene_seqs, dboutfile, 'fasta')
# write presence/absence file
pa_by_iso = []
for i, pan in enumerate(pan_sim):
pa = set()
for gene in pan:
pa.add(gene[0])
pa_by_iso.append(pa)
out_name = prefix + "_presence_absence.csv"
seen = set()
with open(out_name, 'w') as outfile:
outfile.write("\t".join(
["Gene"] + ["iso" + str(i)
for i in range(1, nisolates + 1)]) + "\n")
for g, entry in enumerate(gene_locations):
seen.add(entry.id)
outfile.write("\t".join(
[entry.id] +
["1" if g in pa_by_iso[i] else "0"
for i in range(nisolates)]) + "\n")
for g, entry in enumerate(all_gene_locations):
if entry.id in seen: continue
outfile.write("\t".join([entry.id] +
["1" for i in range(nisolates)]) + "\n")
return
def main():
parser = argparse.ArgumentParser(description=(
'Simulates a pangenome using the infinitely many genes ' +
'model and adds mutational variation to genes. Takes a gff3 file as input.'
))
parser.add_argument('-g',
'--gff',
dest='gff',
type=str,
required=True,
help='input gff file name')
parser.add_argument('--nisolates',
dest='nisolates',
type=int,
required=True,
help='number of genomes to simulate')
parser.add_argument('--mutation_rate',
dest='mutation_rate',
type=float,
required=True,
help='mutation rate of genes')
parser.add_argument('--gain_rate',
dest='gain_rate',
type=float,
required=True,
help='gain rate of accessory genes')
parser.add_argument('--loss_rate',
dest='loss_rate',
type=float,
required=True,
help='loss rate of accessory genes')
parser.add_argument('--pop_size',
dest='pop_size',
type=float,
required=True,
help='effective population size')
parser.add_argument(
'--n_sim_genes',
dest='n_sim_genes',
type=int,
required=True,
help=('max number of genes that may be ' +
'affected by the simulation. The rest' + ' will be left as is.'))
parser.add_argument('--max_core',
dest='max_core',
type=int,
default=99999999,
help=('max number of core genes' +
'default=n_sim-accessory'))
parser.add_argument('-o',
'--out',
dest='output_dir',
type=str,
required=True,
help='output directory')
args = parser.parse_args()
args.pop_size = math.floor(args.pop_size)
args.output_dir = os.path.join(args.output_dir, "")
prefix = (args.output_dir + "pan_sim_gr_" + str(args.gain_rate) + "_lr_" +
str(args.loss_rate) + "_mu_" + str(args.mutation_rate))
# adjust rates for popsize
args.gain_rate = 2.0 * args.gain_rate * args.pop_size
args.loss_rate = 2.0 * args.loss_rate * args.pop_size
args.mutation_rate = 2.0 * args.mutation_rate * args.pop_size
add_diversity(gfffile=args.gff,
nisolates=args.nisolates,
effective_pop_size=args.pop_size,
gain_rate=args.gain_rate,
loss_rate=args.loss_rate,
mutation_rate=args.mutation_rate,
n_sim_genes=args.n_sim_genes,
prefix=prefix,
max_core=args.max_core)
return
if __name__ == '__main__':
main() | 0.325521 | 0.29151 |
from django.test import TestCase
from unittest.mock import patch, call
# Import module
from backend.object_detector import *
class DetectObjectsTest(TestCase):
"""
Hard to test full call because of threading.
"""
@patch('backend.database_wrapper.create_hash_sum')
def setUp(self, mock_create_hash_sum) -> None:
mock_create_hash_sum.return_value = '1234'
self.cm_name = 'Test camera name'
self.fid = create_root_folder(path='home/user/', name='test_folder')
self.st = timezone.now()
self.et = timezone.now() + timezone.timedelta(seconds=5)
self.cids = []
for i in range(1, 4):
self.cids.append(
create_clip(clip_name='test_clip{}'.format(i), fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),
longitude=Decimal('0.0'), start_time=self.st + timezone.timedelta(seconds=3 * i - 2),
end_time=self.et + timezone.timedelta(seconds=3 * i - 2),
width=256, height=240, frame_rate=42, camera_name=self.cm_name))
@patch('backend.object_detector.ObjectDetector')
@patch('backend.object_detector.threading')
def test_basic(self, mock_threading, mock_od):
"""
Makes a simple call.
"""
code, res = detect_objects({CLIP_IDS: self.cids, RATE: 1})
self.assertEqual(code, 200)
self.assertEqual(res, {PROGRESS_ID: 1})
class GetProgressTest(TestCase):
def setUp(self) -> None:
"""
Create a progress object.
"""
self.pid = create_progress(total=1337, current=42)
def test_basic(self):
"""
Test simple call.
"""
code, res = get_progress(data={PROGRESS_ID: self.pid})
self.assertEqual(code, 200)
self.assertEqual(res, {TOTAL: 1337, CURRENT: 42})
def test_non_existing_progress(self):
"""
Test with a non existing progress id.
"""
code, res = get_progress(data={PROGRESS_ID: 42})
self.assertEqual(code, 204)
self.assertEqual(res, {})
class DeleteProgressTest(TestCase):
def setUp(self) -> None:
"""
Create a progress object.
"""
self.pid = create_progress(total=1337, current=42)
def test_basic(self):
"""
Test simple call.
"""
code, res = delete_progress(data={PROGRESS_ID: self.pid})
self.assertEqual(code, 200)
self.assertEqual(res, {})
self.assertEqual(Progress.objects.count(), 0)
def test_non_existing_progress(self):
"""
Test with a non existing progress id.
"""
code, res = delete_progress(data={PROGRESS_ID: 42})
self.assertEqual(code, 200)
self.assertEqual(res, {})
self.assertEqual(Progress.objects.count(), 1)
class RunObjectDetectionTest(TestCase):
@patch('backend.database_wrapper.create_hash_sum')
def setUp(self, mock_create_hash_sum) -> None:
mock_create_hash_sum.return_value = '1234'
self.cm_name = 'Test camera name'
self.fid = create_root_folder(path='home/user/', name='test_folder')
self.st = timezone.now()
self.et = timezone.now() + timezone.timedelta(seconds=5)
self.cids = []
for i in range(1, 4):
self.cids.append(
create_clip(clip_name='test_clip{}'.format(i), fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),
longitude=Decimal('0.0'), start_time=self.st + timezone.timedelta(seconds=2 * i - 3),
end_time=self.et + timezone.timedelta(seconds=2 * i - 3),
width=256, height=240, frame_rate=42, camera_name=self.cm_name))
self.od = ObjectDetector()
self.pid = create_progress(total=len(self.cids))
@patch('backend.object_detector.replace_sep', side_effect=lambda x: x)
@patch('backend.object_detector.ObjectDetector.detect')
def test_basic(self, mock_detect, mock_replace_sep):
"""
Make a simple call.
"""
mock_detect.return_value = [('monkey', 1), ('frog', 2)]
self.od.run_object_detection(cids=self.cids, pid=self.pid, rate=1, start_time=self.st, end_time=self.et)
self.assertEqual(mock_detect.call_count, 3)
mock_detect.assert_has_calls([call(clip='home/user/test_folder/test_clip1.tvf', rate=1, start=1, end=5),
call(clip='home/user/test_folder/test_clip2.tvf', rate=1, start=0, end=4),
call(clip='home/user/test_folder/test_clip3.tvf', rate=1, start=0, end=2)])
self.assertEqual(get_progress_by_id(pid=self.pid).current, 3)
for i in range(1, 4):
objects = get_objects_in_detection(odid=1)
self.assertEqual(str(objects[0].object_class), 'monkey')
self.assertEqual(str(objects[1].object_class), 'frog') | backend/test/test_integration/test_object_detector.py | from django.test import TestCase
from unittest.mock import patch, call
# Import module
from backend.object_detector import *
class DetectObjectsTest(TestCase):
"""
Hard to test full call because of threading.
"""
@patch('backend.database_wrapper.create_hash_sum')
def setUp(self, mock_create_hash_sum) -> None:
mock_create_hash_sum.return_value = '1234'
self.cm_name = 'Test camera name'
self.fid = create_root_folder(path='home/user/', name='test_folder')
self.st = timezone.now()
self.et = timezone.now() + timezone.timedelta(seconds=5)
self.cids = []
for i in range(1, 4):
self.cids.append(
create_clip(clip_name='test_clip{}'.format(i), fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),
longitude=Decimal('0.0'), start_time=self.st + timezone.timedelta(seconds=3 * i - 2),
end_time=self.et + timezone.timedelta(seconds=3 * i - 2),
width=256, height=240, frame_rate=42, camera_name=self.cm_name))
@patch('backend.object_detector.ObjectDetector')
@patch('backend.object_detector.threading')
def test_basic(self, mock_threading, mock_od):
"""
Makes a simple call.
"""
code, res = detect_objects({CLIP_IDS: self.cids, RATE: 1})
self.assertEqual(code, 200)
self.assertEqual(res, {PROGRESS_ID: 1})
class GetProgressTest(TestCase):
def setUp(self) -> None:
"""
Create a progress object.
"""
self.pid = create_progress(total=1337, current=42)
def test_basic(self):
"""
Test simple call.
"""
code, res = get_progress(data={PROGRESS_ID: self.pid})
self.assertEqual(code, 200)
self.assertEqual(res, {TOTAL: 1337, CURRENT: 42})
def test_non_existing_progress(self):
"""
Test with a non existing progress id.
"""
code, res = get_progress(data={PROGRESS_ID: 42})
self.assertEqual(code, 204)
self.assertEqual(res, {})
class DeleteProgressTest(TestCase):
def setUp(self) -> None:
"""
Create a progress object.
"""
self.pid = create_progress(total=1337, current=42)
def test_basic(self):
"""
Test simple call.
"""
code, res = delete_progress(data={PROGRESS_ID: self.pid})
self.assertEqual(code, 200)
self.assertEqual(res, {})
self.assertEqual(Progress.objects.count(), 0)
def test_non_existing_progress(self):
"""
Test with a non existing progress id.
"""
code, res = delete_progress(data={PROGRESS_ID: 42})
self.assertEqual(code, 200)
self.assertEqual(res, {})
self.assertEqual(Progress.objects.count(), 1)
class RunObjectDetectionTest(TestCase):
@patch('backend.database_wrapper.create_hash_sum')
def setUp(self, mock_create_hash_sum) -> None:
mock_create_hash_sum.return_value = '1234'
self.cm_name = 'Test camera name'
self.fid = create_root_folder(path='home/user/', name='test_folder')
self.st = timezone.now()
self.et = timezone.now() + timezone.timedelta(seconds=5)
self.cids = []
for i in range(1, 4):
self.cids.append(
create_clip(clip_name='test_clip{}'.format(i), fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),
longitude=Decimal('0.0'), start_time=self.st + timezone.timedelta(seconds=2 * i - 3),
end_time=self.et + timezone.timedelta(seconds=2 * i - 3),
width=256, height=240, frame_rate=42, camera_name=self.cm_name))
self.od = ObjectDetector()
self.pid = create_progress(total=len(self.cids))
@patch('backend.object_detector.replace_sep', side_effect=lambda x: x)
@patch('backend.object_detector.ObjectDetector.detect')
def test_basic(self, mock_detect, mock_replace_sep):
"""
Make a simple call.
"""
mock_detect.return_value = [('monkey', 1), ('frog', 2)]
self.od.run_object_detection(cids=self.cids, pid=self.pid, rate=1, start_time=self.st, end_time=self.et)
self.assertEqual(mock_detect.call_count, 3)
mock_detect.assert_has_calls([call(clip='home/user/test_folder/test_clip1.tvf', rate=1, start=1, end=5),
call(clip='home/user/test_folder/test_clip2.tvf', rate=1, start=0, end=4),
call(clip='home/user/test_folder/test_clip3.tvf', rate=1, start=0, end=2)])
self.assertEqual(get_progress_by_id(pid=self.pid).current, 3)
for i in range(1, 4):
objects = get_objects_in_detection(odid=1)
self.assertEqual(str(objects[0].object_class), 'monkey')
self.assertEqual(str(objects[1].object_class), 'frog') | 0.628407 | 0.346652 |
from pynq import DefaultHierarchy, DefaultIP, allocate
from pynq import Overlay
from datetime import datetime
import pynq.lib.dma
import numpy as np
class NeuralNetworkOverlay(Overlay):
def __init__(self, bitfile_name, x_shape, y_shape, dtype=np.float32, dtbo=None, download=True, ignore_version=False,
device=None):
super().__init__(bitfile_name, dtbo=None, download=True, ignore_version=False, device=None)
self.sendchannel = self.hier_0.axi_dma_0.sendchannel
self.recvchannel = self.hier_0.axi_dma_0.recvchannel
self.input_buffer = allocate(shape=x_shape, dtype=dtype)
self.output_buffer = allocate(shape=y_shape, dtype=dtype)
def _print_dt(self, timea, timeb, N):
dt = (timeb - timea)
dts = dt.seconds + dt.microseconds * 10 ** -6
rate = N / dts
print("Classified {} samples in {} seconds ({} inferences / s)".format(N, dts, rate))
return dts, rate
def predict(self, X, debug=False, profile=False, encode=None, decode=None):
"""
Obtain the predictions of the NN implemented in the FPGA.
Parameters:
- X : the input vector. Should be numpy ndarray.
- dtype : the data type of the elements of the input/output vectors.
Note: it should be set depending on the interface of the accelerator; if it uses 'float'
types for the 'data' AXI-Stream field, 'np.float32' dtype is the correct one to use.
Instead if it uses 'ap_fixed<A,B>', 'np.intA' is the correct one to use (note that A cannot
any integer value, but it can assume {..., 8, 16, 32, ...} values. Check `numpy`
doc for more info).
In this case the encoding/decoding has to be computed by the PS. For example for
'ap_fixed<16,6>' type the following 2 functions are the correct one to use for encode/decode
'float' -> 'ap_fixed<16,6>':
```
def encode(xi):
return np.int16(round(xi * 2**10)) # note 2**10 = 2**(A-B)
def decode(yi):
return yi * 2**-10
encode_v = np.vectorize(encode) # to apply them element-wise
decode_v = np.vectorize(decode)
```
- profile : boolean. Set it to `True` to print the performance of the algorithm in term of `inference/s`.
- encode/decode: function pointers. See `dtype` section for more information.
- return: an output array based on `np.ndarray` with a shape equal to `y_shape` and a `dtype` equal to
the namesake parameter.
"""
if profile:
timea = datetime.now()
if encode is not None:
X = encode(X)
self.input_buffer[:] = X
self.sendchannel.transfer(self.input_buffer)
self.recvchannel.transfer(self.output_buffer)
if debug:
print("Transfer OK")
self.sendchannel.wait()
if debug:
print("Send OK")
self.recvchannel.wait()
if debug:
print("Receive OK")
# result = self.output_buffer.copy()
if decode is not None:
self.output_buffer = decode(self.output_buffer)
if profile:
timeb = datetime.now()
dts, rate = self._print_dt(timea, timeb, len(X))
return self.output_buffer, dts, rate
else:
return self.output_buffer | hls4ml/templates/vivado_accelerator/zcu102/python_drivers/axi_stream_driver.py | from pynq import DefaultHierarchy, DefaultIP, allocate
from pynq import Overlay
from datetime import datetime
import pynq.lib.dma
import numpy as np
class NeuralNetworkOverlay(Overlay):
def __init__(self, bitfile_name, x_shape, y_shape, dtype=np.float32, dtbo=None, download=True, ignore_version=False,
device=None):
super().__init__(bitfile_name, dtbo=None, download=True, ignore_version=False, device=None)
self.sendchannel = self.hier_0.axi_dma_0.sendchannel
self.recvchannel = self.hier_0.axi_dma_0.recvchannel
self.input_buffer = allocate(shape=x_shape, dtype=dtype)
self.output_buffer = allocate(shape=y_shape, dtype=dtype)
def _print_dt(self, timea, timeb, N):
dt = (timeb - timea)
dts = dt.seconds + dt.microseconds * 10 ** -6
rate = N / dts
print("Classified {} samples in {} seconds ({} inferences / s)".format(N, dts, rate))
return dts, rate
def predict(self, X, debug=False, profile=False, encode=None, decode=None):
"""
Obtain the predictions of the NN implemented in the FPGA.
Parameters:
- X : the input vector. Should be numpy ndarray.
- dtype : the data type of the elements of the input/output vectors.
Note: it should be set depending on the interface of the accelerator; if it uses 'float'
types for the 'data' AXI-Stream field, 'np.float32' dtype is the correct one to use.
Instead if it uses 'ap_fixed<A,B>', 'np.intA' is the correct one to use (note that A cannot
any integer value, but it can assume {..., 8, 16, 32, ...} values. Check `numpy`
doc for more info).
In this case the encoding/decoding has to be computed by the PS. For example for
'ap_fixed<16,6>' type the following 2 functions are the correct one to use for encode/decode
'float' -> 'ap_fixed<16,6>':
```
def encode(xi):
return np.int16(round(xi * 2**10)) # note 2**10 = 2**(A-B)
def decode(yi):
return yi * 2**-10
encode_v = np.vectorize(encode) # to apply them element-wise
decode_v = np.vectorize(decode)
```
- profile : boolean. Set it to `True` to print the performance of the algorithm in term of `inference/s`.
- encode/decode: function pointers. See `dtype` section for more information.
- return: an output array based on `np.ndarray` with a shape equal to `y_shape` and a `dtype` equal to
the namesake parameter.
"""
if profile:
timea = datetime.now()
if encode is not None:
X = encode(X)
self.input_buffer[:] = X
self.sendchannel.transfer(self.input_buffer)
self.recvchannel.transfer(self.output_buffer)
if debug:
print("Transfer OK")
self.sendchannel.wait()
if debug:
print("Send OK")
self.recvchannel.wait()
if debug:
print("Receive OK")
# result = self.output_buffer.copy()
if decode is not None:
self.output_buffer = decode(self.output_buffer)
if profile:
timeb = datetime.now()
dts, rate = self._print_dt(timea, timeb, len(X))
return self.output_buffer, dts, rate
else:
return self.output_buffer | 0.787278 | 0.58433 |
r"""Spherical Harmonics as polynomials of x, y, z
"""
import math
from functools import partial
import jax
import jax.numpy as jnp
from jax.numpy import sqrt
from e3nn_jax import Irreps, IrrepsData, wigner_3j_sympy
@partial(jax.jit, static_argnums=(0, 2, 3), inline=True)
def spherical_harmonics(
    irreps_out,
    x,
    normalize: bool,
    normalization: str = 'integral'
) -> IrrepsData:
    r"""Evaluate real spherical harmonics as polynomials of ``x, y, z``.

    The harmonics :math:`Y^l: \mathbb{R}^3 \to \mathbb{R}^{2l+1}` are
    equivariant polynomials, :math:`Y^l(R x) = D^l(R) Y^l(x)`, produced by
    the recursion :math:`Y^{l+1}_i \propto C_{ijk} Y^l_j x_k` with :math:`C`
    the Wigner 3j symbols (closed forms live in ``_spherical_harmonics``).

    Args:
        irreps_out (`Irreps`): output irreps; only :math:`l \le 8` is implemented.
        x (`jnp.ndarray`): cartesian coordinates, shape ``(..., 3)``.
        normalize (bool): if ``True``, project ``x`` onto the unit sphere first.
        normalization (str): scaling of the output; one of ``'integral'``,
            ``'component'`` or ``'norm'``.

    Returns:
        `IrrepsData`: the requested harmonics, one field per entry of ``irreps_out``.
    """
    assert normalization in ['integral', 'component', 'norm']
    irreps_out = Irreps(irreps_out)
    # Parity must match polynomial parity: even-l irreps must be even
    # (p == 1) and all odd-l irreps must share a single parity.
    assert all(l % 2 == 1 or p == 1 for _, (l, p) in irreps_out)
    assert len({p for _, (l, p) in irreps_out if l % 2 == 1}) <= 1
    _lmax = 8  # expansion depth hard-coded in _spherical_harmonics
    if irreps_out.lmax > _lmax:
        raise NotImplementedError(f'spherical_harmonics maximum l implemented is {_lmax}, send us an email to ask for more')

    if normalize:
        r = jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True)
        # Guard against 0/0 at the origin.
        x = x / jnp.where(r == 0.0, 1.0, r)

    # The generator builds each degree from the previous one, so it must be
    # consumed sequentially; stack each degree's 2l+1 components last.
    gen = _spherical_harmonics(x[..., 0], x[..., 1], x[..., 2])
    per_degree = [jnp.stack(next(gen), axis=-1) for _ in range(irreps_out.lmax + 1)]

    fields = []
    for mul, ir in irreps_out:
        # Duplicate the l-th harmonic along a fresh multiplicity axis.
        y = jnp.repeat(per_degree[ir.l][..., None, :], mul, -2)
        if normalization == 'integral':
            y = (math.sqrt(ir.dim) / math.sqrt(4 * math.pi)) * y
        elif normalization == 'component':
            y = math.sqrt(ir.dim) * y
        # 'norm': keep the raw recursion output (unit norm on the sphere).
        fields.append(y)
    return IrrepsData.from_list(irreps_out, fields, x.shape[:-1])
def _spherical_harmonics(x, y, z):
    """Yield the real spherical harmonics one degree at a time.

    For l = 0, 1, ..., 8 (in that order) yields the list of the 2l+1
    polynomial components of Y^l evaluated at the broadcasted coordinates
    ``x, y, z``.  Each degree is built from the previous one, so the
    generator must be consumed sequentially from the start.

    The closed forms below are machine-generated by
    ``generate_spherical_harmonics`` (recursion Y^{l+1} ~ C Y^l x with C
    the Wigner 3j symbols) -- do not edit the coefficients by hand.
    """
    # l = 0: constant polynomial
    sh0_0 = jnp.ones_like(x)
    yield [sh0_0]
    # l = 1: the coordinates themselves
    sh1_0 = x
    sh1_1 = y
    sh1_2 = z
    yield [sh1_0, sh1_1, sh1_2]
    # l = 2
    sh2_0 = sqrt(3)*x*z
    sh2_1 = sqrt(3)*x*y
    sh2_2 = -x**2/2 + y**2 - z**2/2
    sh2_3 = sqrt(3)*y*z
    sh2_4 = sqrt(3)*(-x**2 + z**2)/2
    yield [sh2_0, sh2_1, sh2_2, sh2_3, sh2_4]
    # l = 3 (expressed in terms of the l = 2 components)
    sh3_0 = sqrt(30)*(sh2_0*z + sh2_4*x)/6
    sh3_1 = sqrt(5)*(sh2_0*y + sh2_1*z + sh2_3*x)/3
    sh3_2 = -sqrt(2)*sh2_0*z/6 + 2*sqrt(2)*sh2_1*y/3 + sqrt(6)*sh2_2*x/3 + sqrt(2)*sh2_4*x/6
    sh3_3 = -sqrt(3)*sh2_1*x/3 + sh2_2*y - sqrt(3)*sh2_3*z/3
    sh3_4 = -sqrt(2)*sh2_0*x/6 + sqrt(6)*sh2_2*z/3 + 2*sqrt(2)*sh2_3*y/3 - sqrt(2)*sh2_4*z/6
    sh3_5 = sqrt(5)*(-sh2_1*x + sh2_3*z + sh2_4*y)/3
    sh3_6 = sqrt(30)*(-sh2_0*x + sh2_4*z)/6
    yield [sh3_0, sh3_1, sh3_2, sh3_3, sh3_4, sh3_5, sh3_6]
    # l = 4
    sh4_0 = sqrt(14)*(sh3_0*z + sh3_6*x)/4
    sh4_1 = sqrt(7)*(2*sh3_0*y + sqrt(6)*sh3_1*z + sqrt(6)*sh3_5*x)/8
    sh4_2 = -sqrt(2)*sh3_0*z/8 + sqrt(3)*sh3_1*y/2 + sqrt(30)*sh3_2*z/8 + sqrt(30)*sh3_4*x/8 + sqrt(2)*sh3_6*x/8
    sh4_3 = -sqrt(6)*sh3_1*z/8 + sqrt(15)*sh3_2*y/4 + sqrt(10)*sh3_3*x/4 + sqrt(6)*sh3_5*x/8
    sh4_4 = -sqrt(6)*sh3_2*x/4 + sh3_3*y - sqrt(6)*sh3_4*z/4
    sh4_5 = -sqrt(6)*sh3_1*x/8 + sqrt(10)*sh3_3*z/4 + sqrt(15)*sh3_4*y/4 - sqrt(6)*sh3_5*z/8
    sh4_6 = -sqrt(2)*sh3_0*x/8 - sqrt(30)*sh3_2*x/8 + sqrt(30)*sh3_4*z/8 + sqrt(3)*sh3_5*y/2 - sqrt(2)*sh3_6*z/8
    sh4_7 = sqrt(7)*(-sqrt(6)*sh3_1*x + sqrt(6)*sh3_5*z + 2*sh3_6*y)/8
    sh4_8 = sqrt(14)*(-sh3_0*x + sh3_6*z)/4
    yield [sh4_0, sh4_1, sh4_2, sh4_3, sh4_4, sh4_5, sh4_6, sh4_7, sh4_8]
    # l = 5
    sh5_0 = 3*sqrt(10)*(sh4_0*z + sh4_8*x)/10
    sh5_1 = 3*sh4_0*y/5 + 3*sqrt(2)*sh4_1*z/5 + 3*sqrt(2)*sh4_7*x/5
    sh5_2 = -sqrt(2)*sh4_0*z/10 + 4*sh4_1*y/5 + sqrt(14)*sh4_2*z/5 + sqrt(14)*sh4_6*x/5 + sqrt(2)*sh4_8*x/10
    sh5_3 = -sqrt(6)*sh4_1*z/10 + sqrt(21)*sh4_2*y/5 + sqrt(42)*sh4_3*z/10 + sqrt(42)*sh4_5*x/10 + sqrt(6)*sh4_7*x/10
    sh5_4 = -sqrt(3)*sh4_2*z/5 + 2*sqrt(6)*sh4_3*y/5 + sqrt(15)*sh4_4*x/5 + sqrt(3)*sh4_6*x/5
    sh5_5 = -sqrt(10)*sh4_3*x/5 + sh4_4*y - sqrt(10)*sh4_5*z/5
    sh5_6 = -sqrt(3)*sh4_2*x/5 + sqrt(15)*sh4_4*z/5 + 2*sqrt(6)*sh4_5*y/5 - sqrt(3)*sh4_6*z/5
    sh5_7 = -sqrt(6)*sh4_1*x/10 - sqrt(42)*sh4_3*x/10 + sqrt(42)*sh4_5*z/10 + sqrt(21)*sh4_6*y/5 - sqrt(6)*sh4_7*z/10
    sh5_8 = -sqrt(2)*sh4_0*x/10 - sqrt(14)*sh4_2*x/5 + sqrt(14)*sh4_6*z/5 + 4*sh4_7*y/5 - sqrt(2)*sh4_8*z/10
    sh5_9 = -3*sqrt(2)*sh4_1*x/5 + 3*sqrt(2)*sh4_7*z/5 + 3*sh4_8*y/5
    sh5_10 = 3*sqrt(10)*(-sh4_0*x + sh4_8*z)/10
    yield [sh5_0, sh5_1, sh5_2, sh5_3, sh5_4, sh5_5, sh5_6, sh5_7, sh5_8, sh5_9, sh5_10]
    # l = 6
    sh6_0 = sqrt(33)*(sh5_0*z + sh5_10*x)/6
    sh6_1 = sqrt(11)*sh5_0*y/6 + sqrt(110)*sh5_1*z/12 + sqrt(110)*sh5_9*x/12
    sh6_2 = -sqrt(2)*sh5_0*z/12 + sqrt(5)*sh5_1*y/3 + sqrt(2)*sh5_10*x/12 + sqrt(10)*sh5_2*z/4 + sqrt(10)*sh5_8*x/4
    sh6_3 = -sqrt(6)*sh5_1*z/12 + sqrt(3)*sh5_2*y/2 + sqrt(2)*sh5_3*z/2 + sqrt(2)*sh5_7*x/2 + sqrt(6)*sh5_9*x/12
    sh6_4 = -sqrt(3)*sh5_2*z/6 + 2*sqrt(2)*sh5_3*y/3 + sqrt(14)*sh5_4*z/6 + sqrt(14)*sh5_6*x/6 + sqrt(3)*sh5_8*x/6
    sh6_5 = -sqrt(5)*sh5_3*z/6 + sqrt(35)*sh5_4*y/6 + sqrt(21)*sh5_5*x/6 + sqrt(5)*sh5_7*x/6
    sh6_6 = -sqrt(15)*sh5_4*x/6 + sh5_5*y - sqrt(15)*sh5_6*z/6
    sh6_7 = -sqrt(5)*sh5_3*x/6 + sqrt(21)*sh5_5*z/6 + sqrt(35)*sh5_6*y/6 - sqrt(5)*sh5_7*z/6
    sh6_8 = -sqrt(3)*sh5_2*x/6 - sqrt(14)*sh5_4*x/6 + sqrt(14)*sh5_6*z/6 + 2*sqrt(2)*sh5_7*y/3 - sqrt(3)*sh5_8*z/6
    sh6_9 = -sqrt(6)*sh5_1*x/12 - sqrt(2)*sh5_3*x/2 + sqrt(2)*sh5_7*z/2 + sqrt(3)*sh5_8*y/2 - sqrt(6)*sh5_9*z/12
    sh6_10 = -sqrt(2)*sh5_0*x/12 - sqrt(2)*sh5_10*z/12 - sqrt(10)*sh5_2*x/4 + sqrt(10)*sh5_8*z/4 + sqrt(5)*sh5_9*y/3
    sh6_11 = -sqrt(110)*sh5_1*x/12 + sqrt(11)*sh5_10*y/6 + sqrt(110)*sh5_9*z/12
    sh6_12 = sqrt(33)*(-sh5_0*x + sh5_10*z)/6
    yield [sh6_0, sh6_1, sh6_2, sh6_3, sh6_4, sh6_5, sh6_6, sh6_7, sh6_8, sh6_9, sh6_10, sh6_11, sh6_12]
    # l = 7
    sh7_0 = sqrt(182)*(sh6_0*z + sh6_12*x)/14
    sh7_1 = sqrt(13)*sh6_0*y/7 + sqrt(39)*sh6_1*z/7 + sqrt(39)*sh6_11*x/7
    sh7_2 = -sqrt(2)*sh6_0*z/14 + 2*sqrt(6)*sh6_1*y/7 + sqrt(33)*sh6_10*x/7 + sqrt(2)*sh6_12*x/14 + sqrt(33)*sh6_2*z/7
    sh7_3 = -sqrt(6)*sh6_1*z/14 + sqrt(6)*sh6_11*x/14 + sqrt(33)*sh6_2*y/7 + sqrt(110)*sh6_3*z/14 + sqrt(110)*sh6_9*x/14
    sh7_4 = sqrt(3)*sh6_10*x/7 - sqrt(3)*sh6_2*z/7 + 2*sqrt(10)*sh6_3*y/7 + 3*sqrt(10)*sh6_4*z/14 + 3*sqrt(10)*sh6_8*x/14
    sh7_5 = -sqrt(5)*sh6_3*z/7 + 3*sqrt(5)*sh6_4*y/7 + 3*sqrt(2)*sh6_5*z/7 + 3*sqrt(2)*sh6_7*x/7 + sqrt(5)*sh6_9*x/7
    sh7_6 = -sqrt(30)*sh6_4*z/14 + 4*sqrt(3)*sh6_5*y/7 + 2*sqrt(7)*sh6_6*x/7 + sqrt(30)*sh6_8*x/14
    sh7_7 = -sqrt(21)*sh6_5*x/7 + sh6_6*y - sqrt(21)*sh6_7*z/7
    sh7_8 = -sqrt(30)*sh6_4*x/14 + 2*sqrt(7)*sh6_6*z/7 + 4*sqrt(3)*sh6_7*y/7 - sqrt(30)*sh6_8*z/14
    sh7_9 = -sqrt(5)*sh6_3*x/7 - 3*sqrt(2)*sh6_5*x/7 + 3*sqrt(2)*sh6_7*z/7 + 3*sqrt(5)*sh6_8*y/7 - sqrt(5)*sh6_9*z/7
    sh7_10 = -sqrt(3)*sh6_10*z/7 - sqrt(3)*sh6_2*x/7 - 3*sqrt(10)*sh6_4*x/14 + 3*sqrt(10)*sh6_8*z/14 + 2*sqrt(10)*sh6_9*y/7
    sh7_11 = -sqrt(6)*sh6_1*x/14 + sqrt(33)*sh6_10*y/7 - sqrt(6)*sh6_11*z/14 - sqrt(110)*sh6_3*x/14 + sqrt(110)*sh6_9*z/14
    sh7_12 = -sqrt(2)*sh6_0*x/14 + sqrt(33)*sh6_10*z/7 + 2*sqrt(6)*sh6_11*y/7 - sqrt(2)*sh6_12*z/14 - sqrt(33)*sh6_2*x/7
    sh7_13 = -sqrt(39)*sh6_1*x/7 + sqrt(39)*sh6_11*z/7 + sqrt(13)*sh6_12*y/7
    sh7_14 = sqrt(182)*(-sh6_0*x + sh6_12*z)/14
    yield [sh7_0, sh7_1, sh7_2, sh7_3, sh7_4, sh7_5, sh7_6, sh7_7, sh7_8, sh7_9, sh7_10, sh7_11, sh7_12, sh7_13, sh7_14]
    # l = 8
    sh8_0 = sqrt(15)*(sh7_0*z + sh7_14*x)/4
    sh8_1 = sqrt(15)*sh7_0*y/8 + sqrt(210)*sh7_1*z/16 + sqrt(210)*sh7_13*x/16
    sh8_2 = -sqrt(2)*sh7_0*z/16 + sqrt(7)*sh7_1*y/4 + sqrt(182)*sh7_12*x/16 + sqrt(2)*sh7_14*x/16 + sqrt(182)*sh7_2*z/16
    sh8_3 = sqrt(510)*(-sqrt(85)*sh7_1*z + sqrt(2210)*sh7_11*x + sqrt(85)*sh7_13*x + sqrt(2210)*sh7_2*y + sqrt(2210)*sh7_3*z)/1360
    sh8_4 = sqrt(33)*sh7_10*x/8 + sqrt(3)*sh7_12*x/8 - sqrt(3)*sh7_2*z/8 + sqrt(3)*sh7_3*y/2 + sqrt(33)*sh7_4*z/8
    sh8_5 = sqrt(510)*(sqrt(102)*sh7_11*x - sqrt(102)*sh7_3*z + sqrt(1122)*sh7_4*y + sqrt(561)*sh7_5*z + sqrt(561)*sh7_9*x)/816
    sh8_6 = sqrt(30)*sh7_10*x/16 - sqrt(30)*sh7_4*z/16 + sqrt(15)*sh7_5*y/4 + 3*sqrt(10)*sh7_6*z/16 + 3*sqrt(10)*sh7_8*x/16
    sh8_7 = -sqrt(42)*sh7_5*z/16 + 3*sqrt(7)*sh7_6*y/8 + 3*sh7_7*x/4 + sqrt(42)*sh7_9*x/16
    sh8_8 = -sqrt(7)*sh7_6*x/4 + sh7_7*y - sqrt(7)*sh7_8*z/4
    sh8_9 = -sqrt(42)*sh7_5*x/16 + 3*sh7_7*z/4 + 3*sqrt(7)*sh7_8*y/8 - sqrt(42)*sh7_9*z/16
    sh8_10 = -sqrt(30)*sh7_10*z/16 - sqrt(30)*sh7_4*x/16 - 3*sqrt(10)*sh7_6*x/16 + 3*sqrt(10)*sh7_8*z/16 + sqrt(15)*sh7_9*y/4
    sh8_11 = sqrt(510)*(sqrt(1122)*sh7_10*y - sqrt(102)*sh7_11*z - sqrt(102)*sh7_3*x - sqrt(561)*sh7_5*x + sqrt(561)*sh7_9*z)/816
    sh8_12 = sqrt(33)*sh7_10*z/8 + sqrt(3)*sh7_11*y/2 - sqrt(3)*sh7_12*z/8 - sqrt(3)*sh7_2*x/8 - sqrt(33)*sh7_4*x/8
    sh8_13 = sqrt(510)*(-sqrt(85)*sh7_1*x + sqrt(2210)*sh7_11*z + sqrt(2210)*sh7_12*y - sqrt(85)*sh7_13*z - sqrt(2210)*sh7_3*x)/1360
    sh8_14 = -sqrt(2)*sh7_0*x/16 + sqrt(182)*sh7_12*z/16 + sqrt(7)*sh7_13*y/4 - sqrt(2)*sh7_14*z/16 - sqrt(182)*sh7_2*x/16
    sh8_15 = -sqrt(210)*sh7_1*x/16 + sqrt(210)*sh7_13*z/16 + sqrt(15)*sh7_14*y/8
    sh8_16 = sqrt(15)*(-sh7_0*x + sh7_14*z)/4
    yield [sh8_0, sh8_1, sh8_2, sh8_3, sh8_4, sh8_5, sh8_6, sh8_7, sh8_8, sh8_9, sh8_10, sh8_11, sh8_12, sh8_13, sh8_14, sh8_15, sh8_16]
def generate_spherical_harmonics(): # pragma: no cover
    """Print the source of ``_spherical_harmonics`` up to l = 8.

    Development helper: runs the Wigner-3j recursion symbolically with
    sympy and emits the ``shL_m = ...`` / ``yield [...]`` lines that are
    pasted into ``_spherical_harmonics`` above.
    """
    import sympy
    xyz = sympy.symbols("x, y, z")
    # l = 0 is the constant 1.
    print("sh0_0 = 1")
    print("yield [sh0_0]\n")
    # sph_x[l]: Y^l as symbolic polynomials of x, y, z (substituted inline
    # only while l <= 1; higher degrees stay expressed via shL_m names).
    sph_x = {
        0: sympy.Array([1]),
    }
    # sph_1[l]: value of Y^l at the point (1, 0, 0), used for normalization.
    sph_1 = {
        0: sympy.Array([1]),
    }
    for l in range(8):
        d = 2 * l + 1
        names = [sympy.symbols(f"sh{l}_{m}") for m in range(d)]
        w = wigner_3j_sympy(1, l, l + 1)
        # Recursion: Y^{l+1}_m = sum_{i,n} x_i * Y^l_n * w[i, n, m]
        yx = sympy.Array([sum(xyz[i] * names[n] * w[i, n, m] for i in range(3) for n in range(d)) for m in range(d + 2)])
        if l <= 1:
            yx = yx.subs(zip(names, sph_x[l]))
        # Evaluate at (1, 0, 0) and rescale so that |Y^{l+1}(1, 0, 0)| = 1.
        y1 = yx.subs(zip(xyz, (1, 0, 0))).subs(zip(names, sph_1[l]))
        norm = sympy.sqrt(sum(y1.applyfunc(lambda x: x**2)))
        y1 = y1 / norm
        yx = yx / norm
        yx = sympy.simplify(yx)
        sph_x[l + 1] = yx
        sph_1[l + 1] = y1
        # print code
        for m, p in enumerate(yx):
            print(f"sh{l+1}_{m} = {p}")
print(f"yield [{', '.join([f'sh{l+1}_{m}' for m in range(d + 2)])}]\n") | e3nn_jax/_spherical_harmonics.py | r"""Spherical Harmonics as polynomials of x, y, z
"""
import math
from functools import partial
import jax
import jax.numpy as jnp
from jax.numpy import sqrt
from e3nn_jax import Irreps, IrrepsData, wigner_3j_sympy
@partial(jax.jit, static_argnums=(0, 2, 3), inline=True)
def spherical_harmonics(
    irreps_out,
    x,
    normalize: bool,
    normalization: str = 'integral'
) -> IrrepsData:
    r"""Spherical harmonics
    .. image:: https://user-images.githubusercontent.com/333780/79220728-dbe82c00-7e54-11ea-82c7-b3acbd9b2246.gif
    | Polynomials defined on the 3d space :math:`Y^l: \mathbb{R}^3 \longrightarrow \mathbb{R}^{2l+1}`
    | Usually restricted on the sphere (with ``normalize=True``) :math:`Y^l: S^2 \longrightarrow \mathbb{R}^{2l+1}`
    | who satisfies the following properties:
    * are polynomials of the cartesian coordinates ``x, y, z``
    * is equivariant :math:`Y^l(R x) = D^l(R) Y^l(x)`
    * are orthogonal :math:`\int_{S^2} Y^l_m(x) Y^j_n(x) dx = \text{cste} \; \delta_{lj} \delta_{mn}`
    The value of the constant depends on the choice of normalization.
    It obeys the following property:
    .. math::
        Y^{l+1}_i(x) &= \text{cste}(l) \; & C_{ijk} Y^l_j(x) x_k
        \partial_k Y^{l+1}_i(x) &= \text{cste}(l) \; (l+1) & C_{ijk} Y^l_j(x)
    Where :math:`C` are the `wigner_3j`.
    .. note::
        This function match with this table of standard real spherical harmonics from Wikipedia_
        when ``normalize=True``, ``normalization='integral'`` and is called with the argument in the order ``y,z,x`` (instead of ``x,y,z``).
    .. _Wikipedia: https://en.wikipedia.org/wiki/Table_of_spherical_harmonics#Real_spherical_harmonics
    Args:
        irreps_out (`Irreps`): output irreps
        x (`jnp.ndarray`): cartesian coordinates
        normalize (bool): if True, the polynomials are restricted to the sphere
        normalization (str): normalization of the constant :math:`\text{cste}`. Default is 'integral'
    Returns:
        `jnp.ndarray`: polynomials of the spherical harmonics
    """
    assert normalization in ['integral', 'component', 'norm']
    irreps_out = Irreps(irreps_out)
    # Parity must match polynomial parity: even-l irreps must be even
    # (p == 1) and all odd-l irreps must share a single parity.
    assert all([l % 2 == 1 or p == 1 for _, (l, p) in irreps_out])
    assert len(set([p for _, (l, p) in irreps_out if l % 2 == 1])) <= 1
    _lmax = 8  # expansion depth hard-coded in _spherical_harmonics
    if irreps_out.lmax > _lmax:
        raise NotImplementedError(f'spherical_harmonics maximum l implemented is {_lmax}, send us an email to ask for more')
    if normalize:
        r = jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True)
        x = x / jnp.where(r == 0.0, 1.0, r)  # guard against 0/0 at the origin
    # The generator builds each degree from the previous one: consume it
    # sequentially and stack each degree's 2l+1 components last.
    sh = _spherical_harmonics(x[..., 0], x[..., 1], x[..., 2])
    sh = [jnp.stack(next(sh), axis=-1) for _ in range(irreps_out.lmax + 1)]
    # Duplicate each requested degree along a new multiplicity axis.
    sh = [jnp.repeat(sh[ir.l][..., None, :], mul, -2) for mul, ir in irreps_out]
    if normalization == 'integral':
        sh = [
            (math.sqrt(ir.dim) / math.sqrt(4 * math.pi)) * y
            for (_, ir), y in zip(irreps_out, sh)
        ]
    elif normalization == 'component':
        sh = [
            math.sqrt(ir.dim) * y
            for (_, ir), y in zip(irreps_out, sh)
        ]
    # 'norm': raw recursion output is kept (unit norm on the sphere).
    return IrrepsData.from_list(irreps_out, sh, x.shape[:-1])
def _spherical_harmonics(x, y, z):
    """Yield the real spherical harmonics one degree at a time.

    For l = 0, 1, ..., 8 (in that order) yields the list of the 2l+1
    polynomial components of Y^l evaluated at the broadcasted coordinates
    ``x, y, z``.  Each degree is built from the previous one, so the
    generator must be consumed sequentially from the start.

    The closed forms below are machine-generated by
    ``generate_spherical_harmonics`` (recursion Y^{l+1} ~ C Y^l x with C
    the Wigner 3j symbols) -- do not edit the coefficients by hand.
    """
    # l = 0: constant polynomial
    sh0_0 = jnp.ones_like(x)
    yield [sh0_0]
    # l = 1: the coordinates themselves
    sh1_0 = x
    sh1_1 = y
    sh1_2 = z
    yield [sh1_0, sh1_1, sh1_2]
    # l = 2
    sh2_0 = sqrt(3)*x*z
    sh2_1 = sqrt(3)*x*y
    sh2_2 = -x**2/2 + y**2 - z**2/2
    sh2_3 = sqrt(3)*y*z
    sh2_4 = sqrt(3)*(-x**2 + z**2)/2
    yield [sh2_0, sh2_1, sh2_2, sh2_3, sh2_4]
    # l = 3 (expressed in terms of the l = 2 components)
    sh3_0 = sqrt(30)*(sh2_0*z + sh2_4*x)/6
    sh3_1 = sqrt(5)*(sh2_0*y + sh2_1*z + sh2_3*x)/3
    sh3_2 = -sqrt(2)*sh2_0*z/6 + 2*sqrt(2)*sh2_1*y/3 + sqrt(6)*sh2_2*x/3 + sqrt(2)*sh2_4*x/6
    sh3_3 = -sqrt(3)*sh2_1*x/3 + sh2_2*y - sqrt(3)*sh2_3*z/3
    sh3_4 = -sqrt(2)*sh2_0*x/6 + sqrt(6)*sh2_2*z/3 + 2*sqrt(2)*sh2_3*y/3 - sqrt(2)*sh2_4*z/6
    sh3_5 = sqrt(5)*(-sh2_1*x + sh2_3*z + sh2_4*y)/3
    sh3_6 = sqrt(30)*(-sh2_0*x + sh2_4*z)/6
    yield [sh3_0, sh3_1, sh3_2, sh3_3, sh3_4, sh3_5, sh3_6]
    # l = 4
    sh4_0 = sqrt(14)*(sh3_0*z + sh3_6*x)/4
    sh4_1 = sqrt(7)*(2*sh3_0*y + sqrt(6)*sh3_1*z + sqrt(6)*sh3_5*x)/8
    sh4_2 = -sqrt(2)*sh3_0*z/8 + sqrt(3)*sh3_1*y/2 + sqrt(30)*sh3_2*z/8 + sqrt(30)*sh3_4*x/8 + sqrt(2)*sh3_6*x/8
    sh4_3 = -sqrt(6)*sh3_1*z/8 + sqrt(15)*sh3_2*y/4 + sqrt(10)*sh3_3*x/4 + sqrt(6)*sh3_5*x/8
    sh4_4 = -sqrt(6)*sh3_2*x/4 + sh3_3*y - sqrt(6)*sh3_4*z/4
    sh4_5 = -sqrt(6)*sh3_1*x/8 + sqrt(10)*sh3_3*z/4 + sqrt(15)*sh3_4*y/4 - sqrt(6)*sh3_5*z/8
    sh4_6 = -sqrt(2)*sh3_0*x/8 - sqrt(30)*sh3_2*x/8 + sqrt(30)*sh3_4*z/8 + sqrt(3)*sh3_5*y/2 - sqrt(2)*sh3_6*z/8
    sh4_7 = sqrt(7)*(-sqrt(6)*sh3_1*x + sqrt(6)*sh3_5*z + 2*sh3_6*y)/8
    sh4_8 = sqrt(14)*(-sh3_0*x + sh3_6*z)/4
    yield [sh4_0, sh4_1, sh4_2, sh4_3, sh4_4, sh4_5, sh4_6, sh4_7, sh4_8]
    # l = 5
    sh5_0 = 3*sqrt(10)*(sh4_0*z + sh4_8*x)/10
    sh5_1 = 3*sh4_0*y/5 + 3*sqrt(2)*sh4_1*z/5 + 3*sqrt(2)*sh4_7*x/5
    sh5_2 = -sqrt(2)*sh4_0*z/10 + 4*sh4_1*y/5 + sqrt(14)*sh4_2*z/5 + sqrt(14)*sh4_6*x/5 + sqrt(2)*sh4_8*x/10
    sh5_3 = -sqrt(6)*sh4_1*z/10 + sqrt(21)*sh4_2*y/5 + sqrt(42)*sh4_3*z/10 + sqrt(42)*sh4_5*x/10 + sqrt(6)*sh4_7*x/10
    sh5_4 = -sqrt(3)*sh4_2*z/5 + 2*sqrt(6)*sh4_3*y/5 + sqrt(15)*sh4_4*x/5 + sqrt(3)*sh4_6*x/5
    sh5_5 = -sqrt(10)*sh4_3*x/5 + sh4_4*y - sqrt(10)*sh4_5*z/5
    sh5_6 = -sqrt(3)*sh4_2*x/5 + sqrt(15)*sh4_4*z/5 + 2*sqrt(6)*sh4_5*y/5 - sqrt(3)*sh4_6*z/5
    sh5_7 = -sqrt(6)*sh4_1*x/10 - sqrt(42)*sh4_3*x/10 + sqrt(42)*sh4_5*z/10 + sqrt(21)*sh4_6*y/5 - sqrt(6)*sh4_7*z/10
    sh5_8 = -sqrt(2)*sh4_0*x/10 - sqrt(14)*sh4_2*x/5 + sqrt(14)*sh4_6*z/5 + 4*sh4_7*y/5 - sqrt(2)*sh4_8*z/10
    sh5_9 = -3*sqrt(2)*sh4_1*x/5 + 3*sqrt(2)*sh4_7*z/5 + 3*sh4_8*y/5
    sh5_10 = 3*sqrt(10)*(-sh4_0*x + sh4_8*z)/10
    yield [sh5_0, sh5_1, sh5_2, sh5_3, sh5_4, sh5_5, sh5_6, sh5_7, sh5_8, sh5_9, sh5_10]
    # l = 6
    sh6_0 = sqrt(33)*(sh5_0*z + sh5_10*x)/6
    sh6_1 = sqrt(11)*sh5_0*y/6 + sqrt(110)*sh5_1*z/12 + sqrt(110)*sh5_9*x/12
    sh6_2 = -sqrt(2)*sh5_0*z/12 + sqrt(5)*sh5_1*y/3 + sqrt(2)*sh5_10*x/12 + sqrt(10)*sh5_2*z/4 + sqrt(10)*sh5_8*x/4
    sh6_3 = -sqrt(6)*sh5_1*z/12 + sqrt(3)*sh5_2*y/2 + sqrt(2)*sh5_3*z/2 + sqrt(2)*sh5_7*x/2 + sqrt(6)*sh5_9*x/12
    sh6_4 = -sqrt(3)*sh5_2*z/6 + 2*sqrt(2)*sh5_3*y/3 + sqrt(14)*sh5_4*z/6 + sqrt(14)*sh5_6*x/6 + sqrt(3)*sh5_8*x/6
    sh6_5 = -sqrt(5)*sh5_3*z/6 + sqrt(35)*sh5_4*y/6 + sqrt(21)*sh5_5*x/6 + sqrt(5)*sh5_7*x/6
    sh6_6 = -sqrt(15)*sh5_4*x/6 + sh5_5*y - sqrt(15)*sh5_6*z/6
    sh6_7 = -sqrt(5)*sh5_3*x/6 + sqrt(21)*sh5_5*z/6 + sqrt(35)*sh5_6*y/6 - sqrt(5)*sh5_7*z/6
    sh6_8 = -sqrt(3)*sh5_2*x/6 - sqrt(14)*sh5_4*x/6 + sqrt(14)*sh5_6*z/6 + 2*sqrt(2)*sh5_7*y/3 - sqrt(3)*sh5_8*z/6
    sh6_9 = -sqrt(6)*sh5_1*x/12 - sqrt(2)*sh5_3*x/2 + sqrt(2)*sh5_7*z/2 + sqrt(3)*sh5_8*y/2 - sqrt(6)*sh5_9*z/12
    sh6_10 = -sqrt(2)*sh5_0*x/12 - sqrt(2)*sh5_10*z/12 - sqrt(10)*sh5_2*x/4 + sqrt(10)*sh5_8*z/4 + sqrt(5)*sh5_9*y/3
    sh6_11 = -sqrt(110)*sh5_1*x/12 + sqrt(11)*sh5_10*y/6 + sqrt(110)*sh5_9*z/12
    sh6_12 = sqrt(33)*(-sh5_0*x + sh5_10*z)/6
    yield [sh6_0, sh6_1, sh6_2, sh6_3, sh6_4, sh6_5, sh6_6, sh6_7, sh6_8, sh6_9, sh6_10, sh6_11, sh6_12]
    # l = 7
    sh7_0 = sqrt(182)*(sh6_0*z + sh6_12*x)/14
    sh7_1 = sqrt(13)*sh6_0*y/7 + sqrt(39)*sh6_1*z/7 + sqrt(39)*sh6_11*x/7
    sh7_2 = -sqrt(2)*sh6_0*z/14 + 2*sqrt(6)*sh6_1*y/7 + sqrt(33)*sh6_10*x/7 + sqrt(2)*sh6_12*x/14 + sqrt(33)*sh6_2*z/7
    sh7_3 = -sqrt(6)*sh6_1*z/14 + sqrt(6)*sh6_11*x/14 + sqrt(33)*sh6_2*y/7 + sqrt(110)*sh6_3*z/14 + sqrt(110)*sh6_9*x/14
    sh7_4 = sqrt(3)*sh6_10*x/7 - sqrt(3)*sh6_2*z/7 + 2*sqrt(10)*sh6_3*y/7 + 3*sqrt(10)*sh6_4*z/14 + 3*sqrt(10)*sh6_8*x/14
    sh7_5 = -sqrt(5)*sh6_3*z/7 + 3*sqrt(5)*sh6_4*y/7 + 3*sqrt(2)*sh6_5*z/7 + 3*sqrt(2)*sh6_7*x/7 + sqrt(5)*sh6_9*x/7
    sh7_6 = -sqrt(30)*sh6_4*z/14 + 4*sqrt(3)*sh6_5*y/7 + 2*sqrt(7)*sh6_6*x/7 + sqrt(30)*sh6_8*x/14
    sh7_7 = -sqrt(21)*sh6_5*x/7 + sh6_6*y - sqrt(21)*sh6_7*z/7
    sh7_8 = -sqrt(30)*sh6_4*x/14 + 2*sqrt(7)*sh6_6*z/7 + 4*sqrt(3)*sh6_7*y/7 - sqrt(30)*sh6_8*z/14
    sh7_9 = -sqrt(5)*sh6_3*x/7 - 3*sqrt(2)*sh6_5*x/7 + 3*sqrt(2)*sh6_7*z/7 + 3*sqrt(5)*sh6_8*y/7 - sqrt(5)*sh6_9*z/7
    sh7_10 = -sqrt(3)*sh6_10*z/7 - sqrt(3)*sh6_2*x/7 - 3*sqrt(10)*sh6_4*x/14 + 3*sqrt(10)*sh6_8*z/14 + 2*sqrt(10)*sh6_9*y/7
    sh7_11 = -sqrt(6)*sh6_1*x/14 + sqrt(33)*sh6_10*y/7 - sqrt(6)*sh6_11*z/14 - sqrt(110)*sh6_3*x/14 + sqrt(110)*sh6_9*z/14
    sh7_12 = -sqrt(2)*sh6_0*x/14 + sqrt(33)*sh6_10*z/7 + 2*sqrt(6)*sh6_11*y/7 - sqrt(2)*sh6_12*z/14 - sqrt(33)*sh6_2*x/7
    sh7_13 = -sqrt(39)*sh6_1*x/7 + sqrt(39)*sh6_11*z/7 + sqrt(13)*sh6_12*y/7
    sh7_14 = sqrt(182)*(-sh6_0*x + sh6_12*z)/14
    yield [sh7_0, sh7_1, sh7_2, sh7_3, sh7_4, sh7_5, sh7_6, sh7_7, sh7_8, sh7_9, sh7_10, sh7_11, sh7_12, sh7_13, sh7_14]
    # l = 8
    sh8_0 = sqrt(15)*(sh7_0*z + sh7_14*x)/4
    sh8_1 = sqrt(15)*sh7_0*y/8 + sqrt(210)*sh7_1*z/16 + sqrt(210)*sh7_13*x/16
    sh8_2 = -sqrt(2)*sh7_0*z/16 + sqrt(7)*sh7_1*y/4 + sqrt(182)*sh7_12*x/16 + sqrt(2)*sh7_14*x/16 + sqrt(182)*sh7_2*z/16
    sh8_3 = sqrt(510)*(-sqrt(85)*sh7_1*z + sqrt(2210)*sh7_11*x + sqrt(85)*sh7_13*x + sqrt(2210)*sh7_2*y + sqrt(2210)*sh7_3*z)/1360
    sh8_4 = sqrt(33)*sh7_10*x/8 + sqrt(3)*sh7_12*x/8 - sqrt(3)*sh7_2*z/8 + sqrt(3)*sh7_3*y/2 + sqrt(33)*sh7_4*z/8
    sh8_5 = sqrt(510)*(sqrt(102)*sh7_11*x - sqrt(102)*sh7_3*z + sqrt(1122)*sh7_4*y + sqrt(561)*sh7_5*z + sqrt(561)*sh7_9*x)/816
    sh8_6 = sqrt(30)*sh7_10*x/16 - sqrt(30)*sh7_4*z/16 + sqrt(15)*sh7_5*y/4 + 3*sqrt(10)*sh7_6*z/16 + 3*sqrt(10)*sh7_8*x/16
    sh8_7 = -sqrt(42)*sh7_5*z/16 + 3*sqrt(7)*sh7_6*y/8 + 3*sh7_7*x/4 + sqrt(42)*sh7_9*x/16
    sh8_8 = -sqrt(7)*sh7_6*x/4 + sh7_7*y - sqrt(7)*sh7_8*z/4
    sh8_9 = -sqrt(42)*sh7_5*x/16 + 3*sh7_7*z/4 + 3*sqrt(7)*sh7_8*y/8 - sqrt(42)*sh7_9*z/16
    sh8_10 = -sqrt(30)*sh7_10*z/16 - sqrt(30)*sh7_4*x/16 - 3*sqrt(10)*sh7_6*x/16 + 3*sqrt(10)*sh7_8*z/16 + sqrt(15)*sh7_9*y/4
    sh8_11 = sqrt(510)*(sqrt(1122)*sh7_10*y - sqrt(102)*sh7_11*z - sqrt(102)*sh7_3*x - sqrt(561)*sh7_5*x + sqrt(561)*sh7_9*z)/816
    sh8_12 = sqrt(33)*sh7_10*z/8 + sqrt(3)*sh7_11*y/2 - sqrt(3)*sh7_12*z/8 - sqrt(3)*sh7_2*x/8 - sqrt(33)*sh7_4*x/8
    sh8_13 = sqrt(510)*(-sqrt(85)*sh7_1*x + sqrt(2210)*sh7_11*z + sqrt(2210)*sh7_12*y - sqrt(85)*sh7_13*z - sqrt(2210)*sh7_3*x)/1360
    sh8_14 = -sqrt(2)*sh7_0*x/16 + sqrt(182)*sh7_12*z/16 + sqrt(7)*sh7_13*y/4 - sqrt(2)*sh7_14*z/16 - sqrt(182)*sh7_2*x/16
    sh8_15 = -sqrt(210)*sh7_1*x/16 + sqrt(210)*sh7_13*z/16 + sqrt(15)*sh7_14*y/8
    sh8_16 = sqrt(15)*(-sh7_0*x + sh7_14*z)/4
    yield [sh8_0, sh8_1, sh8_2, sh8_3, sh8_4, sh8_5, sh8_6, sh8_7, sh8_8, sh8_9, sh8_10, sh8_11, sh8_12, sh8_13, sh8_14, sh8_15, sh8_16]
def generate_spherical_harmonics(): # pragma: no cover
    """Print the source of ``_spherical_harmonics`` up to l = 8.

    Development helper: runs the Wigner-3j recursion symbolically with
    sympy and emits the ``shL_m = ...`` / ``yield [...]`` lines that are
    pasted into ``_spherical_harmonics`` above.
    """
    import sympy
    xyz = sympy.symbols("x, y, z")
    # l = 0 is the constant 1.
    print("sh0_0 = 1")
    print("yield [sh0_0]\n")
    # sph_x[l]: Y^l as symbolic polynomials of x, y, z (substituted inline
    # only while l <= 1; higher degrees stay expressed via shL_m names).
    sph_x = {
        0: sympy.Array([1]),
    }
    # sph_1[l]: value of Y^l at the point (1, 0, 0), used for normalization.
    sph_1 = {
        0: sympy.Array([1]),
    }
    for l in range(8):
        d = 2 * l + 1
        names = [sympy.symbols(f"sh{l}_{m}") for m in range(d)]
        w = wigner_3j_sympy(1, l, l + 1)
        # Recursion: Y^{l+1}_m = sum_{i,n} x_i * Y^l_n * w[i, n, m]
        yx = sympy.Array([sum(xyz[i] * names[n] * w[i, n, m] for i in range(3) for n in range(d)) for m in range(d + 2)])
        if l <= 1:
            yx = yx.subs(zip(names, sph_x[l]))
        # Evaluate at (1, 0, 0) and rescale so that |Y^{l+1}(1, 0, 0)| = 1.
        y1 = yx.subs(zip(xyz, (1, 0, 0))).subs(zip(names, sph_1[l]))
        norm = sympy.sqrt(sum(y1.applyfunc(lambda x: x**2)))
        y1 = y1 / norm
        yx = yx / norm
        yx = sympy.simplify(yx)
        sph_x[l + 1] = yx
        sph_1[l + 1] = y1
        # print code
        for m, p in enumerate(yx):
            print(f"sh{l+1}_{m} = {p}")
print(f"yield [{', '.join([f'sh{l+1}_{m}' for m in range(d + 2)])}]\n") | 0.905933 | 0.769124 |
import datetime
import hashlib
from typing import List
from unittest.mock import patch
import pytz
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import timezone
from freezegun import freeze_time
from posthog.email import EmailMessage, _send_email
from posthog.models import Event, MessagingRecord, Organization, Person, Team, User
from posthog.tasks.email import send_weekly_email_reports
class TestEmail(TestCase):
    """Tests for transactional email sending and the weekly report task.

    The clock is frozen at Monday 2020-09-21, so "last week" is the
    reporting period 2020-09-14 .. 2020-09-20 throughout.
    """

    def create_person(self, team: Team, base_distinct_id: str = "") -> Person:
        # Helper: create a person on `team` carrying a single distinct_id.
        person = Person.objects.create(team=team)
        person.add_distinct_id(base_distinct_id)
        return person

    @freeze_time("2020-09-21")
    def setUp(self):
        """Build one org with three members and a week's worth of events.

        Persons 0..6 (distinct_ids "0".."6") are arranged so the report
        sees resurrected (0, 1), retained (2, 3, 4), new (5) and
        churned (6) users.
        """
        super().setUp()
        self.organization = Organization.objects.create()
        self.team = Team.objects.create(organization=self.organization, name="The Bakery")
        self.user = User.objects.create(email="<EMAIL>")
        self.user2 = User.objects.create(email="<EMAIL>")
        self.user_red_herring = User.objects.create(email="<EMAIL>")
        self.organization.members.add(self.user)
        self.organization.members.add(self.user2)
        self.organization.members.add(self.user_red_herring)
        # A pre-existing record for this campaign key marks the campaign as
        # already sent to the red-herring user.
        MessagingRecord.objects.get_or_create(
            raw_email="<EMAIL>",
            campaign_key=f"weekly_report_for_team_{self.team.pk}_on_2020-09-14",
            defaults={"sent_at": timezone.now()},
        ) # This user should not get the emails
        last_week = datetime.datetime(2020, 9, 17, 3, 22, tzinfo=pytz.UTC)
        two_weeks_ago = datetime.datetime(2020, 9, 8, 19, 54, tzinfo=pytz.UTC)
        self.persons: List = [self.create_person(self.team, str(i)) for i in range(0, 7)]
        # Resurrected: created weeks before the report period, active last week.
        self.persons[0].created_at = timezone.now() - datetime.timedelta(weeks=3)
        self.persons[0].save()
        self.persons[1].created_at = timezone.now() - datetime.timedelta(weeks=4)
        self.persons[1].save()
        Event.objects.create(team=self.team, timestamp=last_week, distinct_id=0)
        Event.objects.create(team=self.team, timestamp=last_week, distinct_id=1)
        # Retained: active both last week and the week before.
        Event.objects.create(team=self.team, timestamp=last_week, distinct_id=2)
        Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=2)
        Event.objects.create(team=self.team, timestamp=last_week, distinct_id=3)
        Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=3)
        Event.objects.create(team=self.team, timestamp=last_week, distinct_id=4)
        Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=4)
        # New: only active last week (and recently created).
        Event.objects.create(team=self.team, timestamp=last_week, distinct_id=5)
        Event.objects.create(team=self.team, timestamp=last_week, distinct_id=5)
        # Churned: active the week before, silent last week.
        Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=6)

    def test_cant_send_emails_if_not_properly_configured(self) -> None:
        """EmailMessage refuses to build when email is disabled or unconfigured."""
        # Missing host counts as "not enabled".
        with self.settings(EMAIL_HOST=None):
            with self.assertRaises(ImproperlyConfigured) as e:
                EmailMessage("test_campaign", "Subject", "template")
            self.assertEqual(
                str(e.exception), "Email is not enabled in this instance.",
            )
        # Explicitly disabled behaves the same way.
        with self.settings(EMAIL_ENABLED=False):
            with self.assertRaises(ImproperlyConfigured) as e:
                EmailMessage("test_campaign", "Subject", "template")
            self.assertEqual(
                str(e.exception), "Email is not enabled in this instance.",
            )

    def test_cant_send_same_campaign_twice(self) -> None:
        """A recipient with an existing sent record is skipped entirely."""
        sent_at = timezone.now()
        record, _ = MessagingRecord.objects.get_or_create(raw_email="<EMAIL>", campaign_key="campaign_1")
        record.sent_at = sent_at
        record.save()
        with self.settings(
            EMAIL_HOST="localhost", CELERY_TASK_ALWAYS_EAGER=True,
        ):
            _send_email(
                campaign_key="campaign_1",
                to=[{"raw_email": "<EMAIL>", "recipient": "Test Posthog <<EMAIL>>"}],
                subject="Test email",
                headers={},
            )
        # Nothing was sent and the original record is untouched.
        self.assertEqual(len(mail.outbox), 0)
        record.refresh_from_db()
        self.assertEqual(record.sent_at, sent_at)

    @freeze_time("2020-09-21")
    def test_weekly_email_report(self) -> None:
        """The weekly report goes to every member except already-sent ones."""
        record_count: int = MessagingRecord.objects.count()
        expected_recipients: List[str] = ["<EMAIL>", "<EMAIL>"]
        with self.settings(
            EMAIL_HOST="localhost", SITE_URL="http://localhost:9999", CELERY_TASK_ALWAYS_EAGER=True,
        ):
            send_weekly_email_reports()
        self.assertSetEqual({",".join(outmail.to) for outmail in mail.outbox}, set(expected_recipients))
        self.assertEqual(
            mail.outbox[0].subject, "PostHog weekly report for Sep 14, 2020 to Sep 20",
        )
        self.assertEqual(
            mail.outbox[0].body, "",
        )  # no plain-text version support yet
        html_message = mail.outbox[0].alternatives[0][0]  # type: ignore
        self.assertIn(
            "http://localhost:9999/static/posthog-logo.png", html_message,
        )  # absolute URLs are used
        self.assertIn('style="font-weight: 600"', html_message)  # CSS is inlined
        self.assertIn(
            "Your PostHog weekly report is ready! Your team had 6 active users last week! 🎉", html_message,
        )  # preheader
        # Ensure records are properly saved to prevent duplicate emails
        self.assertEqual(MessagingRecord.objects.count(), record_count + 2)
        for email in expected_recipients:
            # Records are stored by a salted hash of the address, not the
            # raw email.
            email_hash = hashlib.sha256(f"{settings.SECRET_KEY}_{email}".encode()).hexdigest()
            record = MessagingRecord.objects.get(
                email_hash=email_hash, campaign_key=f"weekly_report_for_team_{self.team.pk}_on_2020-09-14",
            )
            self.assertTrue((timezone.now() - record.sent_at).total_seconds() < 5)

    @patch("posthog.tasks.email.EmailMessage")
    @freeze_time("2020-09-21")
    def test_weekly_email_report_content(self, mock_email_message):
        """The template context carries the computed report figures."""
        with self.settings(
            EMAIL_HOST="localhost", CELERY_TASK_ALWAYS_EAGER=True,
        ):
            send_weekly_email_reports()
        self.assertEqual(
            mock_email_message.call_args[1]["campaign_key"], f"weekly_report_for_team_{self.team.pk}_on_2020-09-14",
        )  # Campaign key
        self.assertEqual(
            mock_email_message.call_args[1]["subject"], "PostHog weekly report for Sep 14, 2020 to Sep 20",
        )  # Email subject
        self.assertEqual(mock_email_message.call_args[1]["template_name"], "weekly_report")
        template_context = mock_email_message.call_args[1]["template_context"]
        self.assertEqual(template_context["team"], "The Bakery")
        self.assertEqual(
            template_context["period_start"], datetime.datetime(2020, 9, 14, tzinfo=pytz.UTC),
        )
        self.assertEqual(
            template_context["period_end"], datetime.datetime(2020, 9, 20, 23, 59, 59, 999999, tzinfo=pytz.UTC),
        )
        # 6 of 7 fixture persons were active last week; 4 the week before.
        self.assertEqual(
            template_context["active_users"], 6,
        )
        self.assertEqual(
            template_context["active_users_delta"], 0.5,
        )
        # Distribution over the 6 active users: 1 new, 3 retained, 2 resurrected.
        self.assertEqual(
            round(template_context["user_distribution"]["new"], 2), 0.17,
        )
        self.assertEqual(
            template_context["user_distribution"]["retained"], 0.5,
        )
        self.assertEqual(
            round(template_context["user_distribution"]["resurrected"], 2), 0.33,
        )
        # 1 of the 4 previously-active users churned.
        self.assertEqual(
            template_context["churned_users"], {"abs": 1, "ratio": 0.25, "delta": None},
) | posthog/test/test_email.py | import datetime
import hashlib
from typing import List
from unittest.mock import patch
import pytz
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import timezone
from freezegun import freeze_time
from posthog.email import EmailMessage, _send_email
from posthog.models import Event, MessagingRecord, Organization, Person, Team, User
from posthog.tasks.email import send_weekly_email_reports
class TestEmail(TestCase):
def create_person(self, team: Team, base_distinct_id: str = "") -> Person:
person = Person.objects.create(team=team)
person.add_distinct_id(base_distinct_id)
return person
@freeze_time("2020-09-21")
def setUp(self):
super().setUp()
self.organization = Organization.objects.create()
self.team = Team.objects.create(organization=self.organization, name="The Bakery")
self.user = User.objects.create(email="<EMAIL>")
self.user2 = User.objects.create(email="<EMAIL>")
self.user_red_herring = User.objects.create(email="<EMAIL>")
self.organization.members.add(self.user)
self.organization.members.add(self.user2)
self.organization.members.add(self.user_red_herring)
MessagingRecord.objects.get_or_create(
raw_email="<EMAIL>",
campaign_key=f"weekly_report_for_team_{self.team.pk}_on_2020-09-14",
defaults={"sent_at": timezone.now()},
) # This user should not get the emails
last_week = datetime.datetime(2020, 9, 17, 3, 22, tzinfo=pytz.UTC)
two_weeks_ago = datetime.datetime(2020, 9, 8, 19, 54, tzinfo=pytz.UTC)
self.persons: List = [self.create_person(self.team, str(i)) for i in range(0, 7)]
# Resurrected
self.persons[0].created_at = timezone.now() - datetime.timedelta(weeks=3)
self.persons[0].save()
self.persons[1].created_at = timezone.now() - datetime.timedelta(weeks=4)
self.persons[1].save()
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=0)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=1)
# Retained
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=2)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=2)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=3)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=3)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=4)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=4)
# New
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=5)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=5)
# Churned
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=6)
def test_cant_send_emails_if_not_properly_configured(self) -> None:
with self.settings(EMAIL_HOST=None):
with self.assertRaises(ImproperlyConfigured) as e:
EmailMessage("test_campaign", "Subject", "template")
self.assertEqual(
str(e.exception), "Email is not enabled in this instance.",
)
with self.settings(EMAIL_ENABLED=False):
with self.assertRaises(ImproperlyConfigured) as e:
EmailMessage("test_campaign", "Subject", "template")
self.assertEqual(
str(e.exception), "Email is not enabled in this instance.",
)
def test_cant_send_same_campaign_twice(self) -> None:
sent_at = timezone.now()
record, _ = MessagingRecord.objects.get_or_create(raw_email="<EMAIL>", campaign_key="campaign_1")
record.sent_at = sent_at
record.save()
with self.settings(
EMAIL_HOST="localhost", CELERY_TASK_ALWAYS_EAGER=True,
):
_send_email(
campaign_key="campaign_1",
to=[{"raw_email": "<EMAIL>", "recipient": "Test Posthog <<EMAIL>>"}],
subject="Test email",
headers={},
)
self.assertEqual(len(mail.outbox), 0)
record.refresh_from_db()
self.assertEqual(record.sent_at, sent_at)
@freeze_time("2020-09-21")
def test_weekly_email_report(self) -> None:
    """The weekly report is rendered, emailed to every recipient, and recorded."""
    record_count: int = MessagingRecord.objects.count()
    expected_recipients: List[str] = ["<EMAIL>", "<EMAIL>"]
    with self.settings(
        EMAIL_HOST="localhost", SITE_URL="http://localhost:9999", CELERY_TASK_ALWAYS_EAGER=True,
    ):
        send_weekly_email_reports()
    # One email per expected recipient, no extras.
    self.assertSetEqual({",".join(outmail.to) for outmail in mail.outbox}, set(expected_recipients))
    self.assertEqual(
        mail.outbox[0].subject, "PostHog weekly report for Sep 14, 2020 to Sep 20",
    )
    self.assertEqual(
        mail.outbox[0].body, "",
    )  # no plain-text version support yet
    html_message = mail.outbox[0].alternatives[0][0]  # type: ignore
    self.assertIn(
        "http://localhost:9999/static/posthog-logo.png", html_message,
    )  # absolute URLs are used
    self.assertIn('style="font-weight: 600"', html_message)  # CSS is inlined
    self.assertIn(
        "Your PostHog weekly report is ready! Your team had 6 active users last week! 🎉", html_message,
    )  # preheader
    # Ensure records are properly saved to prevent duplicate emails
    self.assertEqual(MessagingRecord.objects.count(), record_count + 2)
    for email in expected_recipients:
        # Recipients are stored as SECRET_KEY-salted SHA-256 hashes, not clear text.
        email_hash = hashlib.sha256(f"{settings.SECRET_KEY}_{email}".encode()).hexdigest()
        record = MessagingRecord.objects.get(
            email_hash=email_hash, campaign_key=f"weekly_report_for_team_{self.team.pk}_on_2020-09-14",
        )
        self.assertTrue((timezone.now() - record.sent_at).total_seconds() < 5)
@patch("posthog.tasks.email.EmailMessage")
@freeze_time("2020-09-21")
def test_weekly_email_report_content(self, mock_email_message):
with self.settings(
EMAIL_HOST="localhost", CELERY_TASK_ALWAYS_EAGER=True,
):
send_weekly_email_reports()
self.assertEqual(
mock_email_message.call_args[1]["campaign_key"], f"weekly_report_for_team_{self.team.pk}_on_2020-09-14",
) # Campaign key
self.assertEqual(
mock_email_message.call_args[1]["subject"], "PostHog weekly report for Sep 14, 2020 to Sep 20",
) # Email subject
self.assertEqual(mock_email_message.call_args[1]["template_name"], "weekly_report")
template_context = mock_email_message.call_args[1]["template_context"]
self.assertEqual(template_context["team"], "The Bakery")
self.assertEqual(
template_context["period_start"], datetime.datetime(2020, 9, 14, tzinfo=pytz.UTC),
)
self.assertEqual(
template_context["period_end"], datetime.datetime(2020, 9, 20, 23, 59, 59, 999999, tzinfo=pytz.UTC),
)
self.assertEqual(
template_context["active_users"], 6,
)
self.assertEqual(
template_context["active_users_delta"], 0.5,
)
self.assertEqual(
round(template_context["user_distribution"]["new"], 2), 0.17,
)
self.assertEqual(
template_context["user_distribution"]["retained"], 0.5,
)
self.assertEqual(
round(template_context["user_distribution"]["resurrected"], 2), 0.33,
)
self.assertEqual(
template_context["churned_users"], {"abs": 1, "ratio": 0.25, "delta": None},
) | 0.65368 | 0.182972 |
from depsolver.errors \
import \
DepSolverError
from depsolver.constraints \
import \
Equal, GEQ, LEQ
from depsolver.requirement_parser \
import \
RawRequirementParser
from depsolver.version \
import \
MaxVersion, MinVersion, Version
V = Version.from_string
class Requirement(object):
    """Requirements instances represent a 'package requirement', that is a
    package + version constraints.

    Arguments
    ---------
    name: str
        Package name
    specs: seq
        Sequence of constraints
    """
    @classmethod
    def from_string(cls, requirement_string):
        """Creates a new Requirement from a requirement string.

        Arguments
        ---------
        requirement_string: str
            The requirement string, e.g. 'numpy >= 1.3.0'

        Examples
        --------
        # This creates a requirement that will match any version of numpy
        >>> Requirement.from_string("numpy")
        numpy *
        # This creates a requirement that will only match versions of numpy >= 1.3.0
        >>> Requirement.from_string("numpy >= 1.3.0")
        numpy >= 1.3.0
        """
        parser = RequirementParser()
        requirements = parser.parse(requirement_string)
        # A requirement string must describe exactly one distribution.
        if len(requirements) != 1:
            raise DepSolverError("Invalid requirement string %r" % requirement_string)
        else:
            return requirements[0]

    def __init__(self, name, specs):
        # Collapse the constraint sequence into a [min, max] version interval,
        # plus an optional exact-version pin (_equal).
        self.name = name
        self._min_bound = MinVersion()  # open lower bound by default
        self._max_bound = MaxVersion()  # open upper bound by default
        # transform GE and LE into NOT + corresponding GEQ/LEQ
        # Take the min of GEQ, max of LEQ
        equals = [req for req in specs if isinstance(req, Equal)]
        if len(equals) > 1:
            # Multiple '==' pins are treated as unsatisfiable.
            # NOTE(review): this also rejects two *identical* '==' pins — confirm intended.
            self._cannot_match = True
            self._equal = None
        elif len(equals) == 1:
            self._cannot_match = False
            self._equal = V(equals[0].version)
            self._min_bound = self._max_bound = self._equal
        else:
            self._cannot_match = False
            self._equal = None
        # Effective lower bound: the tightest (largest) '>=' constraint.
        geq = [req for req in specs if isinstance(req, GEQ)]
        geq_versions = [V(g.version) for g in geq]
        if len(geq_versions) > 0:
            self._min_bound = max(geq_versions)
        # Effective upper bound: the tightest (smallest) '<=' constraint.
        leq = [req for req in specs if isinstance(req, LEQ)]
        leq_versions = [V(l.version) for l in leq]
        if len(leq_versions) > 0:
            self._max_bound = min(leq_versions)
        # An empty interval can never be satisfied.
        if self._min_bound > self._max_bound:
            self._cannot_match = True

    def __repr__(self):
        r = []
        if self._cannot_match:
            r.append("%s None" % self.name)
        elif self._equal:
            r.append("%s == %s" % (self.name, self._equal))
        else:
            if self._min_bound != MinVersion():
                r.append("%s >= %s" % (self.name, self._min_bound))
            if self._max_bound != MaxVersion():
                r.append("%s <= %s" % (self.name, self._max_bound))
            # Fully unbounded: any version matches.
            if self._min_bound == MinVersion() and self._max_bound == MaxVersion():
                r.append("%s *" % self.name)
        return ", ".join(r)

    def __eq__(self, other):
        # Equality and hashing both piggyback on the canonical repr string.
        return repr(self) == repr(other)

    def __hash__(self):
        return hash(repr(self))

    def matches(self, provider):
        """Return True if provider requirement and this requirement are
        compatible.

        Arguments
        ---------
        provider: Requirement
            The requirement to match

        Examples
        --------
        >>> req = Requirement.from_string("numpy >= 1.3.0")
        >>> req.matches(Requirement.from_string("numpy"))
        True
        >>> req.matches(Requirement.from_string("numpy >= 1.2.0"))
        True
        >>> req.matches(Requirement.from_string("numpy >= 1.4.0"))
        True
        """
        if self.name != provider.name:
            return False
        if self._cannot_match:
            return False
        # NOTE(review): provider._cannot_match is only checked on the
        # recursive path below — confirm an unsatisfiable provider cannot
        # slip through the direct interval comparison.
        if self._equal is None:
            if provider._equal is None:
                # Normalize so self has the smaller lower bound; then the two
                # intervals overlap iff self's upper bound reaches provider's
                # lower bound.
                if self._min_bound > provider._min_bound:
                    return provider.matches(self)
                else:
                    return self._max_bound >= provider._min_bound
            else:
                # Provider pins an exact version: it must lie in our interval.
                if provider._equal >= self._min_bound and provider._equal <= self._max_bound:
                    return True
                else:
                    return False
        else:
            if provider._equal is not None:
                # Both pinned: the versions must coincide.
                return provider._equal == self._equal
            else:
                # Delegate to the interval-vs-pin case above.
                return provider.matches(self)
class RequirementParser(object):
def __init__(self):
self._parser = RawRequirementParser()
def iter_parse(self, requirement_string):
for distribution_name, specs in self._parser.parse(requirement_string).items():
yield Requirement(distribution_name, specs)
def parse(self, requirement_string):
return [r for r in self.iter_parse(requirement_string)] | depsolver/requirement.py | from depsolver.errors \
import \
DepSolverError
from depsolver.constraints \
import \
Equal, GEQ, LEQ
from depsolver.requirement_parser \
import \
RawRequirementParser
from depsolver.version \
import \
MaxVersion, MinVersion, Version
V = Version.from_string
class Requirement(object):
    """Requirements instances represent a 'package requirement', that is a
    package + version constraints.

    Arguments
    ---------
    name: str
        Package name
    specs: seq
        Sequence of constraints
    """
    @classmethod
    def from_string(cls, requirement_string):
        """Creates a new Requirement from a requirement string.

        Arguments
        ---------
        requirement_string: str
            The requirement string, e.g. 'numpy >= 1.3.0'

        Examples
        --------
        # This creates a requirement that will match any version of numpy
        >>> Requirement.from_string("numpy")
        numpy *
        # This creates a requirement that will only match versions of numpy >= 1.3.0
        >>> Requirement.from_string("numpy >= 1.3.0")
        numpy >= 1.3.0
        """
        parser = RequirementParser()
        requirements = parser.parse(requirement_string)
        # A requirement string must describe exactly one distribution.
        if len(requirements) != 1:
            raise DepSolverError("Invalid requirement string %r" % requirement_string)
        else:
            return requirements[0]

    def __init__(self, name, specs):
        # Collapse the constraint sequence into a [min, max] version interval,
        # plus an optional exact-version pin (_equal).
        self.name = name
        self._min_bound = MinVersion()  # open lower bound by default
        self._max_bound = MaxVersion()  # open upper bound by default
        # transform GE and LE into NOT + corresponding GEQ/LEQ
        # Take the min of GEQ, max of LEQ
        equals = [req for req in specs if isinstance(req, Equal)]
        if len(equals) > 1:
            # Multiple '==' pins are treated as unsatisfiable.
            self._cannot_match = True
            self._equal = None
        elif len(equals) == 1:
            self._cannot_match = False
            self._equal = V(equals[0].version)
            self._min_bound = self._max_bound = self._equal
        else:
            self._cannot_match = False
            self._equal = None
        # Effective lower bound: the tightest (largest) '>=' constraint.
        geq = [req for req in specs if isinstance(req, GEQ)]
        geq_versions = [V(g.version) for g in geq]
        if len(geq_versions) > 0:
            self._min_bound = max(geq_versions)
        # Effective upper bound: the tightest (smallest) '<=' constraint.
        leq = [req for req in specs if isinstance(req, LEQ)]
        leq_versions = [V(l.version) for l in leq]
        if len(leq_versions) > 0:
            self._max_bound = min(leq_versions)
        # An empty interval can never be satisfied.
        if self._min_bound > self._max_bound:
            self._cannot_match = True

    def __repr__(self):
        r = []
        if self._cannot_match:
            r.append("%s None" % self.name)
        elif self._equal:
            r.append("%s == %s" % (self.name, self._equal))
        else:
            if self._min_bound != MinVersion():
                r.append("%s >= %s" % (self.name, self._min_bound))
            if self._max_bound != MaxVersion():
                r.append("%s <= %s" % (self.name, self._max_bound))
            # Fully unbounded: any version matches.
            if self._min_bound == MinVersion() and self._max_bound == MaxVersion():
                r.append("%s *" % self.name)
        return ", ".join(r)

    def __eq__(self, other):
        # Equality and hashing both piggyback on the canonical repr string.
        return repr(self) == repr(other)

    def __hash__(self):
        return hash(repr(self))

    def matches(self, provider):
        """Return True if provider requirement and this requirement are
        compatible.

        Arguments
        ---------
        provider: Requirement
            The requirement to match

        Examples
        --------
        >>> req = Requirement.from_string("numpy >= 1.3.0")
        >>> req.matches(Requirement.from_string("numpy"))
        True
        >>> req.matches(Requirement.from_string("numpy >= 1.2.0"))
        True
        >>> req.matches(Requirement.from_string("numpy >= 1.4.0"))
        True
        """
        if self.name != provider.name:
            return False
        if self._cannot_match:
            return False
        if self._equal is None:
            if provider._equal is None:
                # Normalize so self has the smaller lower bound; then the two
                # intervals overlap iff self's upper bound reaches provider's
                # lower bound.
                if self._min_bound > provider._min_bound:
                    return provider.matches(self)
                else:
                    return self._max_bound >= provider._min_bound
            else:
                # Provider pins an exact version: it must lie in our interval.
                if provider._equal >= self._min_bound and provider._equal <= self._max_bound:
                    return True
                else:
                    return False
        else:
            if provider._equal is not None:
                # Both pinned: the versions must coincide.
                return provider._equal == self._equal
            else:
                # Delegate to the interval-vs-pin case above.
                return provider.matches(self)
class RequirementParser(object):
def __init__(self):
self._parser = RawRequirementParser()
def iter_parse(self, requirement_string):
for distribution_name, specs in self._parser.parse(requirement_string).items():
yield Requirement(distribution_name, specs)
def parse(self, requirement_string):
return [r for r in self.iter_parse(requirement_string)] | 0.816516 | 0.333557 |
import httplib
from json import loads, dumps
import types
import urllib
SERVER_ERROR = "err" # Indicates that a non-fatal error occurred on the server
# Usually means an invalid Wave ID
UNKNOWN_ERROR = "unk" # Indicates that an error occurred in the MindstormsyAPI
# Use MindstormsyClient.lastError to get a more detailed explanation about what went wrong
NO_ERROR = 0 # Everything's fine, stop being so paranoid!
CONNECTION_ERROR = 1 # An error occurred while connecting to the Wave robot
# Could mean a timeout, spelling mistake in the server name, no internet connection, etc.
REQUEST_ERROR = 2 # The connection to the server was successful, but a status code other than 200 OK was returned
# Or, something failed while reading the response from the server
INVALID_JSON_ERROR = 3 # The JSON returned from the server was invalid, and the parser failed
UNKNOWN_UNKNOWN_ERROR = 4 # Panic!
class MindstormsyClient():
    """The MindstormsyClient class enables you to fetch action strings from a Mindstormsy-Robot instance on the Google App Engine."""

    def __init__(self, server="mindstormsy-robot.appspot.com", port=80):
        "Initialises the object. Specify your own server (hostname only) if you're running a custom Mindstormsy-Robot instance (and custom port if it's running locally)."
        self._server = server
        # BUG FIX: was hard-coded to 80, silently ignoring the ``port``
        # argument — custom ports (e.g. a local dev instance) never worked.
        self._port = port
        self.lastError = NO_ERROR

    def poll(self, waveId, timeout):
        "Polls the Wave robot. Specify the Wave ID (the user can obtain this through the Mindstormsy gadget in Google Wave) and connection timeout. The method will return the current action for the Wave ID as a string. If there was an error connecting or invalid data was fetched, then UNKNOWN_ERROR will be returned. If you receive an UNKNOWN_ERROR, then check MindstormsyClient.lastError for a more detailed explanation on what went wrong. Refer to the error codes in mindstormsyapi.py for more info. An error on the server (usually the Wave ID not existing in the database yet) will return SERVER_ERROR."
        # Stage 1: open the connection and issue the GET request.
        try:
            conn = httplib.HTTPConnection(self._server, self._port, True, timeout)
            conn.request("GET", "/?id=" + urllib.quote(waveId))
            response = conn.getresponse()
        except KeyboardInterrupt as e:
            raise e
        except:
            self.lastError = CONNECTION_ERROR
            return UNKNOWN_ERROR
        # Stage 2: check the status code and read the body.
        try:
            if response.status != 200:
                self.lastError = REQUEST_ERROR
                return UNKNOWN_ERROR
            data = response.read()
            conn.close()
        except KeyboardInterrupt as e:
            raise e
        except:
            self.lastError = REQUEST_ERROR
            return UNKNOWN_ERROR
        # Stage 3: parse the JSON payload and extract the action string.
        try:
            action = loads(data)["action"]
            self.lastError = NO_ERROR
            return action
        except KeyboardInterrupt as e:
            raise e
        except:
            self.lastError = INVALID_JSON_ERROR
            return UNKNOWN_ERROR
        # Defensive fallback — every branch above returns, so this is
        # normally unreachable.
        # BUG FIX: was ``UNKNOWN_ERROR_ERROR``, an undefined name that would
        # have raised NameError if ever reached.
        self.lastError = UNKNOWN_UNKNOWN_ERROR
        return UNKNOWN_ERROR
if __name__ == "__main__":
print "Import me using 'import mindstormsyapi'." | mindstormsy-client/mindstormsyapi.py | import httplib
from json import loads, dumps
import types
import urllib
SERVER_ERROR = "err" # Indicates that a non-fatal error occurred on the server
# Usually means an invalid Wave ID
UNKNOWN_ERROR = "unk" # Indicates that an error occurred in the MindstormsyAPI
# Use MindstormsyClient.lastError to get a more detailed explanation about what went wrong
NO_ERROR = 0 # Everything's fine, stop being so paranoid!
CONNECTION_ERROR = 1 # An error occurred while connecting to the Wave robot
# Could mean a timeout, spelling mistake in the server name, no internet connection, etc.
REQUEST_ERROR = 2 # The connection to the server was successful, but a status code other than 200 OK was returned
# Or, something failed while reading the response from the server
INVALID_JSON_ERROR = 3 # The JSON returned from the server was invalid, and the parser failed
UNKNOWN_UNKNOWN_ERROR = 4 # Panic!
class MindstormsyClient():
    """The MindstormsyClient class enables you to fetch action strings from a Mindstormsy-Robot instance on the Google App Engine."""

    def __init__(self, server="mindstormsy-robot.appspot.com", port=80):
        "Initialises the object. Specify your own server (hostname only) if you're running a custom Mindstormsy-Robot instance (and custom port if it's running locally)."
        self._server = server
        # NOTE(review): the ``port`` argument is ignored — the port is
        # hard-coded to 80, so custom ports (local instances) will not work.
        self._port = 80
        self.lastError = NO_ERROR

    def poll(self, waveId, timeout):
        "Polls the Wave robot. Specify the Wave ID (the user can obtain this through the Mindstormsy gadget in Google Wave) and connection timeout. The method will return the current action for the Wave ID as a string. If there was an error connecting or invalid data was fetched, then UNKNOWN_ERROR will be returned. If you receive an UNKNOWN_ERROR, then check MindstormsyClient.lastError for a more detailed explanation on what went wrong. Refer to the error codes in mindstormsyapi.py for more info. An error on the server (usually the Wave ID not existing in the database yet) will return SERVER_ERROR."
        # Stage 1: open the connection and issue the GET request.
        try:
            conn = httplib.HTTPConnection(self._server, self._port, True, timeout)
            conn.request("GET", "/?id=" + urllib.quote(waveId))
            response = conn.getresponse()
        except KeyboardInterrupt as e:
            raise e
        except:
            self.lastError = CONNECTION_ERROR
            return UNKNOWN_ERROR
        # Stage 2: check the status code and read the body.
        try:
            if response.status != 200:
                self.lastError = REQUEST_ERROR
                return UNKNOWN_ERROR
            data = response.read()
            conn.close()
        except KeyboardInterrupt as e:
            raise e
        except:
            self.lastError = REQUEST_ERROR
            return UNKNOWN_ERROR
        # Stage 3: parse the JSON payload and extract the action string.
        try:
            action = loads(data)["action"]
            self.lastError = NO_ERROR
            return action
        except KeyboardInterrupt as e:
            raise e
        except:
            self.lastError = INVALID_JSON_ERROR
            return UNKNOWN_ERROR
        # NOTE(review): unreachable — every branch above returns. Also,
        # UNKNOWN_ERROR_ERROR is undefined (probably meant
        # UNKNOWN_UNKNOWN_ERROR) and would raise NameError if reached.
        self.lastError = UNKNOWN_ERROR_ERROR
        return UNKNOWN_ERROR
if __name__ == "__main__":
print "Import me using 'import mindstormsyapi'." | 0.328314 | 0.140336 |
import numpy as np
from collections import OrderedDict
from .. import analyze
from ..objects import Signal
from ..enum import Freq
try:
from ..utils.ta1 import MACD, SMA
except:
from ..utils.ta import MACD, SMA
def get_s_single_k(c: analyze.CZSC, di: int = 1) -> OrderedDict:
    """Single-bar signal (up/down close) for the di-th bar from the end.

    Only produced for daily/weekly frequencies; returns an empty dict when
    the frequency does not apply or there are not enough bars.
    """
    if c.freq not in (Freq.D, Freq.W) or len(c.bars_raw) < di:
        return OrderedDict()
    k1 = str(c.freq.value)
    s = OrderedDict()
    # Seed the key with the neutral placeholder, then overwrite it below.
    placeholder = Signal(k1=k1, k2=f"倒{di}K", k3="状态", v1="其他", v2='其他', v3='其他')
    s[placeholder.key] = placeholder.value
    bar = c.bars_raw[-di]
    direction = "上涨" if bar.close > bar.open else "下跌"
    sig = Signal(k1=k1, k2=f"倒{di}K", k3="状态", v1=direction)
    s[sig.key] = sig.value
    return s
def get_s_three_k(c: analyze.CZSC, di: int = 1) -> OrderedDict:
    """Three-bar (fractal) shape signal for the di-th bar from the end.

    :param c: CZSC object
    :param di: the most recent bar considered is the di-th from the end
    :return: signal dict
    """
    assert di >= 1
    freq: Freq = c.freq
    k1 = str(freq.value)
    k2 = f"倒{di}K"
    s = OrderedDict()
    # Neutral placeholder so the dict always contains the key, even when
    # there are not enough bars to classify a shape.
    v = Signal(k1=k1, k2=k2, k3="三K形态", v1="其他", v2='其他', v3='其他')
    s[v.key] = v.value
    if len(c.bars_ubi) < 3 + di:
        return s
    # Window of three consecutive bars ending at the di-th bar from the end;
    # di == 1 is special-cased because a slice ending at index 0 is empty.
    if di == 1:
        tri = c.bars_ubi[-3:]
    else:
        tri = c.bars_ubi[-3 - di + 1:-di + 1]
    # NOTE(review): all four shapes below compare only the bars' *highs*,
    # including the "底分型" (bottom fractal) case, which conventionally
    # looks at lows — confirm this is intentional.
    if tri[0].high > tri[1].high < tri[2].high:
        v = Signal(k1=k1, k2=k2, k3="三K形态", v1="底分型")  # bottom fractal
    elif tri[0].high < tri[1].high < tri[2].high:
        v = Signal(k1=k1, k2=k2, k3="三K形态", v1="向上走")  # rising
    elif tri[0].high < tri[1].high > tri[2].high:
        v = Signal(k1=k1, k2=k2, k3="三K形态", v1="顶分型")  # top fractal
    elif tri[0].high > tri[1].high > tri[2].high:
        v = Signal(k1=k1, k2=k2, k3="三K形态", v1="向下走")  # falling
    else:
        v = None
    # Only overwrite the placeholder with a definite (non-"其他") shape.
    if v and "其他" not in v.value:
        s[v.key] = v.value
    return s
def get_s_macd(c: analyze.CZSC, di: int = 1) -> OrderedDict:
    """MACD(12, 26, 9) signals — long/short bias and direction of DIF, DEA
    and the MACD histogram — for the di-th bar from the end.
    """
    freq: Freq = c.freq
    s = OrderedDict()
    k1 = str(freq.value)
    k2 = f"倒{di}K"
    # Seed all six signal keys with the neutral "其他" placeholder so the
    # returned dict always has a stable shape.
    default_signals = [
        Signal(k1=k1, k2=k2, k3="DIF多空", v1="其他", v2='其他', v3='其他'),
        Signal(k1=k1, k2=k2, k3="DIF方向", v1="其他", v2='其他', v3='其他'),
        Signal(k1=k1, k2=k2, k3="DEA多空", v1="其他", v2='其他', v3='其他'),
        Signal(k1=k1, k2=k2, k3="DEA方向", v1="其他", v2='其他', v3='其他'),
        Signal(k1=k1, k2=k2, k3="MACD多空", v1="其他", v2='其他', v3='其他'),
        Signal(k1=k1, k2=k2, k3="MACD方向", v1="其他", v2='其他', v3='其他'),
    ]
    for signal in default_signals:
        s[signal.key] = signal.value
    # MACD needs warm-up history; below 100 bars return the placeholders.
    if len(c.bars_raw) < 100:
        return s
    # Last 100 closes ending at the di-th bar from the end; di == 1 is
    # special-cased because a slice ending at index 0 would be empty.
    if di == 1:
        close = np.array([x.close for x in c.bars_raw[-100:]])
    else:
        close = np.array([x.close for x in c.bars_raw[-100-di+1:-di+1]])
    dif, dea, macd = MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)
    # DIF long/short bias: the threshold is the mean absolute step of the
    # last three DIF changes, so values hovering near zero read as fuzzy.
    dif_base = sum([abs(dif[-2] - dif[-1]), abs(dif[-3] - dif[-2]), abs(dif[-4] - dif[-3])]) / 3
    if dif[-1] > dif_base:
        v = Signal(k1=k1, k2=k2, k3="DIF多空", v1="多头")  # long
    elif dif[-1] < -dif_base:
        v = Signal(k1=k1, k2=k2, k3="DIF多空", v1="空头")  # short
    else:
        v = Signal(k1=k1, k2=k2, k3="DIF多空", v1="模糊")  # fuzzy
    s[v.key] = v.value
    # DIF direction: requires two consecutive moves the same way.
    if dif[-1] > dif[-2] > dif[-3]:
        v = Signal(k1=k1, k2=k2, k3="DIF方向", v1="向上")  # up
    elif dif[-1] < dif[-2] < dif[-3]:
        v = Signal(k1=k1, k2=k2, k3="DIF方向", v1="向下")  # down
    else:
        v = Signal(k1=k1, k2=k2, k3="DIF方向", v1="模糊")
    s[v.key] = v.value
    # DEA long/short bias — same thresholding scheme as DIF above.
    dea_base = sum([abs(dea[-2] - dea[-1]), abs(dea[-3] - dea[-2]), abs(dea[-4] - dea[-3])]) / 3
    if dea[-1] > dea_base:
        v = Signal(k1=k1, k2=k2, k3="DEA多空", v1="多头")
    elif dea[-1] < -dea_base:
        v = Signal(k1=k1, k2=k2, k3="DEA多空", v1="空头")
    else:
        v = Signal(k1=k1, k2=k2, k3="DEA多空", v1="模糊")
    s[v.key] = v.value
    # DEA direction: a single step comparison (unlike DIF/MACD above).
    if dea[-1] > dea[-2]:
        v = Signal(k1=k1, k2=k2, k3="DEA方向", v1="向上")
    elif dea[-1] < dea[-2]:
        v = Signal(k1=k1, k2=k2, k3="DEA方向", v1="向下")
    else:
        v = Signal(k1=k1, k2=k2, k3="DEA方向", v1="模糊")
    s[v.key] = v.value
    # MACD histogram bias: simply its sign (zero counts as long).
    if macd[-1] >= 0:
        v = Signal(k1=k1, k2=k2, k3="MACD多空", v1="多头")
    else:
        v = Signal(k1=k1, k2=k2, k3="MACD多空", v1="空头")
    s[v.key] = v.value
    # MACD histogram direction: two consecutive moves the same way.
    if macd[-1] > macd[-2] > macd[-3]:
        v = Signal(k1=k1, k2=k2, k3="MACD方向", v1="向上")
    elif macd[-1] < macd[-2] < macd[-3]:
        v = Signal(k1=k1, k2=k2, k3="MACD方向", v1="向下")
    else:
        v = Signal(k1=k1, k2=k2, k3="MACD方向", v1="模糊")
    s[v.key] = v.value
    return s
def get_s_sma(c: analyze.CZSC, di: int = 1, t_seq=(5, 10, 20, 60)) -> OrderedDict:
"""获取倒数第i根K线的SMA相关信号"""
freq: Freq = c.freq
s = OrderedDict()
k1 = str(freq.value)
k2 = f"倒{di}K"
for t in t_seq:
x1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="其他", v2='其他', v3='其他')
x2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="其他", v2='其他', v3='其他')
s[x1.key] = x1.value
s[x2.key] = x2.value
n = max(t_seq) + 10
if len(c.bars_raw) < n:
return s
if di == 1:
close = np.array([x.close for x in c.bars_raw[-n:]])
else:
close = np.array([x.close for x in c.bars_raw[-n-di+1:-di+1]])
for t in t_seq:
sma = SMA(close, timeperiod=t)
if close[-1] >= sma[-1]:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="多头")
else:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="空头")
s[v1.key] = v1.value
if sma[-1] >= sma[-2]:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向上")
else:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向下")
s[v2.key] = v2.value
return s | czsc/signals/ta.py | import numpy as np
from collections import OrderedDict
from .. import analyze
from ..objects import Signal
from ..enum import Freq
try:
from ..utils.ta1 import MACD, SMA
except:
from ..utils.ta import MACD, SMA
def get_s_single_k(c: analyze.CZSC, di: int = 1) -> OrderedDict:
    """Single-bar signal (up/down close) for the di-th bar from the end.

    Only produced for daily/weekly frequencies; returns an empty dict when
    the frequency does not apply or there are not enough bars.
    """
    if c.freq not in (Freq.D, Freq.W) or len(c.bars_raw) < di:
        return OrderedDict()
    k1 = str(c.freq.value)
    s = OrderedDict()
    # Seed the key with the neutral placeholder, then overwrite it below.
    placeholder = Signal(k1=k1, k2=f"倒{di}K", k3="状态", v1="其他", v2='其他', v3='其他')
    s[placeholder.key] = placeholder.value
    bar = c.bars_raw[-di]
    direction = "上涨" if bar.close > bar.open else "下跌"
    sig = Signal(k1=k1, k2=f"倒{di}K", k3="状态", v1=direction)
    s[sig.key] = sig.value
    return s
def get_s_three_k(c: analyze.CZSC, di: int = 1) -> OrderedDict:
    """Three-bar (fractal) shape signal for the di-th bar from the end.

    :param c: CZSC object
    :param di: the most recent bar considered is the di-th from the end
    :return: signal dict
    """
    assert di >= 1
    freq: Freq = c.freq
    k1 = str(freq.value)
    k2 = f"倒{di}K"
    s = OrderedDict()
    # Neutral placeholder so the dict always contains the key.
    v = Signal(k1=k1, k2=k2, k3="三K形态", v1="其他", v2='其他', v3='其他')
    s[v.key] = v.value
    if len(c.bars_ubi) < 3 + di:
        return s
    # Window of three consecutive bars ending at the di-th bar from the end;
    # di == 1 is special-cased because a slice ending at index 0 is empty.
    if di == 1:
        tri = c.bars_ubi[-3:]
    else:
        tri = c.bars_ubi[-3 - di + 1:-di + 1]
    # NOTE(review): all shapes compare only highs, including the bottom
    # fractal ("底分型") case which conventionally uses lows — confirm.
    if tri[0].high > tri[1].high < tri[2].high:
        v = Signal(k1=k1, k2=k2, k3="三K形态", v1="底分型")  # bottom fractal
    elif tri[0].high < tri[1].high < tri[2].high:
        v = Signal(k1=k1, k2=k2, k3="三K形态", v1="向上走")  # rising
    elif tri[0].high < tri[1].high > tri[2].high:
        v = Signal(k1=k1, k2=k2, k3="三K形态", v1="顶分型")  # top fractal
    elif tri[0].high > tri[1].high > tri[2].high:
        v = Signal(k1=k1, k2=k2, k3="三K形态", v1="向下走")  # falling
    else:
        v = None
    # Only overwrite the placeholder with a definite (non-"其他") shape.
    if v and "其他" not in v.value:
        s[v.key] = v.value
    return s
def get_s_macd(c: analyze.CZSC, di: int = 1) -> OrderedDict:
    """MACD(12, 26, 9) signals — long/short bias and direction of DIF, DEA
    and the MACD histogram — for the di-th bar from the end.
    """
    freq: Freq = c.freq
    s = OrderedDict()
    k1 = str(freq.value)
    k2 = f"倒{di}K"
    # Seed all six signal keys with the neutral "其他" placeholder.
    default_signals = [
        Signal(k1=k1, k2=k2, k3="DIF多空", v1="其他", v2='其他', v3='其他'),
        Signal(k1=k1, k2=k2, k3="DIF方向", v1="其他", v2='其他', v3='其他'),
        Signal(k1=k1, k2=k2, k3="DEA多空", v1="其他", v2='其他', v3='其他'),
        Signal(k1=k1, k2=k2, k3="DEA方向", v1="其他", v2='其他', v3='其他'),
        Signal(k1=k1, k2=k2, k3="MACD多空", v1="其他", v2='其他', v3='其他'),
        Signal(k1=k1, k2=k2, k3="MACD方向", v1="其他", v2='其他', v3='其他'),
    ]
    for signal in default_signals:
        s[signal.key] = signal.value
    # MACD needs warm-up history; below 100 bars return the placeholders.
    if len(c.bars_raw) < 100:
        return s
    # Last 100 closes ending at the di-th bar from the end.
    if di == 1:
        close = np.array([x.close for x in c.bars_raw[-100:]])
    else:
        close = np.array([x.close for x in c.bars_raw[-100-di+1:-di+1]])
    dif, dea, macd = MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)
    # DIF bias: threshold is the mean absolute step of the last three DIF
    # changes, so values hovering near zero read as fuzzy ("模糊").
    dif_base = sum([abs(dif[-2] - dif[-1]), abs(dif[-3] - dif[-2]), abs(dif[-4] - dif[-3])]) / 3
    if dif[-1] > dif_base:
        v = Signal(k1=k1, k2=k2, k3="DIF多空", v1="多头")  # long
    elif dif[-1] < -dif_base:
        v = Signal(k1=k1, k2=k2, k3="DIF多空", v1="空头")  # short
    else:
        v = Signal(k1=k1, k2=k2, k3="DIF多空", v1="模糊")  # fuzzy
    s[v.key] = v.value
    # DIF direction: two consecutive moves the same way.
    if dif[-1] > dif[-2] > dif[-3]:
        v = Signal(k1=k1, k2=k2, k3="DIF方向", v1="向上")  # up
    elif dif[-1] < dif[-2] < dif[-3]:
        v = Signal(k1=k1, k2=k2, k3="DIF方向", v1="向下")  # down
    else:
        v = Signal(k1=k1, k2=k2, k3="DIF方向", v1="模糊")
    s[v.key] = v.value
    # DEA bias — same thresholding scheme as DIF above.
    dea_base = sum([abs(dea[-2] - dea[-1]), abs(dea[-3] - dea[-2]), abs(dea[-4] - dea[-3])]) / 3
    if dea[-1] > dea_base:
        v = Signal(k1=k1, k2=k2, k3="DEA多空", v1="多头")
    elif dea[-1] < -dea_base:
        v = Signal(k1=k1, k2=k2, k3="DEA多空", v1="空头")
    else:
        v = Signal(k1=k1, k2=k2, k3="DEA多空", v1="模糊")
    s[v.key] = v.value
    # DEA direction: a single step comparison (unlike DIF/MACD).
    if dea[-1] > dea[-2]:
        v = Signal(k1=k1, k2=k2, k3="DEA方向", v1="向上")
    elif dea[-1] < dea[-2]:
        v = Signal(k1=k1, k2=k2, k3="DEA方向", v1="向下")
    else:
        v = Signal(k1=k1, k2=k2, k3="DEA方向", v1="模糊")
    s[v.key] = v.value
    # MACD histogram bias: its sign (zero counts as long).
    if macd[-1] >= 0:
        v = Signal(k1=k1, k2=k2, k3="MACD多空", v1="多头")
    else:
        v = Signal(k1=k1, k2=k2, k3="MACD多空", v1="空头")
    s[v.key] = v.value
    # MACD histogram direction: two consecutive moves the same way.
    if macd[-1] > macd[-2] > macd[-3]:
        v = Signal(k1=k1, k2=k2, k3="MACD方向", v1="向上")
    elif macd[-1] < macd[-2] < macd[-3]:
        v = Signal(k1=k1, k2=k2, k3="MACD方向", v1="向下")
    else:
        v = Signal(k1=k1, k2=k2, k3="MACD方向", v1="模糊")
    s[v.key] = v.value
    return s
def get_s_sma(c: analyze.CZSC, di: int = 1, t_seq=(5, 10, 20, 60)) -> OrderedDict:
"""获取倒数第i根K线的SMA相关信号"""
freq: Freq = c.freq
s = OrderedDict()
k1 = str(freq.value)
k2 = f"倒{di}K"
for t in t_seq:
x1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="其他", v2='其他', v3='其他')
x2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="其他", v2='其他', v3='其他')
s[x1.key] = x1.value
s[x2.key] = x2.value
n = max(t_seq) + 10
if len(c.bars_raw) < n:
return s
if di == 1:
close = np.array([x.close for x in c.bars_raw[-n:]])
else:
close = np.array([x.close for x in c.bars_raw[-n-di+1:-di+1]])
for t in t_seq:
sma = SMA(close, timeperiod=t)
if close[-1] >= sma[-1]:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="多头")
else:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="空头")
s[v1.key] = v1.value
if sma[-1] >= sma[-2]:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向上")
else:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向下")
s[v2.key] = v2.value
return s | 0.318697 | 0.355495 |
import datetime
from .resources import (
LINK_LIST_URL_TEMPLATE,
CURRENT_YEAR,
DATE_FMT,
ALL_SOURCES,
)
def string_safe_list(obj):
    """
    Coerce *obj* into a list.

    Strings and non-iterable objects are treated as single items and wrapped
    in a one-element list; any other iterable is materialized with ``list``.

    :param obj:
    :return list:
    """
    treat_as_scalar = isinstance(obj, str) or not hasattr(obj, "__iter__")
    return [obj] if treat_as_scalar else list(obj)
def countries_from_summary(summary):
    """
    Get the list of unique countries from the summary.

    :param list[dict] summary: The E1a summary.
    :return list[str]: The available countries.
    """
    seen = set()
    for entry in summary:
        seen.add(entry["ct"])
    return list(seen)
def pollutants_from_summary(summary):
    """
    Get the list of unique pollutants from the summary.

    :param list[dict] summary: The E1a summary.
    :return dict: The available pollutants, with name ("pl") as key
        and pollutant number ("shortpl") as value.
    """
    mapping = {}
    for entry in summary:
        mapping[entry["pl"]] = entry["shortpl"]
    return mapping
def pollutants_per_country(summary):
    """
    Get the available pollutants per country from the summary.

    :param list[dict] summary: The E1a summary.
    :return dict[list[dict]]: All available pollutants per country, keyed by
        country code; the "ct" key is stripped from each entry.
    """
    output = dict()
    for entry in summary:
        # BUG FIX: the old code iterated over ``summary.copy()`` (a *shallow*
        # copy) and then called ``d.pop("ct")``, which mutated the caller's
        # dicts. Build a fresh dict without the country key instead.
        d = {key: value for key, value in entry.items() if key != "ct"}
        output.setdefault(entry["ct"], []).append(d)
    return output
def link_list_url(
    country,
    shortpl=None,
    year_from="2013",
    year_to=CURRENT_YEAR,
    source="All",
    update_date=None,
):
    """
    Generate the URL where the download links for a query can be found.

    :param str country: The 2-letter country code. See
        AirbaseClient.countries for options.
    :param str shortpl: (optional) The pollutant number. Leave blank to
        get all pollutants. See AirbaseClient.pollutants_per_country for
        options.
    :param str year_from: (optional) The first year of data. Can not be
        earlier than 2013. Default 2013.
    :param str year_to: (optional) The last year of data. Can not be
        later than the current year. Default <current year>.
    :param str source: (optional) One of "E1a", "E2a" or "All". E2a
        (UTD) data are only available for years where E1a data have not
        yet been delivered (this will normally be the most recent year).
        Default "All".
    :param str|datetime update_date: (optional). Format
        "yyyy-mm-dd hh:mm:ss". To be used when only files created or
        updated after a certain date is of interest.
    :return str: The URL which will yield the list of relevant CSV
        download links.
    """
    # Blank pollutant number means "all pollutants" in the template.
    shortpl = shortpl or ""
    # Data before 2013 is not served by this endpoint.
    if int(year_from) < 2013:
        raise ValueError("'year_from' must be at least 2013")
    year_from = str(int(year_from))
    if int(year_to) > int(CURRENT_YEAR):
        raise ValueError("'year_to' must be at most " + str(CURRENT_YEAR))
    year_to = str(int(year_to))
    # Accept either a datetime or a pre-formatted string for update_date.
    if isinstance(update_date, datetime.datetime):
        update_date = update_date.strftime(DATE_FMT)
    update_date = update_date or ""
    if source is not None and source not in ALL_SOURCES:
        raise ValueError("'source' must be one of: " + ",".join(ALL_SOURCES))
    source = source or ""
    return LINK_LIST_URL_TEMPLATE.format(
        country=country,
        shortpl=shortpl,
        year_from=year_from,
        year_to=year_to,
        source=source,
        update_date=update_date,
    )
def extract_csv_links(text):
"""Get a list of csv links from the download link response text"""
links = text.replace("\r", "").split("\n")
links.remove("")
return links | airbase/util.py |
import datetime
from .resources import (
LINK_LIST_URL_TEMPLATE,
CURRENT_YEAR,
DATE_FMT,
ALL_SOURCES,
)
def string_safe_list(obj):
    """
    Coerce *obj* into a list.

    Strings and non-iterable objects are treated as single items and wrapped
    in a one-element list; any other iterable is materialized with ``list``.

    :param obj:
    :return list:
    """
    treat_as_scalar = isinstance(obj, str) or not hasattr(obj, "__iter__")
    return [obj] if treat_as_scalar else list(obj)
def countries_from_summary(summary):
    """
    Get the list of unique countries from the summary.

    :param list[dict] summary: The E1a summary.
    :return list[str]: The available countries.
    """
    seen = set()
    for entry in summary:
        seen.add(entry["ct"])
    return list(seen)
def pollutants_from_summary(summary):
    """
    Get the list of unique pollutants from the summary.

    :param list[dict] summary: The E1a summary.
    :return dict: The available pollutants, with name ("pl") as key
        and pollutant number ("shortpl") as value.
    """
    mapping = {}
    for entry in summary:
        mapping[entry["pl"]] = entry["shortpl"]
    return mapping
def pollutants_per_country(summary):
    """
    Get the available pollutants per country from the summary.

    :param list[dict] summary: The E1a summary.
    :return dict[list[dict]]: All available pollutants per country, keyed by
        country code; the "ct" key is stripped from each entry.
    """
    output = dict()
    for entry in summary:
        # BUG FIX: the old code iterated over ``summary.copy()`` (a *shallow*
        # copy) and then called ``d.pop("ct")``, which mutated the caller's
        # dicts. Build a fresh dict without the country key instead.
        d = {key: value for key, value in entry.items() if key != "ct"}
        output.setdefault(entry["ct"], []).append(d)
    return output
def link_list_url(
    country,
    shortpl=None,
    year_from="2013",
    year_to=CURRENT_YEAR,
    source="All",
    update_date=None,
):
    """
    Build the URL that lists the CSV download links for a query.

    :param str country: The 2-letter country code. See
        AirbaseClient.countries for options.
    :param str shortpl: (optional) The pollutant number; blank means all
        pollutants. See AirbaseClient.pollutants_per_country for options.
    :param str year_from: (optional) First year of data, no earlier than
        2013. Default 2013.
    :param str year_to: (optional) Last year of data, no later than the
        current year. Default <current year>.
    :param str source: (optional) One of "E1a", "E2a" or "All".
        Default "All".
    :param str|datetime update_date: (optional) "yyyy-mm-dd hh:mm:ss";
        restricts the listing to files created/updated after that moment.
    :return str: The URL yielding the list of relevant CSV download links.
    """
    # Validate and normalize the year range first.
    first_year = int(year_from)
    if first_year < 2013:
        raise ValueError("'year_from' must be at least 2013")
    last_year = int(year_to)
    if last_year > int(CURRENT_YEAR):
        raise ValueError("'year_to' must be at most " + str(CURRENT_YEAR))
    if source is not None and source not in ALL_SOURCES:
        raise ValueError("'source' must be one of: " + ",".join(ALL_SOURCES))
    # A datetime is converted to the endpoint's expected string format.
    if isinstance(update_date, datetime.datetime):
        update_date = update_date.strftime(DATE_FMT)
    return LINK_LIST_URL_TEMPLATE.format(
        country=country,
        shortpl=shortpl or "",
        year_from=str(first_year),
        year_to=str(last_year),
        source=source or "",
        update_date=update_date or "",
    )
def extract_csv_links(text):
    """Return the list of CSV download links from the response *text*.

    The service returns one link per line. Robustness fix: the original
    ``links.remove("")`` removed only the FIRST empty entry and raised
    ``ValueError`` when the text contained none (e.g. no trailing
    newline); filtering keeps every non-empty line and never raises.

    :param str text: raw response body from the link-list URL.
    :return list[str]: all non-empty lines (the CSV links).
    """
    return [link for link in text.replace("\r", "").split("\n") if link]
import os
import socket
from mock import mock
from pyngrok import ngrok, installer, conf
from pyngrok.exception import PyngrokNgrokInstallError, PyngrokSecurityError, PyngrokError
from .testcase import NgrokTestCase
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__version__ = "4.1.0"
class TestInstaller(NgrokTestCase):
    def test_installer(self):
        """Connecting when the ngrok binary is absent downloads and installs it."""
        # GIVEN
        if os.path.exists(conf.DEFAULT_NGROK_PATH):
            os.remove(conf.DEFAULT_NGROK_PATH)
        self.assertFalse(os.path.exists(conf.DEFAULT_NGROK_PATH))

        # WHEN
        ngrok.connect(pyngrok_config=self.pyngrok_config)

        # THEN
        self.assertTrue(os.path.exists(conf.DEFAULT_NGROK_PATH))

    def test_config_provisioned(self):
        """Connecting when the config file is absent provisions a default one."""
        # GIVEN
        if os.path.exists(self.pyngrok_config.config_path):
            os.remove(self.pyngrok_config.config_path)
        self.assertFalse(os.path.exists(self.pyngrok_config.config_path))

        # WHEN
        ngrok.connect(pyngrok_config=self.pyngrok_config)

        # THEN
        self.assertTrue(os.path.exists(self.pyngrok_config.config_path))

    @mock.patch("pyngrok.installer.urlopen")
    def test_installer_download_fails(self, mock_urlopen):
        """An HTTP 500 from the download URL raises PyngrokNgrokInstallError
        and leaves no binary behind."""
        # GIVEN
        magic_mock = mock.MagicMock()
        magic_mock.getcode.return_value = 500
        mock_urlopen.return_value = magic_mock
        if os.path.exists(conf.DEFAULT_NGROK_PATH):
            os.remove(conf.DEFAULT_NGROK_PATH)
        self.assertFalse(os.path.exists(conf.DEFAULT_NGROK_PATH))

        # WHEN
        with self.assertRaises(PyngrokNgrokInstallError):
            ngrok.connect(pyngrok_config=self.pyngrok_config)

        # THEN
        self.assertFalse(os.path.exists(conf.DEFAULT_NGROK_PATH))

    @mock.patch("pyngrok.installer.urlopen")
    def test_installer_retry(self, mock_urlopen):
        """A socket timeout is retried (two urlopen calls total) before
        PyngrokNgrokInstallError is raised."""
        # GIVEN
        mock_urlopen.side_effect = socket.timeout("The read operation timed out")
        if os.path.exists(conf.DEFAULT_NGROK_PATH):
            os.remove(conf.DEFAULT_NGROK_PATH)
        self.assertFalse(os.path.exists(conf.DEFAULT_NGROK_PATH))

        # WHEN
        with self.assertRaises(PyngrokNgrokInstallError):
            ngrok.connect(pyngrok_config=self.pyngrok_config)

        # THEN
        self.assertEqual(mock_urlopen.call_count, 2)
        self.assertFalse(os.path.exists(conf.DEFAULT_NGROK_PATH))

    def test_download_file_security_error(self):
        """A non-http(s) URL scheme (here "file:") is rejected with
        PyngrokSecurityError before any download is attempted."""
        # WHEN
        with self.assertRaises(PyngrokSecurityError):
            installer._download_file("file:{}".format(__file__), retries=10)

    def test_web_addr_false_not_allowed(self):
        """install_default_config rejects a config with 'web_addr': False."""
        # WHEN
        with self.assertRaises(PyngrokError):
            installer.install_default_config(self.pyngrok_config.config_path, {"web_addr": False})
import socket
from mock import mock
from pyngrok import ngrok, installer, conf
from pyngrok.exception import PyngrokNgrokInstallError, PyngrokSecurityError, PyngrokError
from .testcase import NgrokTestCase
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__version__ = "4.1.0"
class TestInstaller(NgrokTestCase):
    def test_installer(self):
        """Connecting when the ngrok binary is absent downloads and installs it."""
        # GIVEN
        if os.path.exists(conf.DEFAULT_NGROK_PATH):
            os.remove(conf.DEFAULT_NGROK_PATH)
        self.assertFalse(os.path.exists(conf.DEFAULT_NGROK_PATH))

        # WHEN
        ngrok.connect(pyngrok_config=self.pyngrok_config)

        # THEN
        self.assertTrue(os.path.exists(conf.DEFAULT_NGROK_PATH))

    def test_config_provisioned(self):
        """Connecting when the config file is absent provisions a default one."""
        # GIVEN
        if os.path.exists(self.pyngrok_config.config_path):
            os.remove(self.pyngrok_config.config_path)
        self.assertFalse(os.path.exists(self.pyngrok_config.config_path))

        # WHEN
        ngrok.connect(pyngrok_config=self.pyngrok_config)

        # THEN
        self.assertTrue(os.path.exists(self.pyngrok_config.config_path))

    @mock.patch("pyngrok.installer.urlopen")
    def test_installer_download_fails(self, mock_urlopen):
        """An HTTP 500 from the download URL raises PyngrokNgrokInstallError
        and leaves no binary behind."""
        # GIVEN
        magic_mock = mock.MagicMock()
        magic_mock.getcode.return_value = 500
        mock_urlopen.return_value = magic_mock
        if os.path.exists(conf.DEFAULT_NGROK_PATH):
            os.remove(conf.DEFAULT_NGROK_PATH)
        self.assertFalse(os.path.exists(conf.DEFAULT_NGROK_PATH))

        # WHEN
        with self.assertRaises(PyngrokNgrokInstallError):
            ngrok.connect(pyngrok_config=self.pyngrok_config)

        # THEN
        self.assertFalse(os.path.exists(conf.DEFAULT_NGROK_PATH))

    @mock.patch("pyngrok.installer.urlopen")
    def test_installer_retry(self, mock_urlopen):
        """A socket timeout is retried (two urlopen calls total) before
        PyngrokNgrokInstallError is raised."""
        # GIVEN
        mock_urlopen.side_effect = socket.timeout("The read operation timed out")
        if os.path.exists(conf.DEFAULT_NGROK_PATH):
            os.remove(conf.DEFAULT_NGROK_PATH)
        self.assertFalse(os.path.exists(conf.DEFAULT_NGROK_PATH))

        # WHEN
        with self.assertRaises(PyngrokNgrokInstallError):
            ngrok.connect(pyngrok_config=self.pyngrok_config)

        # THEN
        self.assertEqual(mock_urlopen.call_count, 2)
        self.assertFalse(os.path.exists(conf.DEFAULT_NGROK_PATH))

    def test_download_file_security_error(self):
        """A non-http(s) URL scheme (here "file:") is rejected with
        PyngrokSecurityError before any download is attempted."""
        # WHEN
        with self.assertRaises(PyngrokSecurityError):
            installer._download_file("file:{}".format(__file__), retries=10)

    def test_web_addr_false_not_allowed(self):
        """install_default_config rejects a config with 'web_addr': False."""
        # WHEN
        with self.assertRaises(PyngrokError):
            installer.install_default_config(self.pyngrok_config.config_path, {"web_addr": False})
from django.db.backends.signals import connection_created
from django.db.migrations.writer import MigrationWriter
from django.test.utils import modify_settings
from . import PostgreSQLTestCase
try:
from psycopg2.extras import (
DateRange,
DateTimeRange,
DateTimeTZRange,
NumericRange,
)
from django.contrib.postgres.fields import (
DateRangeField,
DateTimeRangeField,
IntegerRangeField,
)
except ImportError:
pass
class PostgresConfigTests(PostgreSQLTestCase):
    def test_register_type_handlers_connection(self):
        """register_type_handlers is attached to connection_created only
        while django.contrib.postgres is in INSTALLED_APPS, and detached
        again when the setting is restored."""
        from django.contrib.postgres.signals import register_type_handlers

        self.assertNotIn(
            register_type_handlers, connection_created._live_receivers(None)
        )
        with modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"}):
            self.assertIn(
                register_type_handlers, connection_created._live_receivers(None)
            )
        self.assertNotIn(
            register_type_handlers, connection_created._live_receivers(None)
        )

    def test_register_serializer_for_migrations(self):
        """psycopg2 range objects used as field defaults serialize in
        migrations only while django.contrib.postgres is installed."""
        tests = (
            (DateRange(empty=True), DateRangeField),
            # NOTE(review): DateTimeRange paired with DateRangeField —
            # serialization presumably dispatches on the default's type, so
            # the field class may be incidental here; confirm intent.
            (DateTimeRange(empty=True), DateRangeField),
            (DateTimeTZRange(None, None, "[]"), DateTimeRangeField),
            (NumericRange(1, 10), IntegerRangeField),
        )

        def assertNotSerializable():
            # Without the app installed, serializing any of these defaults
            # must fail with a "Cannot serialize" ValueError.
            for default, test_field in tests:
                with self.subTest(default=default):
                    field = test_field(default=default)
                    with self.assertRaisesMessage(
                        ValueError, "Cannot serialize: %s" % default.__class__.__name__
                    ):
                        MigrationWriter.serialize(field)

        assertNotSerializable()
        with self.modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"}):
            for default, test_field in tests:
                with self.subTest(default=default):
                    field = test_field(default=default)
                    serialized_field, imports = MigrationWriter.serialize(field)
                    self.assertEqual(
                        imports,
                        {
                            "import django.contrib.postgres.fields.ranges",
                            "import psycopg2.extras",
                        },
                    )
                    self.assertIn(
                        "%s.%s(default=psycopg2.extras.%r)"
                        % (
                            field.__module__,
                            field.__class__.__name__,
                            default,
                        ),
                        serialized_field,
                    )
        # Uninstalling the app must disable serialization again.
        assertNotSerializable()
from django.db.migrations.writer import MigrationWriter
from django.test.utils import modify_settings
from . import PostgreSQLTestCase
try:
from psycopg2.extras import (
DateRange,
DateTimeRange,
DateTimeTZRange,
NumericRange,
)
from django.contrib.postgres.fields import (
DateRangeField,
DateTimeRangeField,
IntegerRangeField,
)
except ImportError:
pass
class PostgresConfigTests(PostgreSQLTestCase):
    def test_register_type_handlers_connection(self):
        """register_type_handlers is attached to connection_created only
        while django.contrib.postgres is in INSTALLED_APPS, and detached
        again when the setting is restored."""
        from django.contrib.postgres.signals import register_type_handlers

        self.assertNotIn(
            register_type_handlers, connection_created._live_receivers(None)
        )
        with modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"}):
            self.assertIn(
                register_type_handlers, connection_created._live_receivers(None)
            )
        self.assertNotIn(
            register_type_handlers, connection_created._live_receivers(None)
        )

    def test_register_serializer_for_migrations(self):
        """psycopg2 range objects used as field defaults serialize in
        migrations only while django.contrib.postgres is installed."""
        tests = (
            (DateRange(empty=True), DateRangeField),
            # NOTE(review): DateTimeRange paired with DateRangeField —
            # serialization presumably dispatches on the default's type, so
            # the field class may be incidental here; confirm intent.
            (DateTimeRange(empty=True), DateRangeField),
            (DateTimeTZRange(None, None, "[]"), DateTimeRangeField),
            (NumericRange(1, 10), IntegerRangeField),
        )

        def assertNotSerializable():
            # Without the app installed, serializing any of these defaults
            # must fail with a "Cannot serialize" ValueError.
            for default, test_field in tests:
                with self.subTest(default=default):
                    field = test_field(default=default)
                    with self.assertRaisesMessage(
                        ValueError, "Cannot serialize: %s" % default.__class__.__name__
                    ):
                        MigrationWriter.serialize(field)

        assertNotSerializable()
        with self.modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"}):
            for default, test_field in tests:
                with self.subTest(default=default):
                    field = test_field(default=default)
                    serialized_field, imports = MigrationWriter.serialize(field)
                    self.assertEqual(
                        imports,
                        {
                            "import django.contrib.postgres.fields.ranges",
                            "import psycopg2.extras",
                        },
                    )
                    self.assertIn(
                        "%s.%s(default=psycopg2.extras.%r)"
                        % (
                            field.__module__,
                            field.__class__.__name__,
                            default,
                        ),
                        serialized_field,
                    )
        # Uninstalling the app must disable serialization again.
        assertNotSerializable()
import collections
from collections import OrderedDict
import copy
import pandas as pd
import numpy as np
from datetime import date, timedelta
# Set the float display format
# NOTE(review): this mutates global pandas display state for every importer
# of this module — presumably intentional for notebook output; confirm.
pd.options.display.float_format = '{:.8f}'.format
# DO NOT MODIFY - THE CODE BELOW CONTAINS HELPER CODE TO TEST YOUR PROJECT
def _generate_output_error_msg(fn_name, fn_inputs, fn_outputs, fn_expected_outputs):
formatted_inputs = []
formatted_outputs = []
formatted_expected_outputs = []
for input_name, input_value in fn_inputs.items():
formatted_outputs.append('INPUT {}:\n{}\n'.format(
input_name, str(input_value)))
for output_name, output_value in fn_outputs.items():
formatted_outputs.append('OUTPUT {}:\n{}\n'.format(
output_name, str(output_value)))
for expected_output_name, expected_output_value in fn_expected_outputs.items():
formatted_expected_outputs.append('EXPECTED OUTPUT FOR {}:\n{}\n'.format(
expected_output_name, str(expected_output_value)))
return 'Wrong value for {}.\n' \
'{}\n' \
'{}\n' \
'{}' \
.format(
fn_name,
'\n'.join(formatted_inputs),
'\n'.join(formatted_outputs),
'\n'.join(formatted_expected_outputs))
def _is_equal(x, y):
is_equal = False
if isinstance(x, pd.DataFrame) or isinstance(y, pd.Series):
is_equal = x.equals(y)
elif isinstance(x, np.ndarray):
is_equal = np.array_equal(x, y)
elif isinstance(x, list):
if len(x) == len(y):
for x_item, y_item in zip(x, y):
if not _is_equal(x_item, y_item):
break
else:
is_equal = True
else:
is_equal = x == y
return is_equal
def project_test(func):
    """Decorator that prints 'Tests Passed' after *func* runs cleanly.

    Improvements: forwards keyword arguments (the original accepted only
    positional ones) and applies functools.wraps so the wrapped test keeps
    its ``__name__`` and docstring.

    :param callable func: the test function to wrap.
    :return callable: the wrapped function, returning func's result.
    """
    from functools import wraps

    @wraps(func)
    def func_wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        print('Tests Passed')
        return result
    return func_wrapper
def generate_random_tickers(n_tickers=None):
    """Generate a list of random uppercase ticker symbols.

    :param int n_tickers: (optional) number of tickers to generate;
        defaults to a random count in [8, 13].
    :return list[str]: tickers of 3-4 uppercase letters each.
    """
    MIN_LEN, MAX_LEN = 3, 5
    if not n_tickers:
        n_tickers = np.random.randint(8, 14)
    # Draw all candidate character codes and per-ticker lengths up front.
    char_codes = np.random.randint(ord('A'), ord('Z') + 1, (n_tickers, MAX_LEN))
    lengths = np.random.randint(MIN_LEN, MAX_LEN, n_tickers)
    return [
        ''.join(chr(code) for code in row[:length])
        for row, length in zip(char_codes, lengths)
    ]
def generate_random_dates(n_days=None):
    """Generate a list of consecutive calendar dates with a random start.

    :param int n_days: (optional) number of dates; defaults to a random
        count in [14, 19].
    :return list[datetime.date]: n_days consecutive dates.
    """
    if not n_days:
        n_days = np.random.randint(14, 20)
    # Pick a random start date; the day is capped at 28 so it is valid
    # in every month.
    first = date(
        np.random.randint(1999, 2017),
        np.random.randint(1, 12),
        np.random.randint(1, 29),
    )
    return [first + timedelta(days=offset) for offset in range(n_days)]
def assert_structure(received_obj, expected_obj, obj_name):
    """Assert that *received_obj* structurally matches *expected_obj*.

    Checks type, then shape (or length), and for pandas objects also the
    column/index sets, the element types of those labels, and per-column
    dtypes. Values are NOT compared here — see does_data_match.

    :param received_obj: value produced by the function under test.
    :param expected_obj: reference value defining the expected structure.
    :param str obj_name: output name used in the assertion messages.
    """
    assert isinstance(received_obj, type(expected_obj)), \
        'Wrong type for output {}. Got {}, expected {}'.format(obj_name, type(received_obj), type(expected_obj))

    # Prefer .shape when available (numpy/pandas); fall back to len().
    if hasattr(expected_obj, 'shape'):
        assert received_obj.shape == expected_obj.shape, \
            'Wrong shape for output {}. Got {}, expected {}'.format(obj_name, received_obj.shape, expected_obj.shape)
    elif hasattr(expected_obj, '__len__'):
        assert len(received_obj) == len(expected_obj), \
            'Wrong len for output {}. Got {}, expected {}'.format(obj_name, len(received_obj), len(expected_obj))

    if type(expected_obj) == pd.DataFrame:
        assert set(received_obj.columns) == set(expected_obj.columns), \
            'Incorrect columns for output {}\n' \
            'COLUMNS: {}\n' \
            'EXPECTED COLUMNS: {}'.format(obj_name, sorted(received_obj.columns), sorted(expected_obj.columns))
        # This is to catch a case where __equal__ says it's equal between different types
        assert set([type(i) for i in received_obj.columns]) == set([type(i) for i in expected_obj.columns]), \
            'Incorrect types in columns for output {}\n' \
            'COLUMNS: {}\n' \
            'EXPECTED COLUMNS: {}'.format(obj_name, sorted(received_obj.columns), sorted(expected_obj.columns))
        for column in expected_obj.columns:
            assert received_obj[column].dtype == expected_obj[column].dtype, \
                'Incorrect type for output {}, column {}\n' \
                'Type: {}\n' \
                'EXPECTED Type: {}'.format(obj_name, column, received_obj[column].dtype, expected_obj[column].dtype)

    if type(expected_obj) in {pd.DataFrame, pd.Series}:
        assert set(received_obj.index) == set(expected_obj.index), \
            'Incorrect indices for output {}\n' \
            'INDICES: {}\n' \
            'EXPECTED INDICES: {}'.format(obj_name, sorted(received_obj.index), sorted(expected_obj.index))
        # This is to catch a case where __equal__ says it's equal between different types
        assert set([type(i) for i in received_obj.index]) == set([type(i) for i in expected_obj.index]), \
            'Incorrect types in indices for output {}\n' \
            'INDICES: {}\n' \
            'EXPECTED INDICES: {}'.format(obj_name, sorted(received_obj.index), sorted(expected_obj.index))
def does_data_match(obj_a, obj_b):
    """Return truthy when the two result objects hold the same data.

    DataFrames/Series are aligned by sorting columns and indices first,
    then compared with ``np.isclose`` (NaNs compare equal); plain ``==``
    is the fallback for non-numeric data.

    Fixes:
    - ``collections.Iterable`` was removed in Python 3.10; use
      ``collections.abc.Iterable`` instead.
    - positional ``sort_index(1)`` is deprecated in pandas; use ``axis=1``.

    :param obj_a: expected object (drives the type dispatch).
    :param obj_b: received object.
    :return: truthy when the data match.
    """
    from collections.abc import Iterable

    if type(obj_a) == pd.DataFrame:
        # Sort Columns
        obj_b = obj_b.sort_index(axis=1)
        obj_a = obj_a.sort_index(axis=1)
    if type(obj_a) in {pd.DataFrame, pd.Series}:
        # Sort Indices
        obj_b = obj_b.sort_index()
        obj_a = obj_a.sort_index()
    try:
        data_is_close = np.isclose(obj_b, obj_a, equal_nan=True)
    except TypeError:
        # Non-numeric data: fall back to plain equality.
        data_is_close = obj_b == obj_a
    else:
        if isinstance(obj_a, Iterable):
            data_is_close = data_is_close.all()
    return data_is_close
def assert_output(fn, fn_inputs, fn_expected_outputs, check_parameter_changes=True):
    """Call *fn* with **fn_inputs and assert its output(s) match expectations.

    :param callable fn: the function under test.
    :param dict fn_inputs: keyword arguments for fn.
    :param OrderedDict fn_expected_outputs: expected outputs keyed by name;
        more than one entry means fn must return a tuple of that length.
    :param bool check_parameter_changes: when True, fn is called on a deep
        copy of the inputs and any in-place mutation of them is an error.
    """
    assert type(fn_expected_outputs) == OrderedDict

    if check_parameter_changes:
        # Deep-copy inputs so mutation by fn can be detected afterwards.
        fn_inputs_passed_in = copy.deepcopy(fn_inputs)
    else:
        fn_inputs_passed_in = fn_inputs
    fn_raw_out = fn(**fn_inputs_passed_in)

    # Check if inputs have changed
    if check_parameter_changes:
        for input_name, input_value in fn_inputs.items():
            passed_in_unchanged = _is_equal(input_value, fn_inputs_passed_in[input_name])
            assert passed_in_unchanged, 'Input parameter "{}" has been modified inside the function. ' \
                                        'The function shouldn\'t modify the function parameters.'.format(input_name)

    # Map the raw return value(s) onto the expected-output names.
    fn_outputs = OrderedDict()
    if len(fn_expected_outputs) == 1:
        fn_outputs[list(fn_expected_outputs)[0]] = fn_raw_out
    elif len(fn_expected_outputs) > 1:
        assert type(fn_raw_out) == tuple,\
            'Expecting function to return tuple, got type {}'.format(type(fn_raw_out))
        assert len(fn_raw_out) == len(fn_expected_outputs),\
            'Expected {} outputs in tuple, only found {} outputs'.format(len(fn_expected_outputs), len(fn_raw_out))
        for key_i, output_key in enumerate(fn_expected_outputs.keys()):
            fn_outputs[output_key] = fn_raw_out[key_i]

    err_message = _generate_output_error_msg(
        fn.__name__,
        fn_inputs,
        fn_outputs,
        fn_expected_outputs)

    # Structure first (clearer errors), then value comparison.
    for fn_out, (out_name, expected_out) in zip(fn_outputs.values(), fn_expected_outputs.items()):
        assert_structure(fn_out, expected_out, out_name)
        correct_data = does_data_match(expected_out, fn_out)
        assert correct_data, err_message
from collections import OrderedDict
import copy
import pandas as pd
import numpy as np
from datetime import date, timedelta
# Set the float display format
# NOTE(review): this mutates global pandas display state for every importer
# of this module — presumably intentional for notebook output; confirm.
pd.options.display.float_format = '{:.8f}'.format
# DO NOT MODIFY - THE CODE BELOW CONTAINS HELPER CODE TO TEST YOUR PROJECT
def _generate_output_error_msg(fn_name, fn_inputs, fn_outputs, fn_expected_outputs):
formatted_inputs = []
formatted_outputs = []
formatted_expected_outputs = []
for input_name, input_value in fn_inputs.items():
formatted_outputs.append('INPUT {}:\n{}\n'.format(
input_name, str(input_value)))
for output_name, output_value in fn_outputs.items():
formatted_outputs.append('OUTPUT {}:\n{}\n'.format(
output_name, str(output_value)))
for expected_output_name, expected_output_value in fn_expected_outputs.items():
formatted_expected_outputs.append('EXPECTED OUTPUT FOR {}:\n{}\n'.format(
expected_output_name, str(expected_output_value)))
return 'Wrong value for {}.\n' \
'{}\n' \
'{}\n' \
'{}' \
.format(
fn_name,
'\n'.join(formatted_inputs),
'\n'.join(formatted_outputs),
'\n'.join(formatted_expected_outputs))
def _is_equal(x, y):
is_equal = False
if isinstance(x, pd.DataFrame) or isinstance(y, pd.Series):
is_equal = x.equals(y)
elif isinstance(x, np.ndarray):
is_equal = np.array_equal(x, y)
elif isinstance(x, list):
if len(x) == len(y):
for x_item, y_item in zip(x, y):
if not _is_equal(x_item, y_item):
break
else:
is_equal = True
else:
is_equal = x == y
return is_equal
def project_test(func):
    """Decorator that prints 'Tests Passed' after *func* runs cleanly.

    Improvements: forwards keyword arguments (the original accepted only
    positional ones) and applies functools.wraps so the wrapped test keeps
    its ``__name__`` and docstring.

    :param callable func: the test function to wrap.
    :return callable: the wrapped function, returning func's result.
    """
    from functools import wraps

    @wraps(func)
    def func_wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        print('Tests Passed')
        return result
    return func_wrapper
def generate_random_tickers(n_tickers=None):
    """Generate a list of random uppercase ticker symbols.

    :param int n_tickers: (optional) number of tickers to generate;
        defaults to a random count in [8, 13].
    :return list[str]: tickers of 3-4 uppercase letters each.
    """
    MIN_LEN, MAX_LEN = 3, 5
    if not n_tickers:
        n_tickers = np.random.randint(8, 14)
    # Draw all candidate character codes and per-ticker lengths up front.
    char_codes = np.random.randint(ord('A'), ord('Z') + 1, (n_tickers, MAX_LEN))
    lengths = np.random.randint(MIN_LEN, MAX_LEN, n_tickers)
    return [
        ''.join(chr(code) for code in row[:length])
        for row, length in zip(char_codes, lengths)
    ]
def generate_random_dates(n_days=None):
    """Generate a list of consecutive calendar dates with a random start.

    :param int n_days: (optional) number of dates; defaults to a random
        count in [14, 19].
    :return list[datetime.date]: n_days consecutive dates.
    """
    if not n_days:
        n_days = np.random.randint(14, 20)
    # Pick a random start date; the day is capped at 28 so it is valid
    # in every month.
    first = date(
        np.random.randint(1999, 2017),
        np.random.randint(1, 12),
        np.random.randint(1, 29),
    )
    return [first + timedelta(days=offset) for offset in range(n_days)]
def assert_structure(received_obj, expected_obj, obj_name):
    """Assert that *received_obj* structurally matches *expected_obj*.

    Checks type, then shape (or length), and for pandas objects also the
    column/index sets, the element types of those labels, and per-column
    dtypes. Values are NOT compared here — see does_data_match.

    :param received_obj: value produced by the function under test.
    :param expected_obj: reference value defining the expected structure.
    :param str obj_name: output name used in the assertion messages.
    """
    assert isinstance(received_obj, type(expected_obj)), \
        'Wrong type for output {}. Got {}, expected {}'.format(obj_name, type(received_obj), type(expected_obj))

    # Prefer .shape when available (numpy/pandas); fall back to len().
    if hasattr(expected_obj, 'shape'):
        assert received_obj.shape == expected_obj.shape, \
            'Wrong shape for output {}. Got {}, expected {}'.format(obj_name, received_obj.shape, expected_obj.shape)
    elif hasattr(expected_obj, '__len__'):
        assert len(received_obj) == len(expected_obj), \
            'Wrong len for output {}. Got {}, expected {}'.format(obj_name, len(received_obj), len(expected_obj))

    if type(expected_obj) == pd.DataFrame:
        assert set(received_obj.columns) == set(expected_obj.columns), \
            'Incorrect columns for output {}\n' \
            'COLUMNS: {}\n' \
            'EXPECTED COLUMNS: {}'.format(obj_name, sorted(received_obj.columns), sorted(expected_obj.columns))
        # This is to catch a case where __equal__ says it's equal between different types
        assert set([type(i) for i in received_obj.columns]) == set([type(i) for i in expected_obj.columns]), \
            'Incorrect types in columns for output {}\n' \
            'COLUMNS: {}\n' \
            'EXPECTED COLUMNS: {}'.format(obj_name, sorted(received_obj.columns), sorted(expected_obj.columns))
        for column in expected_obj.columns:
            assert received_obj[column].dtype == expected_obj[column].dtype, \
                'Incorrect type for output {}, column {}\n' \
                'Type: {}\n' \
                'EXPECTED Type: {}'.format(obj_name, column, received_obj[column].dtype, expected_obj[column].dtype)

    if type(expected_obj) in {pd.DataFrame, pd.Series}:
        assert set(received_obj.index) == set(expected_obj.index), \
            'Incorrect indices for output {}\n' \
            'INDICES: {}\n' \
            'EXPECTED INDICES: {}'.format(obj_name, sorted(received_obj.index), sorted(expected_obj.index))
        # This is to catch a case where __equal__ says it's equal between different types
        assert set([type(i) for i in received_obj.index]) == set([type(i) for i in expected_obj.index]), \
            'Incorrect types in indices for output {}\n' \
            'INDICES: {}\n' \
            'EXPECTED INDICES: {}'.format(obj_name, sorted(received_obj.index), sorted(expected_obj.index))
def does_data_match(obj_a, obj_b):
    """Return truthy when the two result objects hold the same data.

    DataFrames/Series are aligned by sorting columns and indices first,
    then compared with ``np.isclose`` (NaNs compare equal); plain ``==``
    is the fallback for non-numeric data.

    Fixes:
    - ``collections.Iterable`` was removed in Python 3.10; use
      ``collections.abc.Iterable`` instead.
    - positional ``sort_index(1)`` is deprecated in pandas; use ``axis=1``.

    :param obj_a: expected object (drives the type dispatch).
    :param obj_b: received object.
    :return: truthy when the data match.
    """
    from collections.abc import Iterable

    if type(obj_a) == pd.DataFrame:
        # Sort Columns
        obj_b = obj_b.sort_index(axis=1)
        obj_a = obj_a.sort_index(axis=1)
    if type(obj_a) in {pd.DataFrame, pd.Series}:
        # Sort Indices
        obj_b = obj_b.sort_index()
        obj_a = obj_a.sort_index()
    try:
        data_is_close = np.isclose(obj_b, obj_a, equal_nan=True)
    except TypeError:
        # Non-numeric data: fall back to plain equality.
        data_is_close = obj_b == obj_a
    else:
        if isinstance(obj_a, Iterable):
            data_is_close = data_is_close.all()
    return data_is_close
def assert_output(fn, fn_inputs, fn_expected_outputs, check_parameter_changes=True):
    """Call *fn* with **fn_inputs and assert its output(s) match expectations.

    :param callable fn: the function under test.
    :param dict fn_inputs: keyword arguments for fn.
    :param OrderedDict fn_expected_outputs: expected outputs keyed by name;
        more than one entry means fn must return a tuple of that length.
    :param bool check_parameter_changes: when True, fn is called on a deep
        copy of the inputs and any in-place mutation of them is an error.
    """
    assert type(fn_expected_outputs) == OrderedDict

    if check_parameter_changes:
        # Deep-copy inputs so mutation by fn can be detected afterwards.
        fn_inputs_passed_in = copy.deepcopy(fn_inputs)
    else:
        fn_inputs_passed_in = fn_inputs
    fn_raw_out = fn(**fn_inputs_passed_in)

    # Check if inputs have changed
    if check_parameter_changes:
        for input_name, input_value in fn_inputs.items():
            passed_in_unchanged = _is_equal(input_value, fn_inputs_passed_in[input_name])
            assert passed_in_unchanged, 'Input parameter "{}" has been modified inside the function. ' \
                                        'The function shouldn\'t modify the function parameters.'.format(input_name)

    # Map the raw return value(s) onto the expected-output names.
    fn_outputs = OrderedDict()
    if len(fn_expected_outputs) == 1:
        fn_outputs[list(fn_expected_outputs)[0]] = fn_raw_out
    elif len(fn_expected_outputs) > 1:
        assert type(fn_raw_out) == tuple,\
            'Expecting function to return tuple, got type {}'.format(type(fn_raw_out))
        assert len(fn_raw_out) == len(fn_expected_outputs),\
            'Expected {} outputs in tuple, only found {} outputs'.format(len(fn_expected_outputs), len(fn_raw_out))
        for key_i, output_key in enumerate(fn_expected_outputs.keys()):
            fn_outputs[output_key] = fn_raw_out[key_i]

    err_message = _generate_output_error_msg(
        fn.__name__,
        fn_inputs,
        fn_outputs,
        fn_expected_outputs)

    # Structure first (clearer errors), then value comparison.
    for fn_out, (out_name, expected_out) in zip(fn_outputs.values(), fn_expected_outputs.items()):
        assert_structure(fn_out, expected_out, out_name)
        correct_data = does_data_match(expected_out, fn_out)
        assert correct_data, err_message
##
# Import Modules
#
from Ffs import Ffs
import Section
import subprocess
import Common.LongFilePathOs as os
from GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import CompressSectionClassObject
from Common.DataType import *
## generate compress section
#
#
class CompressSection (CompressSectionClassObject) :

    ## compress types: PI standard and non PI standard
    CompTypeDict = {
        'PI_STD' : 'PI_STD',
        'PI_NONE' : 'PI_NONE'
    }

    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        CompressSectionClassObject.__init__(self)

    ## GenSection() method
    #
    #   Generate compressed section
    #
    #   @param  self        The object pointer
    #   @param  OutputPath  Where to place output file
    #   @param  ModuleName  Which module this section belongs to
    #   @param  SecNum      Index of section
    #   @param  KeyStringList  Filter for inputs of section generation
    #   @param  FfsInf      FfsInfStatement object that contains this section data
    #   @param  Dict        dictionary contains macro and its value
    #   @retval tuple       (Generated file name, section alignment)
    #
    def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = {}, IsMakefile = False):
        # NOTE(review): 'Dict = {}' is a shared mutable default argument; it
        # is only passed through here, but 'Dict=None' would be safer.
        if FfsInf is not None:
            # Expand macros using the owning INF statement's context.
            self.CompType = FfsInf.__ExtendMacro__(self.CompType)
            self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)

        SectFiles = tuple()
        SectAlign = []
        Index = 0
        MaxAlign = None
        # Generate every child section first, tracking each one's alignment.
        for Sect in self.SectionList:
            Index = Index + 1
            SecIndex = '%s.%d' %(SecNum, Index)
            ReturnSectList, AlignValue = Sect.GenSection(OutputPath, ModuleName, SecIndex, KeyStringList, FfsInf, Dict, IsMakefile=IsMakefile)
            if AlignValue is not None:
                if MaxAlign is None:
                    MaxAlign = AlignValue
                if GenFdsGlobalVariable.GetAlignment (AlignValue) > GenFdsGlobalVariable.GetAlignment (MaxAlign):
                    MaxAlign = AlignValue
            # NOTE(review): MaxAlign is computed above but never read after
            # the loop in this method — possibly dead code; confirm.
            if ReturnSectList != []:
                if AlignValue is None:
                    AlignValue = "1"
                for FileData in ReturnSectList:
                    SectFiles += (FileData,)
                    SectAlign.append(AlignValue)

        OutputFile = OutputPath + \
                     os.sep + \
                     ModuleName + \
                     SUP_MODULE_SEC + \
                     SecNum + \
                     Ffs.SectionSuffix['COMPRESS']
        OutputFile = os.path.normpath(OutputFile)
        DummyFile = OutputFile + '.dummy'
        # First concatenate all child sections into a dummy file, then wrap
        # the result in a compression section header.
        GenFdsGlobalVariable.GenerateSection(DummyFile, SectFiles, InputAlign=SectAlign, IsMakefile=IsMakefile)
        GenFdsGlobalVariable.GenerateSection(OutputFile, [DummyFile], Section.Section.SectionType['COMPRESS'],
                                             CompressionType=self.CompTypeDict[self.CompType], IsMakefile=IsMakefile)
        OutputFileList = []
        OutputFileList.append(OutputFile)
        return OutputFileList, self.Alignment
##
# Import Modules
#
from Ffs import Ffs
import Section
import subprocess
import Common.LongFilePathOs as os
from GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import CompressSectionClassObject
from Common.DataType import *
## generate compress section
#
#
class CompressSection (CompressSectionClassObject) :

    ## compress types: PI standard and non PI standard
    CompTypeDict = {
        'PI_STD' : 'PI_STD',
        'PI_NONE' : 'PI_NONE'
    }

    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        CompressSectionClassObject.__init__(self)

    ## GenSection() method
    #
    #   Generate compressed section
    #
    #   @param  self        The object pointer
    #   @param  OutputPath  Where to place output file
    #   @param  ModuleName  Which module this section belongs to
    #   @param  SecNum      Index of section
    #   @param  KeyStringList  Filter for inputs of section generation
    #   @param  FfsInf      FfsInfStatement object that contains this section data
    #   @param  Dict        dictionary contains macro and its value
    #   @retval tuple       (Generated file name, section alignment)
    #
    def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = {}, IsMakefile = False):
        # NOTE(review): 'Dict = {}' is a shared mutable default argument; it
        # is only passed through here, but 'Dict=None' would be safer.
        if FfsInf is not None:
            # Expand macros using the owning INF statement's context.
            self.CompType = FfsInf.__ExtendMacro__(self.CompType)
            self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)

        SectFiles = tuple()
        SectAlign = []
        Index = 0
        MaxAlign = None
        # Generate every child section first, tracking each one's alignment.
        for Sect in self.SectionList:
            Index = Index + 1
            SecIndex = '%s.%d' %(SecNum, Index)
            ReturnSectList, AlignValue = Sect.GenSection(OutputPath, ModuleName, SecIndex, KeyStringList, FfsInf, Dict, IsMakefile=IsMakefile)
            if AlignValue is not None:
                if MaxAlign is None:
                    MaxAlign = AlignValue
                if GenFdsGlobalVariable.GetAlignment (AlignValue) > GenFdsGlobalVariable.GetAlignment (MaxAlign):
                    MaxAlign = AlignValue
            # NOTE(review): MaxAlign is computed above but never read after
            # the loop in this method — possibly dead code; confirm.
            if ReturnSectList != []:
                if AlignValue is None:
                    AlignValue = "1"
                for FileData in ReturnSectList:
                    SectFiles += (FileData,)
                    SectAlign.append(AlignValue)

        OutputFile = OutputPath + \
                     os.sep + \
                     ModuleName + \
                     SUP_MODULE_SEC + \
                     SecNum + \
                     Ffs.SectionSuffix['COMPRESS']
        OutputFile = os.path.normpath(OutputFile)
        DummyFile = OutputFile + '.dummy'
        # First concatenate all child sections into a dummy file, then wrap
        # the result in a compression section header.
        GenFdsGlobalVariable.GenerateSection(DummyFile, SectFiles, InputAlign=SectAlign, IsMakefile=IsMakefile)
        GenFdsGlobalVariable.GenerateSection(OutputFile, [DummyFile], Section.Section.SectionType['COMPRESS'],
                                             CompressionType=self.CompTypeDict[self.CompType], IsMakefile=IsMakefile)
        OutputFileList = []
        OutputFileList.append(OutputFile)
        return OutputFileList, self.Alignment
from collections import OrderedDict
from functools import partial
import numpy as np
from numpy.testing import assert_allclose
import pytest
from jax import random
import jax.numpy as jnp
from funsor import Tensor, bint, reals
import numpyro
from numpyro.contrib.control_flow import scan
from numpyro.contrib.funsor import config_enumerate, enum, markov, to_data, to_funsor
from numpyro.contrib.funsor.enum_messenger import NamedMessenger
from numpyro.contrib.funsor.enum_messenger import plate as enum_plate
from numpyro.contrib.funsor.infer_util import log_density
from numpyro.contrib.indexing import Vindex
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
from numpyro.primitives import _PYRO_STACK
def test_gaussian_mixture_model():
    """NUTS on a K=3 Gaussian mixture with discrete assignments recovers
    the true mixture proportions and (sorted) cluster means."""
    K, N = 3, 1000

    def gmm(data):
        mix_proportions = numpyro.sample("phi", dist.Dirichlet(jnp.ones(K)))
        with numpyro.plate("num_clusters", K, dim=-1):
            cluster_means = numpyro.sample("cluster_means", dist.Normal(jnp.arange(K), 1.))
        with numpyro.plate("data", data.shape[0], dim=-1):
            # Per-datapoint discrete cluster assignment.
            assignments = numpyro.sample("assignments", dist.Categorical(mix_proportions))
            numpyro.sample("obs", dist.Normal(cluster_means[assignments], 1.), obs=data)

    # Forward-simulate a data set from known ground-truth parameters.
    true_cluster_means = jnp.array([1., 5., 10.])
    true_mix_proportions = jnp.array([0.1, 0.3, 0.6])
    cluster_assignments = dist.Categorical(true_mix_proportions).sample(random.PRNGKey(0), (N,))
    data = dist.Normal(true_cluster_means[cluster_assignments], 1.0).sample(random.PRNGKey(1))

    nuts_kernel = NUTS(gmm)
    mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)
    mcmc.run(random.PRNGKey(2), data)
    samples = mcmc.get_samples()
    # Sorting makes the check invariant to cluster label permutation.
    assert_allclose(samples["phi"].mean(0).sort(), true_mix_proportions, atol=0.05)
    assert_allclose(samples["cluster_means"].mean(0).sort(), true_cluster_means, atol=0.2)
def test_bernoulli_latent_model():
    """Model with two chained Bernoulli latents (y -> z) feeding a Gaussian
    likelihood; inference recovers the true y_prob."""
    def model(data):
        y_prob = numpyro.sample("y_prob", dist.Beta(1., 1.))
        with numpyro.plate("data", data.shape[0]):
            y = numpyro.sample("y", dist.Bernoulli(y_prob))
            # z's success probability depends on y: 0.75 when y=1, 0.1 when y=0.
            z = numpyro.sample("z", dist.Bernoulli(0.65 * y + 0.1))
            numpyro.sample("obs", dist.Normal(2. * z, 1.), obs=data)

    # Forward-simulate the generative process to build the data set.
    N = 2000
    y_prob = 0.3
    y = dist.Bernoulli(y_prob).sample(random.PRNGKey(0), (N,))
    z = dist.Bernoulli(0.65 * y + 0.1).sample(random.PRNGKey(1))
    data = dist.Normal(2. * z, 1.0).sample(random.PRNGKey(2))

    nuts_kernel = NUTS(model)
    mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)
    mcmc.run(random.PRNGKey(3), data)
    samples = mcmc.get_samples()
    assert_allclose(samples["y_prob"].mean(0), y_prob, atol=0.05)
def test_change_point():
    """Change-point model: two Exponential-prior Poisson rates switching at
    a Categorical time tau; posterior rate means match reference values."""
    def model(count_data):
        n_count_data = count_data.shape[0]
        # Exponential rate set from the empirical mean of the counts.
        alpha = 1 / jnp.mean(count_data.astype(np.float32))
        lambda_1 = numpyro.sample('lambda_1', dist.Exponential(alpha))
        lambda_2 = numpyro.sample('lambda_2', dist.Exponential(alpha))
        # this is the same as DiscreteUniform(0, 69)
        tau = numpyro.sample('tau', dist.Categorical(logits=jnp.zeros(70)))
        idx = jnp.arange(n_count_data)
        # Rate is lambda_1 before the switch point tau, lambda_2 after.
        lambda_ = jnp.where(tau > idx, lambda_1, lambda_2)
        with numpyro.plate("data", n_count_data):
            numpyro.sample('obs', dist.Poisson(lambda_), obs=count_data)

    count_data = jnp.array([
        13, 24, 8, 24, 7, 35, 14, 11, 15, 11, 22, 22, 11, 57, 11,
        19, 29, 6, 19, 12, 22, 12, 18, 72, 32, 9, 7, 13, 19, 23,
        27, 20, 6, 17, 13, 10, 14, 6, 16, 15, 7, 2, 15, 15, 19,
        70, 49, 7, 53, 22, 21, 31, 19, 11, 1, 20, 12, 35, 17, 23,
        17, 4, 2, 31, 30, 13, 27, 0, 39, 37, 5, 14, 13, 22,
    ])
    kernel = NUTS(model)
    mcmc = MCMC(kernel, num_warmup=500, num_samples=500)
    mcmc.run(random.PRNGKey(0), count_data)
    samples = mcmc.get_samples()
    assert_allclose(samples["lambda_1"].mean(0), 18., atol=1.)
    assert_allclose(samples["lambda_2"].mean(0), 22.5, atol=1.5)
def test_gaussian_hmm():
    """Smoke test: a discrete-state HMM with Gaussian emissions written with
    the markov() handler; only checks that MCMC runs to completion (no
    assertions on the samples)."""
    dim = 4
    num_steps = 10

    def model(data):
        with numpyro.plate("states", dim):
            transition = numpyro.sample("transition", dist.Dirichlet(jnp.ones(dim)))
            emission_loc = numpyro.sample("emission_loc", dist.Normal(0, 1))
            emission_scale = numpyro.sample("emission_scale", dist.LogNormal(0, 1))
        trans_prob = numpyro.sample("initialize", dist.Dirichlet(jnp.ones(dim)))
        # markov() bounds the dependency window of the discrete chain.
        for t, y in markov(enumerate(data)):
            x = numpyro.sample("x_{}".format(t), dist.Categorical(trans_prob))
            numpyro.sample("y_{}".format(t), dist.Normal(emission_loc[x], emission_scale[x]), obs=y)
            trans_prob = transition[x]

    def _generate_data():
        transition_probs = np.random.rand(dim, dim)
        transition_probs = transition_probs / transition_probs.sum(-1, keepdims=True)
        emissions_loc = np.arange(dim)
        emissions_scale = 1.
        # NOTE(review): initial state drawn from range(3) although dim == 4;
        # harmless for a smoke test but looks unintentional — confirm.
        state = np.random.choice(3)
        obs = [np.random.normal(emissions_loc[state], emissions_scale)]
        for _ in range(num_steps - 1):
            state = np.random.choice(dim, p=transition_probs[state])
            obs.append(np.random.normal(emissions_loc[state], emissions_scale))
        return np.stack(obs)

    data = _generate_data()
    nuts_kernel = NUTS(model)
    mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)
    mcmc.run(random.PRNGKey(0), data)
def test_iteration():
    """Inside a NamedMessenger, dims allocated for markov-loop variables
    alternate between iterations ((2,) vs (2, 1, 1) per the asserts below)
    while a fixed name 'a' keeps a stable (2, 1) shape."""
    def testing():
        for i in markov(range(5)):
            v1 = to_data(Tensor(jnp.ones(2), OrderedDict([(str(i), bint(2))]), 'real'))
            v2 = to_data(Tensor(jnp.zeros(2), OrderedDict([('a', bint(2))]), 'real'))
            fv1 = to_funsor(v1, reals())
            fv2 = to_funsor(v2, reals())
            print(i, v1.shape)  # shapes should alternate
            if i % 2 == 0:
                assert v1.shape == (2,)
            else:
                assert v1.shape == (2, 1, 1)
            assert v2.shape == (2, 1)
            print(i, fv1.inputs)
            print('a', v2.shape)  # shapes should stay the same
            print('a', fv2.inputs)

    with NamedMessenger():
        testing()
def test_nesting():
def testing():
with markov():
v1 = to_data(Tensor(jnp.ones(2), OrderedDict([("1", bint(2))]), 'real'))
print(1, v1.shape) # shapes should alternate
assert v1.shape == (2,)
with markov():
v2 = to_data(Tensor(jnp.ones(2), OrderedDict([("2", bint(2))]), 'real'))
print(2, v2.shape) # shapes should alternate
assert v2.shape == (2, 1)
with markov():
v3 = to_data(Tensor(jnp.ones(2), OrderedDict([("3", bint(2))]), 'real'))
print(3, v3.shape) # shapes should alternate
assert v3.shape == (2,)
with markov():
v4 = to_data(Tensor(jnp.ones(2), OrderedDict([("4", bint(2))]), 'real'))
print(4, v4.shape) # shapes should alternate
assert v4.shape == (2, 1)
with NamedMessenger():
testing()
def test_staggered():
def testing():
for i in markov(range(12)):
if i % 4 == 0:
v2 = to_data(Tensor(jnp.zeros(2), OrderedDict([('a', bint(2))]), 'real'))
fv2 = to_funsor(v2, reals())
assert v2.shape == (2,)
print('a', v2.shape)
print('a', fv2.inputs)
with NamedMessenger():
testing()
def test_nested_plate():
with enum(first_available_dim=-3):
with enum_plate("a", 5):
with enum_plate("b", 2):
x = numpyro.sample("x", dist.Normal(0, 1), rng_key=random.PRNGKey(0))
assert x.shape == (2, 5)
@pytest.mark.parametrize('num_steps', [1, 10, 11])
def test_scan_enum_one_latent(num_steps):
data = random.normal(random.PRNGKey(0), (num_steps,))
init_probs = jnp.array([0.6, 0.4])
transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])
locs = jnp.array([-1.0, 1.0])
def model(data):
x = None
for i, y in markov(enumerate(data)):
probs = init_probs if x is None else transition_probs[x]
x = numpyro.sample(f"x_{i}", dist.Categorical(probs))
numpyro.sample(f"y_{i}", dist.Normal(locs[x], 1), obs=y)
return x
def fun_model(data):
def transition_fn(x, y):
probs = init_probs if x is None else transition_probs[x]
x = numpyro.sample("x", dist.Categorical(probs))
numpyro.sample("y", dist.Normal(locs[x], 1), obs=y)
return x, None
x, collections = scan(transition_fn, None, data)
assert collections is None
return x
actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data,), {}, {})[0]
expected_log_joint = log_density(enum(config_enumerate(model)), (data,), {}, {})[0]
assert_allclose(actual_log_joint, expected_log_joint)
actual_last_x = enum(config_enumerate(fun_model))(data)
expected_last_x = enum(config_enumerate(model))(data)
assert_allclose(actual_last_x, expected_last_x)
def test_scan_enum_plate():
N, D = 10, 3
data = random.normal(random.PRNGKey(0), (N, D))
init_probs = jnp.array([0.6, 0.4])
transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])
locs = jnp.array([-1.0, 1.0])
def model(data):
x = None
D_plate = numpyro.plate("D", D, dim=-1)
for i, y in markov(enumerate(data)):
with D_plate:
probs = init_probs if x is None else transition_probs[x]
x = numpyro.sample(f"x_{i}", dist.Categorical(probs))
numpyro.sample(f"y_{i}", dist.Normal(locs[x], 1), obs=y)
def fun_model(data):
def transition_fn(x, y):
probs = init_probs if x is None else transition_probs[x]
with numpyro.plate("D", D, dim=-1):
x = numpyro.sample("x", dist.Categorical(probs))
numpyro.sample("y", dist.Normal(locs[x], 1), obs=y)
return x, None
scan(transition_fn, None, data)
actual_log_joint = log_density(enum(config_enumerate(fun_model), -2), (data,), {}, {})[0]
expected_log_joint = log_density(enum(config_enumerate(model), -2), (data,), {}, {})[0]
assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_enum_separated_plates_same_dim():
N, D1, D2 = 10, 3, 4
data = random.normal(random.PRNGKey(0), (N, D1 + D2))
data1, data2 = data[:, :D1], data[:, D1:]
init_probs = jnp.array([0.6, 0.4])
transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])
locs = jnp.array([-1.0, 1.0])
def model(data1, data2):
x = None
D1_plate = numpyro.plate("D1", D1, dim=-1)
D2_plate = numpyro.plate("D2", D2, dim=-1)
for i, (y1, y2) in markov(enumerate(zip(data1, data2))):
probs = init_probs if x is None else transition_probs[x]
x = numpyro.sample(f"x_{i}", dist.Categorical(probs))
with D1_plate:
numpyro.sample(f"y1_{i}", dist.Normal(locs[x], 1), obs=y1)
with D2_plate:
numpyro.sample(f"y2_{i}", dist.Normal(locs[x], 1), obs=y2)
def fun_model(data1, data2):
def transition_fn(x, y):
y1, y2 = y
probs = init_probs if x is None else transition_probs[x]
x = numpyro.sample("x", dist.Categorical(probs))
with numpyro.plate("D1", D1, dim=-1):
numpyro.sample("y1", dist.Normal(locs[x], 1), obs=y1)
with numpyro.plate("D2", D2, dim=-1):
numpyro.sample("y2", dist.Normal(locs[x], 1), obs=y2)
return x, None
scan(transition_fn, None, (data1, data2))
actual_log_joint = log_density(enum(config_enumerate(fun_model), -2), (data1, data2), {}, {})[0]
expected_log_joint = log_density(enum(config_enumerate(model), -2), (data1, data2), {}, {})[0]
assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_enum_separated_plate_discrete():
N, D = 10, 3
data = random.normal(random.PRNGKey(0), (N, D))
transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])
locs = jnp.array([[-1.0, 1.0], [2.0, 3.0]])
def model(data):
x = 0
D_plate = numpyro.plate("D", D, dim=-1)
for i, y in markov(enumerate(data)):
probs = transition_probs[x]
x = numpyro.sample(f"x_{i}", dist.Categorical(probs))
with D_plate:
w = numpyro.sample(f"w_{i}", dist.Bernoulli(0.6))
numpyro.sample(f"y_{i}", dist.Normal(Vindex(locs)[x, w], 1), obs=y)
def fun_model(data):
def transition_fn(x, y):
probs = transition_probs[x]
x = numpyro.sample("x", dist.Categorical(probs))
with numpyro.plate("D", D, dim=-1):
w = numpyro.sample("w", dist.Bernoulli(0.6))
numpyro.sample("y", dist.Normal(Vindex(locs)[x, w], 1), obs=y)
return x, None
scan(transition_fn, 0, data)
actual_log_joint = log_density(enum(config_enumerate(fun_model), -2), (data,), {}, {})[0]
expected_log_joint = log_density(enum(config_enumerate(model), -2), (data,), {}, {})[0]
assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_enum_discrete_outside():
data = random.normal(random.PRNGKey(0), (10,))
probs = jnp.array([[[0.8, 0.2], [0.1, 0.9]],
[[0.7, 0.3], [0.6, 0.4]]])
locs = jnp.array([-1.0, 1.0])
def model(data):
w = numpyro.sample("w", dist.Bernoulli(0.6))
x = 0
for i, y in markov(enumerate(data)):
x = numpyro.sample(f"x_{i}", dist.Categorical(probs[w, x]))
numpyro.sample(f"y_{i}", dist.Normal(locs[x], 1), obs=y)
def fun_model(data):
w = numpyro.sample("w", dist.Bernoulli(0.6))
def transition_fn(x, y):
x = numpyro.sample("x", dist.Categorical(probs[w, x]))
numpyro.sample("y", dist.Normal(locs[x], 1), obs=y)
return x, None
scan(transition_fn, 0, data)
actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data,), {}, {})[0]
expected_log_joint = log_density(enum(config_enumerate(model)), (data,), {}, {})[0]
assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_enum_two_latents():
num_steps = 11
data = random.normal(random.PRNGKey(0), (num_steps,))
probs_x = jnp.array([[0.8, 0.2], [0.1, 0.9]])
probs_w = jnp.array([[0.7, 0.3], [0.6, 0.4]])
locs = jnp.array([[-1.0, 1.0], [2.0, 3.0]])
def model(data):
x = w = 0
for i, y in markov(enumerate(data)):
x = numpyro.sample(f"x_{i}", dist.Categorical(probs_x[x]))
w = numpyro.sample(f"w_{i}", dist.Categorical(probs_w[w]))
numpyro.sample(f"y_{i}", dist.Normal(locs[w, x], 1), obs=y)
def fun_model(data):
def transition_fn(carry, y):
x, w = carry
x = numpyro.sample("x", dist.Categorical(probs_x[x]))
w = numpyro.sample("w", dist.Categorical(probs_w[w]))
numpyro.sample("y", dist.Normal(locs[w, x], 1), obs=y)
# also test if scan's `ys` are recorded corrected
return (x, w), x
scan(transition_fn, (0, 0), data)
actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data,), {}, {})[0]
expected_log_joint = log_density(enum(config_enumerate(model)), (data,), {}, {})[0]
assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_enum_scan_enum():
num_steps = 11
data_x = random.normal(random.PRNGKey(0), (num_steps,))
data_w = data_x[:-1] + 1
probs_x = jnp.array([[0.8, 0.2], [0.1, 0.9]])
probs_w = jnp.array([[0.7, 0.3], [0.6, 0.4]])
locs_x = jnp.array([-1.0, 1.0])
locs_w = jnp.array([2.0, 3.0])
def model(data_x, data_w):
x = w = 0
for i, y in markov(enumerate(data_x)):
x = numpyro.sample(f"x_{i}", dist.Categorical(probs_x[x]))
numpyro.sample(f"y_x_{i}", dist.Normal(locs_x[x], 1), obs=y)
for i, y in markov(enumerate(data_w)):
w = numpyro.sample(f"w{i}", dist.Categorical(probs_w[w]))
numpyro.sample(f"y_w_{i}", dist.Normal(locs_w[w], 1), obs=y)
def fun_model(data_x, data_w):
def transition_fn(name, probs, locs, x, y):
x = numpyro.sample(name, dist.Categorical(probs[x]))
numpyro.sample("y_" + name, dist.Normal(locs[x], 1), obs=y)
return x, None
scan(partial(transition_fn, "x", probs_x, locs_x), 0, data_x)
scan(partial(transition_fn, "w", probs_w, locs_w), 0, data_w)
actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data_x, data_w), {}, {})[0]
expected_log_joint = log_density(enum(config_enumerate(model)), (data_x, data_w), {}, {})[0]
assert_allclose(actual_log_joint, expected_log_joint)
def test_missing_plate(monkeypatch):
K, N = 3, 1000
def gmm(data):
mix_proportions = numpyro.sample("phi", dist.Dirichlet(jnp.ones(K)))
# plate/to_event is missing here
cluster_means = numpyro.sample("cluster_means", dist.Normal(jnp.arange(K), 1.))
with numpyro.plate("data", data.shape[0], dim=-1):
assignments = numpyro.sample("assignments", dist.Categorical(mix_proportions))
numpyro.sample("obs", dist.Normal(cluster_means[assignments], 1.), obs=data)
true_cluster_means = jnp.array([1., 5., 10.])
true_mix_proportions = jnp.array([0.1, 0.3, 0.6])
cluster_assignments = dist.Categorical(true_mix_proportions).sample(random.PRNGKey(0), (N,))
data = dist.Normal(true_cluster_means[cluster_assignments], 1.0).sample(random.PRNGKey(1))
nuts_kernel = NUTS(gmm)
mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)
with pytest.raises(AssertionError, match="Missing plate statement"):
mcmc.run(random.PRNGKey(2), data)
monkeypatch.setattr(numpyro.infer.util, "_validate_model", lambda model_trace: None)
with pytest.raises(Exception):
mcmc.run(random.PRNGKey(2), data)
assert len(_PYRO_STACK) == 0 | test/contrib/test_funsor.py |
from collections import OrderedDict
from functools import partial
import numpy as np
from numpy.testing import assert_allclose
import pytest
from jax import random
import jax.numpy as jnp
from funsor import Tensor, bint, reals
import numpyro
from numpyro.contrib.control_flow import scan
from numpyro.contrib.funsor import config_enumerate, enum, markov, to_data, to_funsor
from numpyro.contrib.funsor.enum_messenger import NamedMessenger
from numpyro.contrib.funsor.enum_messenger import plate as enum_plate
from numpyro.contrib.funsor.infer_util import log_density
from numpyro.contrib.indexing import Vindex
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
from numpyro.primitives import _PYRO_STACK
def test_gaussian_mixture_model():
    """NUTS on a K-component GMM: the discrete `assignments` site is
    marginalized out by funsor enumeration; recovered mixture weights and
    cluster means are checked against the generating values."""
    K, N = 3, 1000
    def gmm(data):
        mix_proportions = numpyro.sample("phi", dist.Dirichlet(jnp.ones(K)))
        with numpyro.plate("num_clusters", K, dim=-1):
            cluster_means = numpyro.sample("cluster_means", dist.Normal(jnp.arange(K), 1.))
        with numpyro.plate("data", data.shape[0], dim=-1):
            assignments = numpyro.sample("assignments", dist.Categorical(mix_proportions))
            numpyro.sample("obs", dist.Normal(cluster_means[assignments], 1.), obs=data)
    true_cluster_means = jnp.array([1., 5., 10.])
    true_mix_proportions = jnp.array([0.1, 0.3, 0.6])
    cluster_assignments = dist.Categorical(true_mix_proportions).sample(random.PRNGKey(0), (N,))
    data = dist.Normal(true_cluster_means[cluster_assignments], 1.0).sample(random.PRNGKey(1))
    nuts_kernel = NUTS(gmm)
    mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)
    mcmc.run(random.PRNGKey(2), data)
    samples = mcmc.get_samples()
    # sort() because mixture components are exchangeable (label switching)
    assert_allclose(samples["phi"].mean(0).sort(), true_mix_proportions, atol=0.05)
    assert_allclose(samples["cluster_means"].mean(0).sort(), true_cluster_means, atol=0.2)
def test_bernoulli_latent_model():
    """Chain of two latent Bernoullis (z depends on y) observed through a
    Normal; both discrete sites are enumerated out and the posterior mean of
    `y_prob` is checked against the generating probability."""
    def model(data):
        y_prob = numpyro.sample("y_prob", dist.Beta(1., 1.))
        with numpyro.plate("data", data.shape[0]):
            y = numpyro.sample("y", dist.Bernoulli(y_prob))
            # z's success probability is a deterministic function of y
            z = numpyro.sample("z", dist.Bernoulli(0.65 * y + 0.1))
            numpyro.sample("obs", dist.Normal(2. * z, 1.), obs=data)
    N = 2000
    y_prob = 0.3
    y = dist.Bernoulli(y_prob).sample(random.PRNGKey(0), (N,))
    z = dist.Bernoulli(0.65 * y + 0.1).sample(random.PRNGKey(1))
    data = dist.Normal(2. * z, 1.0).sample(random.PRNGKey(2))
    nuts_kernel = NUTS(model)
    mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)
    mcmc.run(random.PRNGKey(3), data)
    samples = mcmc.get_samples()
    assert_allclose(samples["y_prob"].mean(0), y_prob, atol=0.05)
def test_change_point():
    """Coal-mining-disasters style changepoint model: the discrete switch
    day `tau` is enumerated out so NUTS only samples the two Poisson rates;
    posterior rate means are checked against known reference values."""
    def model(count_data):
        n_count_data = count_data.shape[0]
        alpha = 1 / jnp.mean(count_data.astype(np.float32))
        lambda_1 = numpyro.sample('lambda_1', dist.Exponential(alpha))
        lambda_2 = numpyro.sample('lambda_2', dist.Exponential(alpha))
        # this is the same as DiscreteUniform(0, 69)
        tau = numpyro.sample('tau', dist.Categorical(logits=jnp.zeros(70)))
        idx = jnp.arange(n_count_data)
        # rate is lambda_1 before day tau, lambda_2 from day tau onward
        lambda_ = jnp.where(tau > idx, lambda_1, lambda_2)
        with numpyro.plate("data", n_count_data):
            numpyro.sample('obs', dist.Poisson(lambda_), obs=count_data)
    count_data = jnp.array([
        13, 24, 8, 24, 7, 35, 14, 11, 15, 11, 22, 22, 11, 57, 11,
        19, 29, 6, 19, 12, 22, 12, 18, 72, 32, 9, 7, 13, 19, 23,
        27, 20, 6, 17, 13, 10, 14, 6, 16, 15, 7, 2, 15, 15, 19,
        70, 49, 7, 53, 22, 21, 31, 19, 11, 1, 20, 12, 35, 17, 23,
        17, 4, 2, 31, 30, 13, 27, 0, 39, 37, 5, 14, 13, 22,
    ])
    kernel = NUTS(model)
    mcmc = MCMC(kernel, num_warmup=500, num_samples=500)
    mcmc.run(random.PRNGKey(0), count_data)
    samples = mcmc.get_samples()
    assert_allclose(samples["lambda_1"].mean(0), 18., atol=1.)
    assert_allclose(samples["lambda_2"].mean(0), 22.5, atol=1.5)
def test_gaussian_hmm():
    """Smoke test: NUTS over a Gaussian-emission HMM whose hidden states are
    enumerated out via `markov`; only checks that inference runs."""
    dim = 4
    num_steps = 10
    def model(data):
        with numpyro.plate("states", dim):
            transition = numpyro.sample("transition", dist.Dirichlet(jnp.ones(dim)))
            emission_loc = numpyro.sample("emission_loc", dist.Normal(0, 1))
            emission_scale = numpyro.sample("emission_scale", dist.LogNormal(0, 1))
        trans_prob = numpyro.sample("initialize", dist.Dirichlet(jnp.ones(dim)))
        # markov() limits the enumeration dependency window to adjacent steps
        for t, y in markov(enumerate(data)):
            x = numpyro.sample("x_{}".format(t), dist.Categorical(trans_prob))
            numpyro.sample("y_{}".format(t), dist.Normal(emission_loc[x], emission_scale[x]), obs=y)
            trans_prob = transition[x]
    def _generate_data():
        # sample a random row-normalized transition matrix and roll out a chain
        transition_probs = np.random.rand(dim, dim)
        transition_probs = transition_probs / transition_probs.sum(-1, keepdims=True)
        emissions_loc = np.arange(dim)
        emissions_scale = 1.
        state = np.random.choice(3)
        obs = [np.random.normal(emissions_loc[state], emissions_scale)]
        for _ in range(num_steps - 1):
            state = np.random.choice(dim, p=transition_probs[state])
            obs.append(np.random.normal(emissions_loc[state], emissions_scale))
        return np.stack(obs)
    data = _generate_data()
    nuts_kernel = NUTS(model)
    mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)
    mcmc.run(random.PRNGKey(0), data)
def test_iteration():
    """funsor dim allocation inside a markov loop: per-iteration names
    (str(i)) alternate between two dim slots, while the shared name 'a'
    keeps a stable dim across all iterations."""
    def testing():
        for i in markov(range(5)):
            v1 = to_data(Tensor(jnp.ones(2), OrderedDict([(str(i), bint(2))]), 'real'))
            v2 = to_data(Tensor(jnp.zeros(2), OrderedDict([('a', bint(2))]), 'real'))
            fv1 = to_funsor(v1, reals())
            fv2 = to_funsor(v2, reals())
            print(i, v1.shape)  # shapes should alternate
            if i % 2 == 0:
                assert v1.shape == (2,)
            else:
                assert v1.shape == (2, 1, 1)
            assert v2.shape == (2, 1)
            print(i, fv1.inputs)
            print('a', v2.shape)  # shapes should stay the same
            print('a', fv2.inputs)
    with NamedMessenger():
        testing()
def test_nesting():
    """Nested markov contexts: dim allocation alternates with nesting depth,
    so shapes cycle (2,) / (2, 1) / (2,) / (2, 1)."""
    def testing():
        with markov():
            v1 = to_data(Tensor(jnp.ones(2), OrderedDict([("1", bint(2))]), 'real'))
            print(1, v1.shape)  # shapes should alternate
            assert v1.shape == (2,)
            with markov():
                v2 = to_data(Tensor(jnp.ones(2), OrderedDict([("2", bint(2))]), 'real'))
                print(2, v2.shape)  # shapes should alternate
                assert v2.shape == (2, 1)
                with markov():
                    v3 = to_data(Tensor(jnp.ones(2), OrderedDict([("3", bint(2))]), 'real'))
                    print(3, v3.shape)  # shapes should alternate
                    assert v3.shape == (2,)
                    with markov():
                        v4 = to_data(Tensor(jnp.ones(2), OrderedDict([("4", bint(2))]), 'real'))
                        print(4, v4.shape)  # shapes should alternate
                        assert v4.shape == (2, 1)
    with NamedMessenger():
        testing()
def test_staggered():
    """A name used only every 4th markov iteration still gets a stable dim:
    'a' always lands in the rightmost slot, shape (2,)."""
    def testing():
        for i in markov(range(12)):
            if i % 4 == 0:
                v2 = to_data(Tensor(jnp.zeros(2), OrderedDict([('a', bint(2))]), 'real'))
                fv2 = to_funsor(v2, reals())
                assert v2.shape == (2,)
                print('a', v2.shape)
                print('a', fv2.inputs)
    with NamedMessenger():
        testing()
def test_nested_plate():
    """Nested enum plates allocate batch dims right-to-left starting at
    first_available_dim, giving x batch shape (b=2, a=5)."""
    with enum(first_available_dim=-3):
        with enum_plate("a", 5):
            with enum_plate("b", 2):
                x = numpyro.sample("x", dist.Normal(0, 1), rng_key=random.PRNGKey(0))
                assert x.shape == (2, 5)
@pytest.mark.parametrize('num_steps', [1, 10, 11])
def test_scan_enum_one_latent(num_steps):
    """Enumerated HMM written two ways — a Python markov loop and a
    `scan` — must give identical log joints and final states."""
    data = random.normal(random.PRNGKey(0), (num_steps,))
    init_probs = jnp.array([0.6, 0.4])
    transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])
    locs = jnp.array([-1.0, 1.0])
    def model(data):
        x = None
        for i, y in markov(enumerate(data)):
            # first step uses the initial distribution, later steps the transition row
            probs = init_probs if x is None else transition_probs[x]
            x = numpyro.sample(f"x_{i}", dist.Categorical(probs))
            numpyro.sample(f"y_{i}", dist.Normal(locs[x], 1), obs=y)
        return x
    def fun_model(data):
        def transition_fn(x, y):
            probs = init_probs if x is None else transition_probs[x]
            x = numpyro.sample("x", dist.Categorical(probs))
            numpyro.sample("y", dist.Normal(locs[x], 1), obs=y)
            return x, None
        x, collections = scan(transition_fn, None, data)
        assert collections is None
        return x
    actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data,), {}, {})[0]
    expected_log_joint = log_density(enum(config_enumerate(model)), (data,), {}, {})[0]
    assert_allclose(actual_log_joint, expected_log_joint)
    actual_last_x = enum(config_enumerate(fun_model))(data)
    expected_last_x = enum(config_enumerate(model))(data)
    assert_allclose(actual_last_x, expected_last_x)
def test_scan_enum_plate():
    """Enumerated HMM with D independent chains inside a plate: markov-loop
    and scan versions must agree on the log joint (dim -1 reserved for the
    plate, hence first_available_dim=-2)."""
    N, D = 10, 3
    data = random.normal(random.PRNGKey(0), (N, D))
    init_probs = jnp.array([0.6, 0.4])
    transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])
    locs = jnp.array([-1.0, 1.0])
    def model(data):
        x = None
        D_plate = numpyro.plate("D", D, dim=-1)
        for i, y in markov(enumerate(data)):
            with D_plate:
                probs = init_probs if x is None else transition_probs[x]
                x = numpyro.sample(f"x_{i}", dist.Categorical(probs))
                numpyro.sample(f"y_{i}", dist.Normal(locs[x], 1), obs=y)
    def fun_model(data):
        def transition_fn(x, y):
            probs = init_probs if x is None else transition_probs[x]
            with numpyro.plate("D", D, dim=-1):
                x = numpyro.sample("x", dist.Categorical(probs))
                numpyro.sample("y", dist.Normal(locs[x], 1), obs=y)
            return x, None
        scan(transition_fn, None, data)
    actual_log_joint = log_density(enum(config_enumerate(fun_model), -2), (data,), {}, {})[0]
    expected_log_joint = log_density(enum(config_enumerate(model), -2), (data,), {}, {})[0]
    assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_enum_separated_plates_same_dim():
    """Two distinct plates sharing dim -1 over different observation slices:
    markov-loop and scan versions must agree on the log joint."""
    N, D1, D2 = 10, 3, 4
    data = random.normal(random.PRNGKey(0), (N, D1 + D2))
    data1, data2 = data[:, :D1], data[:, D1:]
    init_probs = jnp.array([0.6, 0.4])
    transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])
    locs = jnp.array([-1.0, 1.0])
    def model(data1, data2):
        x = None
        D1_plate = numpyro.plate("D1", D1, dim=-1)
        D2_plate = numpyro.plate("D2", D2, dim=-1)
        for i, (y1, y2) in markov(enumerate(zip(data1, data2))):
            probs = init_probs if x is None else transition_probs[x]
            x = numpyro.sample(f"x_{i}", dist.Categorical(probs))
            with D1_plate:
                numpyro.sample(f"y1_{i}", dist.Normal(locs[x], 1), obs=y1)
            with D2_plate:
                numpyro.sample(f"y2_{i}", dist.Normal(locs[x], 1), obs=y2)
    def fun_model(data1, data2):
        def transition_fn(x, y):
            y1, y2 = y
            probs = init_probs if x is None else transition_probs[x]
            x = numpyro.sample("x", dist.Categorical(probs))
            with numpyro.plate("D1", D1, dim=-1):
                numpyro.sample("y1", dist.Normal(locs[x], 1), obs=y1)
            with numpyro.plate("D2", D2, dim=-1):
                numpyro.sample("y2", dist.Normal(locs[x], 1), obs=y2)
            return x, None
        scan(transition_fn, None, (data1, data2))
    actual_log_joint = log_density(enum(config_enumerate(fun_model), -2), (data1, data2), {}, {})[0]
    expected_log_joint = log_density(enum(config_enumerate(model), -2), (data1, data2), {}, {})[0]
    assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_enum_separated_plate_discrete():
    """Markov chain plus an extra enumerated Bernoulli `w` inside a plate;
    observation indexes locs by both latents via Vindex. Loop and scan
    versions must agree on the log joint."""
    N, D = 10, 3
    data = random.normal(random.PRNGKey(0), (N, D))
    transition_probs = jnp.array([[0.8, 0.2], [0.1, 0.9]])
    locs = jnp.array([[-1.0, 1.0], [2.0, 3.0]])
    def model(data):
        x = 0
        D_plate = numpyro.plate("D", D, dim=-1)
        for i, y in markov(enumerate(data)):
            probs = transition_probs[x]
            x = numpyro.sample(f"x_{i}", dist.Categorical(probs))
            with D_plate:
                w = numpyro.sample(f"w_{i}", dist.Bernoulli(0.6))
                # Vindex broadcasts the (x, w) advanced index over enum dims
                numpyro.sample(f"y_{i}", dist.Normal(Vindex(locs)[x, w], 1), obs=y)
    def fun_model(data):
        def transition_fn(x, y):
            probs = transition_probs[x]
            x = numpyro.sample("x", dist.Categorical(probs))
            with numpyro.plate("D", D, dim=-1):
                w = numpyro.sample("w", dist.Bernoulli(0.6))
                numpyro.sample("y", dist.Normal(Vindex(locs)[x, w], 1), obs=y)
            return x, None
        scan(transition_fn, 0, data)
    actual_log_joint = log_density(enum(config_enumerate(fun_model), -2), (data,), {}, {})[0]
    expected_log_joint = log_density(enum(config_enumerate(model), -2), (data,), {}, {})[0]
    assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_enum_discrete_outside():
    """A global enumerated latent `w` sampled outside the time loop selects
    the transition matrix used inside; loop and scan versions must agree."""
    data = random.normal(random.PRNGKey(0), (10,))
    probs = jnp.array([[[0.8, 0.2], [0.1, 0.9]],
                       [[0.7, 0.3], [0.6, 0.4]]])
    locs = jnp.array([-1.0, 1.0])
    def model(data):
        w = numpyro.sample("w", dist.Bernoulli(0.6))
        x = 0
        for i, y in markov(enumerate(data)):
            x = numpyro.sample(f"x_{i}", dist.Categorical(probs[w, x]))
            numpyro.sample(f"y_{i}", dist.Normal(locs[x], 1), obs=y)
    def fun_model(data):
        w = numpyro.sample("w", dist.Bernoulli(0.6))
        def transition_fn(x, y):
            # closes over the enumerated w sampled outside the scan body
            x = numpyro.sample("x", dist.Categorical(probs[w, x]))
            numpyro.sample("y", dist.Normal(locs[x], 1), obs=y)
            return x, None
        scan(transition_fn, 0, data)
    actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data,), {}, {})[0]
    expected_log_joint = log_density(enum(config_enumerate(model)), (data,), {}, {})[0]
    assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_enum_two_latents():
    """Two parallel enumerated markov chains (x and w) feeding one
    observation; loop and scan versions must agree on the log joint."""
    num_steps = 11
    data = random.normal(random.PRNGKey(0), (num_steps,))
    probs_x = jnp.array([[0.8, 0.2], [0.1, 0.9]])
    probs_w = jnp.array([[0.7, 0.3], [0.6, 0.4]])
    locs = jnp.array([[-1.0, 1.0], [2.0, 3.0]])
    def model(data):
        x = w = 0
        for i, y in markov(enumerate(data)):
            x = numpyro.sample(f"x_{i}", dist.Categorical(probs_x[x]))
            w = numpyro.sample(f"w_{i}", dist.Categorical(probs_w[w]))
            numpyro.sample(f"y_{i}", dist.Normal(locs[w, x], 1), obs=y)
    def fun_model(data):
        def transition_fn(carry, y):
            x, w = carry
            x = numpyro.sample("x", dist.Categorical(probs_x[x]))
            w = numpyro.sample("w", dist.Categorical(probs_w[w]))
            numpyro.sample("y", dist.Normal(locs[w, x], 1), obs=y)
            # also test if scan's `ys` are recorded correctly
            return (x, w), x
        scan(transition_fn, (0, 0), data)
    actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data,), {}, {})[0]
    expected_log_joint = log_density(enum(config_enumerate(model)), (data,), {}, {})[0]
    assert_allclose(actual_log_joint, expected_log_joint)
def test_scan_enum_scan_enum():
    """Two back-to-back enumerated scans in one model (different lengths);
    must match the equivalent pair of markov loops."""
    num_steps = 11
    data_x = random.normal(random.PRNGKey(0), (num_steps,))
    data_w = data_x[:-1] + 1
    probs_x = jnp.array([[0.8, 0.2], [0.1, 0.9]])
    probs_w = jnp.array([[0.7, 0.3], [0.6, 0.4]])
    locs_x = jnp.array([-1.0, 1.0])
    locs_w = jnp.array([2.0, 3.0])
    def model(data_x, data_w):
        x = w = 0
        for i, y in markov(enumerate(data_x)):
            x = numpyro.sample(f"x_{i}", dist.Categorical(probs_x[x]))
            numpyro.sample(f"y_x_{i}", dist.Normal(locs_x[x], 1), obs=y)
        for i, y in markov(enumerate(data_w)):
            w = numpyro.sample(f"w{i}", dist.Categorical(probs_w[w]))
            numpyro.sample(f"y_w_{i}", dist.Normal(locs_w[w], 1), obs=y)
    def fun_model(data_x, data_w):
        # one parameterized transition reused for both chains via partial()
        def transition_fn(name, probs, locs, x, y):
            x = numpyro.sample(name, dist.Categorical(probs[x]))
            numpyro.sample("y_" + name, dist.Normal(locs[x], 1), obs=y)
            return x, None
        scan(partial(transition_fn, "x", probs_x, locs_x), 0, data_x)
        scan(partial(transition_fn, "w", probs_w, locs_w), 0, data_w)
    actual_log_joint = log_density(enum(config_enumerate(fun_model)), (data_x, data_w), {}, {})[0]
    expected_log_joint = log_density(enum(config_enumerate(model)), (data_x, data_w), {}, {})[0]
    assert_allclose(actual_log_joint, expected_log_joint)
def test_missing_plate(monkeypatch):
    """Model validation must flag a batched sample site that is not wrapped
    in a plate; with validation monkeypatched away the run still fails
    downstream, and the Pyro stack must be left clean afterwards."""
    K, N = 3, 1000
    def gmm(data):
        mix_proportions = numpyro.sample("phi", dist.Dirichlet(jnp.ones(K)))
        # plate/to_event is missing here
        cluster_means = numpyro.sample("cluster_means", dist.Normal(jnp.arange(K), 1.))
        with numpyro.plate("data", data.shape[0], dim=-1):
            assignments = numpyro.sample("assignments", dist.Categorical(mix_proportions))
            numpyro.sample("obs", dist.Normal(cluster_means[assignments], 1.), obs=data)
    true_cluster_means = jnp.array([1., 5., 10.])
    true_mix_proportions = jnp.array([0.1, 0.3, 0.6])
    cluster_assignments = dist.Categorical(true_mix_proportions).sample(random.PRNGKey(0), (N,))
    data = dist.Normal(true_cluster_means[cluster_assignments], 1.0).sample(random.PRNGKey(1))
    nuts_kernel = NUTS(gmm)
    mcmc = MCMC(nuts_kernel, num_warmup=500, num_samples=500)
    with pytest.raises(AssertionError, match="Missing plate statement"):
        mcmc.run(random.PRNGKey(2), data)
    # disable validation and confirm the model still errors out later
    monkeypatch.setattr(numpyro.infer.util, "_validate_model", lambda model_trace: None)
    with pytest.raises(Exception):
        mcmc.run(random.PRNGKey(2), data)
assert len(_PYRO_STACK) == 0 | 0.644337 | 0.455501 |
import fcntl
import json
import logging
import uuid
from multiprocessing import Process
import redis as _redis
import requests
from flask import Flask, Response, request
import settings
from emulation import run_emulation
from tokenization import tokenize
app = Flask(__name__)
process = None # made process a global to avoid zombie process
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
redis = _redis.Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT)
def has_flock(fd):
"""
Checks if fd has flock over it
True if it is, False otherwise
:param fd:
:return:
:rtype: bool
"""
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except BlockingIOError:
return True
else:
return False
def has_redis_lock(uuid):
"""
Checks if redis has lock on uuid
:param uuid:
:return:
"""
try:
with redis.lock(str(uuid) + '__lock'):
pass
except _redis.exceptions.LockError:
return True
else:
return False
@app.route('/emulate', methods=['POST'])
def emulate():
"""
Listens for incoming POST request with emulation parameters
:return:
"""
# TODO: this
data = json.loads(request.data)
number_of_token_bags = tokenize(PD=data.get('PD'),
LGD=data.get('LGD'),
credit_value=data.get('creditSum', 100),
number_of_credits=data.get('creditsCount'))
with open(settings.LOCK_FILE_NAME, 'w') as lockfile:
if has_flock(lockfile):
logger.warning('Could not acquire lock.')
return Response(status=503)
global process
if process is not None:
process.join() # to avoid zombie process
emulation_uuid = uuid.uuid4()
redis.set(str(emulation_uuid) + '__token_bags', number_of_token_bags)
process = Process(target=run_emulation,
kwargs=dict(url=settings.API_URL,
emulation_uuid=emulation_uuid,
assets=number_of_token_bags,
meanmoney=data.get('meanmoney', 800),
days=data.get('days'),
yearreturn=data.get('placementRate'),
meantargetreturn=data.get('placementRate'),
nplaysers=data.get('peopleCount', 10)))
process.start()
return Response(json.dumps(
{'result': {
'emulation_uuid': str(emulation_uuid)
}}),
status=200,
content_type='application/json')
@app.route('/results', methods=['GET'])
def results():
"""
Listens for incoming GET request and returns emulation statistics (TBA)
:return:
"""
emulation_uuid = request.args.get('uuid')
if not emulation_uuid:
return Response(status=404)
if has_redis_lock(emulation_uuid):
return Response(status=503)
data = requests.get(settings.API_URL + 'api/v1/user/stats/',
params={
'uuid': emulation_uuid
}).json()
initial_token_bags = redis.get(str(emulation_uuid) + '__token_bags')
data['result']['placement_stats'] = [
v / float(initial_token_bags)
for v in data['result']['placement_stats']
]
return Response(json.dumps(data),
status=200,
content_type='application/json') | emulation/app.py | import fcntl
import json
import logging
import uuid
from multiprocessing import Process
import redis as _redis
import requests
from flask import Flask, Response, request
import settings
from emulation import run_emulation
from tokenization import tokenize
app = Flask(__name__)
process = None # made process a global to avoid zombie process
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
redis = _redis.Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT)
def has_flock(fd):
    """
    Checks if fd has flock over it
    True if it is, False otherwise

    NOTE(review): when the file is NOT already locked, this probe itself
    acquires LOCK_EX on fd and holds it until fd is closed -- the "check"
    has an acquire side effect that emulate() appears to rely on. Confirm
    this is intended before refactoring.
    :param fd:
    :return:
    :rtype: bool
    """
    try:
        # non-blocking exclusive lock attempt; BlockingIOError means some
        # other process currently holds the lock
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError:
        return True
    else:
        return False
def has_redis_lock(uuid):
    """
    Checks if redis has lock on uuid

    The probe is non-blocking: ``blocking_timeout=0`` makes the acquire
    attempt give up immediately when the lock is held (redis-py's default
    ``blocking_timeout=None`` would block forever, so the original code
    hung instead of returning True while an emulation held its lock).
    :param uuid: emulation uuid whose ``<uuid>__lock`` key is probed
    :return: True if the lock is currently held, False otherwise
    :rtype: bool
    """
    try:
        # acquire-and-release probe; LockError is raised when acquisition
        # fails immediately because the lock is already held
        with redis.lock(str(uuid) + '__lock', blocking_timeout=0):
            pass
    except _redis.exceptions.LockError:
        return True
    else:
        return False
@app.route('/emulate', methods=['POST'])
def emulate():
    """
    Listens for incoming POST request with emulation parameters

    Tokenizes the requested credit portfolio, guards against concurrent
    emulations with a file lock, then launches the emulation in a child
    process and returns its uuid.
    :return:
    """
    # TODO: this
    data = json.loads(request.data)
    number_of_token_bags = tokenize(PD=data.get('PD'),
                                    LGD=data.get('LGD'),
                                    credit_value=data.get('creditSum', 100),
                                    number_of_credits=data.get('creditsCount'))
    with open(settings.LOCK_FILE_NAME, 'w') as lockfile:
        # NOTE(review): has_flock() acquires LOCK_EX as a side effect when
        # the file is free; the lock is released when this `with` closes
        # the file, i.e. possibly before the spawned emulation finishes --
        # confirm the intended lock lifetime.
        if has_flock(lockfile):
            logger.warning('Could not acquire lock.')
            return Response(status=503)
        global process
        if process is not None:
            process.join()  # to avoid zombie process
        emulation_uuid = uuid.uuid4()
        # remember the bag count so /results can normalise statistics later
        redis.set(str(emulation_uuid) + '__token_bags', number_of_token_bags)
        process = Process(target=run_emulation,
                          kwargs=dict(url=settings.API_URL,
                                      emulation_uuid=emulation_uuid,
                                      assets=number_of_token_bags,
                                      meanmoney=data.get('meanmoney', 800),
                                      days=data.get('days'),
                                      yearreturn=data.get('placementRate'),
                                      meantargetreturn=data.get('placementRate'),
                                      nplaysers=data.get('peopleCount', 10)))
        process.start()
        return Response(json.dumps(
            {'result': {
                'emulation_uuid': str(emulation_uuid)
            }}),
            status=200,
            content_type='application/json')
@app.route('/results', methods=['GET'])
def results():
"""
Listens for incoming GET request and returns emulation statistics (TBA)
:return:
"""
emulation_uuid = request.args.get('uuid')
if not emulation_uuid:
return Response(status=404)
if has_redis_lock(emulation_uuid):
return Response(status=503)
data = requests.get(settings.API_URL + 'api/v1/user/stats/',
params={
'uuid': emulation_uuid
}).json()
initial_token_bags = redis.get(str(emulation_uuid) + '__token_bags')
data['result']['placement_stats'] = [
v / float(initial_token_bags)
for v in data['result']['placement_stats']
]
return Response(json.dumps(data),
status=200,
content_type='application/json') | 0.277375 | 0.092647 |
import os, sys, string, math, re, Utils, codecs
class DelayedApply:
    """Parses a delayed-apply job dump and catalogs the Sme objects and
    operations it contains.

    NOTE(review): the dump appears to be UTF-16-style text (every other
    byte a NUL); ConvertFile strips the interleaved bytes -- confirm the
    source encoding.
    """

    def __init__(self, dir, data_or_filename):
        # Locate the sandbox owning `dir` and enumerate the known
        # object/operation class names to match against the dump.
        self.mSandbox = Utils.FindSandbox(dir)
        self.mSmeObjects = self.FindDiscoObjects()
        self.mSmeOperations = self.FindOperationObjects()
        self.mObjectMap = {}   # object class name -> list of ids seen
        self.mOperations = []  # list of [operation, owner] pairs
        if os.path.isfile(data_or_filename):
            # read raw bytes; ConvertFile handles the decode
            with open(data_or_filename, 'rb') as file:
                data = file.read()
        else:
            data = data_or_filename
        data = self.ConvertFile(data)
        self.Parse(data)
        print(str(self))

    def ConvertFile(self, data):
        """Drop every second character/byte (UTF-16-style NUL padding).

        Accepts str or bytes and always returns str. The original
        implementation joined individually indexed elements, which breaks
        under Python 3 for bytes input (indexing bytes yields ints, so
        ''.join raised TypeError).
        """
        if isinstance(data, bytes):
            # latin-1 maps bytes 0-255 directly to code points, so this
            # decode cannot fail
            return data[::2].decode('latin-1')
        return data[::2]

    def Parse(self, data):
        """Scan the dump line by line, recording operations (name + owner)
        and object ids keyed by object class name."""
        lines = data.split('\n')
        count = len(lines)
        i = 0
        while i < count:
            # Search for operations first because they will also appear in the object list
            if lines[i].strip() in self.mSmeOperations:
                # catalog the operation parameters; the owner is expected
                # three lines below the operation name -- TODO confirm the
                # dump format guarantees those lines exist
                operation = lines[i].strip()
                i += 3
                owner = lines[i].strip()
                self.mOperations.append([operation, owner])
            elif lines[i].strip() in self.mSmeObjects:
                object = lines[i].strip()
                i += 1
                id = lines[i].strip()
                if object in self.mObjectMap:
                    if id not in self.mObjectMap[object]:
                        self.mObjectMap[object].append(id)
                else:
                    self.mObjectMap[object] = [id]
            i += 1

    def __str__(self):
        """Human-readable summary of cataloged objects and operations."""
        retval = 'Objects In Job\n'
        for object in self.mObjectMap:
            retval += '%s\n' % object
            for id in self.mObjectMap[object]:
                retval += '\t%s\n' % id
            retval += '\n'
        retval += '\nOperations in Job\n'
        for operation in self.mOperations:
            retval += '%s - %s\n\n' % (operation[0], operation[1])
        return retval

    def FindDiscoObjects(self):
        """Collect DiscoObject-derived class names from the sandbox."""
        dir = os.path.join(self.mSandbox, 'ws', 'Sme', 'Dev')
        return FindClassObjects(dir, 'DiscoObject')

    def FindOperationObjects(self):
        """Collect Operation-derived class names from the sandbox."""
        dir = os.path.join(self.mSandbox, 'ws', 'Sme', 'Dev', 'Operation')
        return FindClassObjects(dir, 'Operation')
class ClassEntry:
    """A single C++ class declaration: the class name plus the remainder of
    its declaration line (base-class list etc.), with a 'used' flag driving
    the fixed-point search in FindClassObjects."""

    def __init__(self, class_name, declaration):
        self.mClassName = class_name
        self.mDeclaration = declaration
        self.mUsed = False

    def SetUsed(self):
        # Mark the entry as consumed so later search passes skip it.
        self.mUsed = True

    def HasBeenUsed(self):
        return self.mUsed

    def __lt__(self, other):
        # Order entries alphabetically by class name.  Python 3 ignores
        # __cmp__ and removed the cmp() builtin, so the original
        # __cmp__-based ordering made declarations.sort() raise TypeError;
        # __lt__ is all list.sort() needs.
        return self.mClassName < other.mClassName
def FindClassObjects(dir, object_name):
    """Return the sorted names of all classes under *dir* that derive,
    directly or transitively, from *object_name*.

    Implemented as a fixed-point search over the class declarations: a class
    is pulled in when any already-known name appears in its declaration line
    (i.e. its base-class list); passes repeat until no new names are found.
    """
    declarations = GetClassDeclarations(dir)
    objects = [object_name]
    previous = len(objects)
    searching = True
    while searching:
        for decl in declarations:
            if decl.HasBeenUsed():
                # Already matched in an earlier pass -- skip.
                continue
            elif decl.mClassName in objects:
                # Duplicate of a class name we already collected.
                decl.SetUsed()
                continue
            # Iterate over a copy: *objects* may grow inside the loop.
            for object in objects[:]:
                if decl.mDeclaration.find(object) != -1:
                    decl.SetUsed()
                    objects.append(decl.mClassName)
                    break
        count = len(objects)
        if count == previous:
            # No new class picked up this pass -- fixed point reached.
            searching = False
        else:
            previous = count
    objects.sort()
    return objects
def GetClassDeclarations(dir):
    """Scan every header file under *dir* (recursively, skipping 'test'
    directories via IsHeaderFile) and return a sorted list of ClassEntry
    records, one per 'class Name :' declaration line found."""
    # Raw string: '\w'/'\s' are regex escapes, not string escapes.  The
    # original non-raw literal triggers invalid-escape warnings on
    # Python 3.6+ and would break outright in future versions.
    regex = re.compile(r'^[^\w]*class\s+(?P<class_name>[\w]*)\s*:')
    declarations = []
    for filename in Utils.RecurseDirectory(dir, IsHeaderFile, False):
        # 'with' guarantees the handle is closed even if parsing raises
        # (the original leaked the handle on any exception).
        with open(filename, 'r') as file:
            for line in file:
                match = regex.match(line)
                if match:
                    # Everything after the match is the declaration tail
                    # (typically the base-class list).
                    entry = ClassEntry(match.group('class_name'), line[match.end():].strip())
                    declarations.append(entry)
    declarations.sort()
    return declarations
def IsHeaderFile(filename):
    """True for C/C++ header files (*.h) whose immediate parent directory
    is not named 'test' (case-insensitive on both checks)."""
    parent = os.path.basename(os.path.dirname(filename)).lower()
    return filename.lower().endswith('.h') and parent != 'test'
if __name__ == '__main__':
    # Expects the delayed-apply data file as the sole command-line argument.
    if len(sys.argv) < 2:
        Utils.Error('The delayed apply file needs to be specified on the command line.')
        sys.exit(2)
    # NOTE(review): DelayedApply.__init__ takes (dir, data_or_filename), but
    # only one argument is passed here, which raises TypeError -- a sandbox
    # directory argument appears to be missing.  TODO confirm intended call.
    delayed_apply = DelayedApply(sys.argv[1])
    print((str(delayed_apply)))
class DelayedApply:
    """Parses a delayed-apply dump and catalogs the SME objects/operations in it.

    On construction the instance resolves the sandbox for *dir*, builds the
    catalogs of known object/operation class names from the source tree,
    parses the dump (given either as a file path or as raw data) and prints
    its own summary.
    """

    def __init__(self, dir, data_or_filename):
        self.mSandbox = Utils.FindSandbox(dir)
        # Class-name catalogs derived from the sandbox source tree.
        self.mSmeObjects = self.FindDiscoObjects()
        self.mSmeOperations = self.FindOperationObjects()
        # object class name -> list of ids seen in the dump
        self.mObjectMap = {}
        # list of [operation name, owner] pairs, in dump order
        self.mOperations = []
        if os.path.isfile(data_or_filename):
            file = open(data_or_filename, 'rb')
            data = file.read()
            file.close()
        else:
            # Caller passed the raw dump contents instead of a path.
            data = data_or_filename
        # NOTE(review): in the file branch *data* is bytes, so ConvertFile's
        # ''.join() over its elements (ints on Python 3) raises TypeError --
        # this path looks like a Python 2 leftover. TODO confirm.
        data = self.ConvertFile(data)
        self.Parse(data)
        print((str(self)))

    def ConvertFile(self, data):
        """Keep every even-indexed element of *data* (drops every second char).

        Presumably a crude UTF-16LE -> ASCII conversion of the dump --
        TODO confirm the encoding this is meant to undo.
        """
        new_data = []
        for i in range(len(data)):
            if (i % 2) == 0:
                new_data.append(data[i])
        return ''.join(new_data)

    def Parse(self, data):
        """Walk the dump line by line, cataloging operations and object ids."""
        lines = data.split('\n')
        count = len(lines)
        i = 0
        while i < count:
            # Search for operations first because they will also appear in the object list
            if lines[i].strip() in self.mSmeOperations:
                # catalog the operation parameters
                operation = lines[i].strip()
                # The owner is expected three lines below the operation name.
                i += 3
                owner = lines[i].strip()
                self.mOperations.append([operation, owner])
            elif lines[i].strip() in self.mSmeObjects:
                object = lines[i].strip()
                # The object's id is on the following line.
                i += 1
                id = lines[i].strip()
                if object in list(self.mObjectMap.keys()):
                    if id not in self.mObjectMap[object]:
                        self.mObjectMap[object].append(id)
                else:
                    self.mObjectMap[object] = [id]
            i += 1

    def __str__(self):
        """Human-readable summary of the cataloged objects and operations."""
        retval = 'Objects In Job\n'
        for object in list(self.mObjectMap.keys()):
            retval += '%s\n' % object
            for id in self.mObjectMap[object]:
                retval += '\t%s\n' % id
            retval += '\n'
        retval += '\nOperations in Job\n'
        for operation in self.mOperations:
            retval += '%s - %s\n\n' % (operation[0], operation[1])
        return retval

    def FindDiscoObjects(self):
        """Names of all classes deriving (transitively) from DiscoObject."""
        dir = os.path.join(self.mSandbox, 'ws', 'Sme', 'Dev')
        return FindClassObjects(dir, 'DiscoObject')

    def FindOperationObjects(self):
        """Names of all classes deriving (transitively) from Operation."""
        dir = os.path.join(self.mSandbox, 'ws', 'Sme', 'Dev', 'Operation')
        return FindClassObjects(dir, 'Operation')
class ClassEntry:
    """A single C++ class declaration: the class name plus the remainder of
    its declaration line (base-class list etc.), with a 'used' flag driving
    the fixed-point search in FindClassObjects."""

    def __init__(self, class_name, declaration):
        self.mClassName = class_name
        self.mDeclaration = declaration
        self.mUsed = False

    def SetUsed(self):
        # Mark the entry as consumed so later search passes skip it.
        self.mUsed = True

    def HasBeenUsed(self):
        return self.mUsed

    def __lt__(self, other):
        # Order entries alphabetically by class name.  Python 3 ignores
        # __cmp__ and removed the cmp() builtin, so the original
        # __cmp__-based ordering made declarations.sort() raise TypeError;
        # __lt__ is all list.sort() needs.
        return self.mClassName < other.mClassName
def FindClassObjects(dir, object_name):
    """Return the sorted names of all classes under *dir* that derive,
    directly or transitively, from *object_name*.

    Implemented as a fixed-point search over the class declarations: a class
    is pulled in when any already-known name appears in its declaration line
    (i.e. its base-class list); passes repeat until no new names are found.
    """
    declarations = GetClassDeclarations(dir)
    objects = [object_name]
    previous = len(objects)
    searching = True
    while searching:
        for decl in declarations:
            if decl.HasBeenUsed():
                # Already matched in an earlier pass -- skip.
                continue
            elif decl.mClassName in objects:
                # Duplicate of a class name we already collected.
                decl.SetUsed()
                continue
            # Iterate over a copy: *objects* may grow inside the loop.
            for object in objects[:]:
                if decl.mDeclaration.find(object) != -1:
                    decl.SetUsed()
                    objects.append(decl.mClassName)
                    break
        count = len(objects)
        if count == previous:
            # No new class picked up this pass -- fixed point reached.
            searching = False
        else:
            previous = count
    objects.sort()
    return objects
def GetClassDeclarations(dir):
    """Scan every header file under *dir* (recursively, skipping 'test'
    directories via IsHeaderFile) and return a sorted list of ClassEntry
    records, one per 'class Name :' declaration line found."""
    # Raw string: '\w'/'\s' are regex escapes, not string escapes.  The
    # original non-raw literal triggers invalid-escape warnings on
    # Python 3.6+ and would break outright in future versions.
    regex = re.compile(r'^[^\w]*class\s+(?P<class_name>[\w]*)\s*:')
    declarations = []
    for filename in Utils.RecurseDirectory(dir, IsHeaderFile, False):
        # 'with' guarantees the handle is closed even if parsing raises
        # (the original leaked the handle on any exception).
        with open(filename, 'r') as file:
            for line in file:
                match = regex.match(line)
                if match:
                    # Everything after the match is the declaration tail
                    # (typically the base-class list).
                    entry = ClassEntry(match.group('class_name'), line[match.end():].strip())
                    declarations.append(entry)
    declarations.sort()
    return declarations
def IsHeaderFile(filename):
    """True for C/C++ header files (*.h) whose immediate parent directory
    is not named 'test' (case-insensitive on both checks)."""
    parent = os.path.basename(os.path.dirname(filename)).lower()
    return filename.lower().endswith('.h') and parent != 'test'
if __name__ == '__main__':
    # Expects the delayed-apply data file as the sole command-line argument.
    if len(sys.argv) < 2:
        Utils.Error('The delayed apply file needs to be specified on the command line.')
        sys.exit(2)
    # NOTE(review): DelayedApply.__init__ takes (dir, data_or_filename), but
    # only one argument is passed here, which raises TypeError -- a sandbox
    # directory argument appears to be missing.  TODO confirm intended call.
    delayed_apply = DelayedApply(sys.argv[1])
    print((str(delayed_apply)))
import ast
import builtins
import re
import token
import tokenize
import os.path
from thonny.assistance import ErrorHelper, Suggestion, name_similarity, add_error_helper
from thonny import assistance
from thonny.misc_utils import running_on_linux, running_on_windows
class SyntaxErrorHelper(ErrorHelper):
    """Explains SyntaxErrors: unclosed strings, unbalanced brackets and
    missing/misplaced colons after block-introducing keywords."""

    def __init__(self, error_info):
        super().__init__(error_info)

        # Token stream of the offending file (when it can be re-tokenized)
        # and the tokenizer's own error, if tokenization itself failed.
        self.tokens = []
        self.token_error = None

        if self.error_info["message"] == "EOL while scanning string literal":
            self.intro_text = (
                "You haven't properly closed the string on line %s." % self.error_info["lineno"]
                + "\n(If you want a multi-line string, then surround it with"
                + " `'''` or `\"\"\"` at both ends.)"
            )
        elif self.error_info["message"] == "EOF while scanning triple-quoted string literal":
            # lineno is not useful, as it is at the end of the file and user probably
            # didn't want the string to end there
            self.intro_text = "You haven't properly closed a triple-quoted string"
        else:
            if self.error_info["filename"] and os.path.isfile(self.error_info["filename"]):
                with open(self.error_info["filename"], mode="rb") as fp:
                    try:
                        for t in tokenize.tokenize(fp.readline):
                            self.tokens.append(t)
                    except tokenize.TokenError as e:
                        self.token_error = e

                # Guarantee a terminating token so the index-based scans
                # below never run off the end of the list.
                if not self.tokens or self.tokens[-1].type not in [
                    token.ERRORTOKEN,
                    token.ENDMARKER,
                ]:
                    self.tokens.append(tokenize.TokenInfo(token.ERRORTOKEN, "", None, None, ""))
            else:
                self.tokens = []

            unbalanced = self._sug_unbalanced_parens()
            if unbalanced:
                self.intro_text = (
                    "Unbalanced parentheses, brackets or braces:\n\n" + unbalanced.body
                )
                self.intro_confidence = 5
            else:
                self.intro_text = "Python doesn't know how to read your program."
                if "^" in str(self.error_info):
                    self.intro_text += (
                        "\n\nSmall `^` in the original error message shows where it gave up,"
                        + " but the actual mistake can be before this."
                    )

        self.suggestions = [self._sug_missing_or_misplaced_colon()]

    def _sug_missing_or_misplaced_colon(self):
        """Suggest a missing/misplaced colon after a block-introducing keyword."""
        i = 0
        title = "Did you forget the colon?"
        relevance = 0
        body = ""
        while i < len(self.tokens) and self.tokens[i].type != token.ENDMARKER:
            t = self.tokens[i]
            if t.string in [
                "if",
                "elif",
                "else",
                "while",
                "for",
                "with",
                "try",
                "except",
                "finally",
                "class",
                "def",
            ]:
                keyword_pos = i
                # Scan forward to the colon that should terminate this
                # header, skipping over any bracketed sub-expressions.
                while (
                    self.tokens[i].type
                    not in [
                        token.NEWLINE,
                        token.ENDMARKER,
                        token.COLON,  # colon may be OP
                        token.RBRACE,
                    ]
                    and self.tokens[i].string != ":"
                ):
                    old_i = i
                    if self.tokens[i].string in "([{":
                        i = self._skip_braced_part(i)
                        assert i > old_i
                        if i == len(self.tokens):
                            return None
                    else:
                        i += 1

                if self.tokens[i].string != ":":
                    relevance = 9
                    body = "`%s` header must end with a colon." % t.string
                    break

                # Colon was present, but maybe it should have been right
                # after the keyword.
                if (
                    t.string in ["else", "try", "finally"]
                    and self.tokens[keyword_pos + 1].string != ":"
                ):
                    title = "Incorrect use of `%s`" % t.string
                    body = "Nothing is allowed between `%s` and colon." % t.string
                    relevance = 9
                    if (
                        self.tokens[keyword_pos + 1].type not in (token.NEWLINE, tokenize.COMMENT)
                        and t.string == "else"
                    ):
                        # NOTE(review): "conditon" is a typo in this
                        # user-facing message (should be "condition").
                        body = "If you want to specify a conditon, then use `elif` or nested `if`."
                    break

            i += 1

        return Suggestion("missing-or-misplaced-colon", title, body, relevance)

    def _sug_unbalanced_parens(self):
        """Turn the first bracket-balance problem (if any) into a Suggestion."""
        problem = self._find_first_braces_problem()
        if not problem:
            return None
        # NOTE(review): reuses the "missing-or-misplaced-colon" symbol; a
        # distinct id (e.g. "unbalanced-brackets") was probably intended.
        return Suggestion("missing-or-misplaced-colon", "Unbalanced brackets", problem[1], 8)

    def _sug_wrong_increment_op(self):
        # TODO: not implemented yet (C-style "++" etc.)
        pass

    def _sug_wrong_decrement_op(self):
        pass

    def _sug_wrong_comparison_op(self):
        pass

    def _sug_switched_assignment_sides(self):
        pass

    def _skip_braced_part(self, token_index):
        """Given the index of an opening bracket token, return the index just
        past its matching closer (or len(self.tokens) when unmatched)."""
        assert self.tokens[token_index].string in ["(", "[", "{"]
        level = 1
        token_index += 1
        while token_index < len(self.tokens):
            if self.tokens[token_index].string in ["(", "[", "{"]:
                level += 1
            elif self.tokens[token_index].string in [")", "]", "}"]:
                level -= 1

            token_index += 1

            if level <= 0:
                return token_index

        assert token_index == len(self.tokens)
        return token_index

    def _find_first_braces_problem(self):
        """Return (token, rst_message) for the first unbalanced bracket, else None."""
        # closers = {'(':')', '{':'}', '[':']'}
        openers = {")": "(", "}": "{", "]": "["}

        brace_stack = []
        for t in self.tokens:
            if t.string in ["(", "[", "{"]:
                brace_stack.append(t)
            elif t.string in [")", "]", "}"]:
                if not brace_stack:
                    # Closer without any opener on the stack.
                    return (
                        t,
                        "Found '`%s`' at `line %d <%s>`_ without preceding matching '`%s`'"
                        % (
                            t.string,
                            t.start[0],
                            assistance.format_file_url(
                                self.error_info["filename"], t.start[0], t.start[1]
                            ),
                            openers[t.string],
                        ),
                    )
                elif brace_stack[-1].string != openers[t.string]:
                    # Closer does not match the most recent opener.
                    return (
                        t,
                        "Found '`%s`' at `line %d <%s>`__ when last unmatched opener was '`%s`' at `line %d <%s>`__"
                        % (
                            t.string,
                            t.start[0],
                            assistance.format_file_url(
                                self.error_info["filename"], t.start[0], t.start[1]
                            ),
                            brace_stack[-1].string,
                            brace_stack[-1].start[0],
                            assistance.format_file_url(
                                self.error_info["filename"],
                                brace_stack[-1].start[0],
                                brace_stack[-1].start[1],
                            ),
                        ),
                    )
                else:
                    brace_stack.pop()

        if brace_stack:
            # Opener(s) never closed before EOF.
            return (
                brace_stack[-1],
                "'`%s`' at `line %d <%s>`_ is not closed by the end of the program"
                % (
                    brace_stack[-1].string,
                    brace_stack[-1].start[0],
                    assistance.format_file_url(
                        self.error_info["filename"],
                        brace_stack[-1].start[0],
                        brace_stack[-1].start[1],
                    ),
                ),
            )

        return None
class NameErrorHelper(ErrorHelper):
    """Explains NameErrors: typos, missing quotes, missing imports and
    local-vs-global scope problems."""

    def __init__(self, error_info):
        super().__init__(error_info)

        # The message looks like: name 'xyz' is not defined
        names = re.findall(r"\'.*\'", error_info["message"])
        assert len(names) == 1
        self.name = names[0].strip("'")

        self.intro_text = "Python doesn't know what `%s` stands for." % self.name
        self.suggestions = [
            self._sug_bad_spelling(),
            self._sug_missing_quotes(),
            self._sug_missing_import(),
            self._sug_local_from_global(),
            self._sug_not_defined_yet(),
        ]

    def _sug_missing_quotes(self):
        """Maybe the user meant a string literal, not a variable."""
        # A name that is called/subscripted/attribute-accessed is unlikely
        # to have been intended as literal text.
        if self._is_attribute_value() or self._is_call_function() or self._is_subscript_value():
            relevance = 0
        else:
            relevance = 5
        return Suggestion(
            "missing-quotes",
            "Did you actually mean string (text)?",
            'If you didn\'t mean a variable but literal text "%s", then surround it with quotes.'
            % self.name,
            relevance,
        )

    def _sug_bad_spelling(self):
        """Look for similarly spelled candidates among builtins, keywords and
        the names visible in the failing frame (from the ErrorHelper base)."""
        # Yes, it would be more proper to consult builtins from the backend,
        # but it's easier this way...
        all_names = {name for name in dir(builtins) if not name.startswith("_")}
        all_names |= {"pass", "break", "continue", "return", "yield"}

        if self.last_frame.globals is not None:
            all_names |= set(self.last_frame.globals.keys())
        if self.last_frame.locals is not None:
            all_names |= set(self.last_frame.locals.keys())

        similar_names = {self.name}
        if all_names:
            relevance = 0
            for name in all_names:
                sim = name_similarity(name, self.name)
                if sim > 4:
                    similar_names.add(name)
                # Overall relevance is the best similarity seen.
                relevance = max(sim, relevance)
        else:
            relevance = 3

        if len(similar_names) > 1:
            body = "I found similar names. Are all of them spelled correctly?\n\n"
            for name in sorted(similar_names, key=lambda x: x.lower()):
                # TODO: add location info
                body += "* `%s`\n\n" % name
        else:
            body = (
                "Compare the name with corresponding definition / assignment / documentation."
                + " Don't forget that case of the letters matters!"
            )

        return Suggestion("bad-spelling-name", "Did you misspell it (somewhere)?", body, relevance)

    def _sug_missing_import(self):
        """Guess which stdlib module the name might come from and suggest an import."""
        # Well-known names -> module that provides them.
        likely_importable_functions = {
            "math": {"ceil", "floor", "sqrt", "sin", "cos", "degrees"},
            "random": {"randint"},
            "turtle": {
                "left",
                "right",
                "forward",
                "fd",
                "goto",
                "setpos",
                "Turtle",
                "penup",
                "up",
                "pendown",
                "down",
                "color",
                "pencolor",
                "fillcolor",
                "begin_fill",
                "end_fill",
                "pensize",
                "width",
            },
            "re": {"search", "match", "findall"},
            "datetime": {"date", "time", "datetime", "today"},
            "statistics": {
                "mean",
                "median",
                "median_low",
                "median_high",
                "mode",
                "pstdev",
                "pvariance",
                "stdev",
                "variance",
            },
            "os": {"listdir"},
            "time": {"time", "sleep"},
        }

        body = None

        if self._is_call_function():
            relevance = 5
            for mod in likely_importable_functions:
                if self.name in likely_importable_functions[mod]:
                    # Known function name -> bump relevance, name the module.
                    relevance += 3
                    body = (
                        "If you meant `%s` from module `%s`, then add\n\n`from %s import %s`\n\nto the beginning of your script."
                        % (self.name, mod, mod, self.name)
                    )
                    break

        elif self._is_attribute_value():
            # name.attr -- the name is probably meant to be a module.
            relevance = 5
            body = (
                "If you meant module `%s`, then add `import %s` to the beginning of your script"
                % (self.name, self.name)
            )
            if self.name in likely_importable_functions:
                relevance += 3
        elif self._is_subscript_value() and self.name != "argv":
            relevance = 0
        elif self.name == "pi":
            body = "If you meant the constant π, then add `from math import pi` to the beginning of your script."
            relevance = 8
        elif self.name == "argv":
            body = "If you meant the list with program arguments, then add `from sys import argv` to the beginning of your script."
            relevance = 8
        else:
            relevance = 3

        if body is None:
            body = "Some functions/variables need to be imported before they can be used."

        return Suggestion("missing-import", "Did you forget to import it?", body, relevance)

    def _sug_local_from_global(self):
        """Detect reading, at module level, a name that is only local to some
        function(s) in the failing module's AST."""
        relevance = 0
        body = None

        if self.last_frame.code_name == "<module>" and self.last_frame_module_ast is not None:
            function_names = set()
            for node in ast.walk(self.last_frame_module_ast):
                if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                    # The name is a parameter of this function...
                    if self.name in map(lambda x: x.arg, node.args.args):
                        function_names.add(node.name)
                    # TODO: varargs, kw, ...
                    declared_global = False
                    for localnode in ast.walk(node):
                        # print(node.name, localnode)
                        # ...or it is assigned inside this function.
                        if (
                            isinstance(localnode, ast.Name)
                            and localnode.id == self.name
                            and isinstance(localnode.ctx, ast.Store)
                        ):
                            function_names.add(node.name)
                        elif isinstance(localnode, ast.Global) and self.name in localnode.names:
                            declared_global = True

                    # 'global' declaration means the assignment is not local.
                    if node.name in function_names and declared_global:
                        function_names.remove(node.name)

            if function_names:
                relevance = 9
                body = (
                    (
                        "Name `%s` defined in `%s` is not accessible in the global/module level."
                        % (self.name, " and ".join(function_names))
                    )
                    + "\n\nIf you need that data at the global level, then consider changing the function so that it `return`-s the value."
                )

        # NOTE(review): "acces" is a typo in this user-facing title.
        return Suggestion(
            "local-from-global",
            "Are you trying to acces a local variable outside of the function?",
            body,
            relevance,
        )

    def _sug_not_defined_yet(self):
        """Low-relevance fallback: the definition simply hasn't run yet."""
        return Suggestion(
            "not-defined-yet",
            "Has Python executed the definition?",
            (
                "Don't forget that name becomes defined when corresponding definition ('=', 'def' or 'import') gets executed."
                + " If the definition comes later in code or is inside an if-statement, Python may not have executed it (yet)."
                + "\n\n"
                + "Make sure Python arrives to the definition before it arrives to this line. When in doubt, "
                + "`use the debugger <debuggers.rst>`_."
            ),
            2,
        )

    def _sug_maybe_attribute(self):
        "TODO:"

    def _sug_synonym(self):
        "TODO:"

    def _is_call_function(self):
        # Crude textual check: does "name(" occur on the offending source
        # line once all whitespace is stripped?
        return self.name + "(" in (
            self.error_info["line"].replace(" ", "").replace("\n", "").replace("\r", "")
        )

    def _is_subscript_value(self):
        # Same textual check for "name[".
        return self.name + "[" in (
            self.error_info["line"].replace(" ", "").replace("\n", "").replace("\r", "")
        )

    def _is_attribute_value(self):
        # Same textual check for "name.".
        return self.name + "." in (
            self.error_info["line"].replace(" ", "").replace("\n", "").replace("\r", "")
        )
class AttributeErrorHelper(ErrorHelper):
    """Explains AttributeErrors: len()-misuse, misspellings, unexpected types."""

    def __init__(self, error_info):
        super().__init__(error_info)

        # Message looks like: 'X' object has no attribute 'y'
        # (non-greedy so the two quoted parts are captured separately).
        names = re.findall(r"\'.*?\'", error_info["message"])
        assert len(names) == 2
        self.type_name = names[0].strip("'")
        self.att_name = names[1].strip("'")

        self.intro_text = (
            "Your program tries to "
            + ("call method " if self._is_call_function() else "access attribute ")
            + "`%s` of " % self.att_name
            + _get_phrase_for_object(self.type_name)
            + ", but this type doesn't have such "
            + ("method." if self._is_call_function() else "attribute.")
        )

        self.suggestions = [
            self._sug_wrong_attribute_instead_of_len(),
            self._sug_bad_spelling(),
            self._sug_bad_type(),
        ]

    def _sug_wrong_attribute_instead_of_len(self):
        """For sized builtins, suggest len() when .len/.length/.size was tried."""
        if self.type_name == "str":
            goal = "length"
        elif self.type_name == "bytes":
            goal = "number of bytes"
        elif self.type_name == "list":
            goal = "number of elements"
        elif self.type_name == "tuple":
            goal = "number of elements"
        elif self.type_name == "set":
            goal = "number of elements"
        elif self.type_name == "dict":
            goal = "number of entries"
        else:
            # Not a sized builtin -- suggestion does not apply (returns None,
            # which ends up in the suggestions list).
            return

        return Suggestion(
            "wrong-attribute-instead-of-len",
            "Did you mean to ask the %s?" % goal,
            "This can be done with function `len`, eg:\n\n`len(%s)`"
            % _get_sample_for_type(self.type_name),
            (9 if self.att_name.lower() in ("len", "length", "size") else 0),
        )

    def _sug_bad_spelling(self):
        """Generic misspelling hint (no candidate comparison yet)."""
        # TODO: compare with attributes of known types
        return Suggestion(
            "bad-spelling-attribute",
            "Did you misspell the name?",
            "Don't forget that case of the letters matters too!",
            3,
        )

    def _sug_bad_type(self):
        """Maybe the object simply has an unexpected type at this point."""
        if self._is_call_function():
            action = "call this function on"
        else:
            action = "ask this attribute from"

        return Suggestion(
            "wrong-type-attribute",
            "Did you expect another type?",
            "If you didn't mean %s %s, " % (action, _get_phrase_for_object(self.type_name))
            + "then step through your program to see "
            + "why this type appears here.",
            3,
        )

    def _is_call_function(self):
        # Crude textual check: does ".attr(" occur on the offending source
        # line once all whitespace is stripped?
        return "." + self.att_name + "(" in (
            self.error_info["line"].replace(" ", "").replace("\n", "").replace("\r", "")
        )
class OSErrorHelper(ErrorHelper):
    """Explains OSErrors; currently only the 'Address already in use' case."""

    def __init__(self, error_info):
        super().__init__(error_info)

        if "Address already in use" in self.error_info["message"]:
            # NOTE(review): "Your programs tries" is a typo in this
            # user-facing message (should be "Your program tries").
            self.intro_text = "Your programs tries to listen on a port which is already taken."
            self.suggestions = [
                Suggestion(
                    "kill-by-port-type-error",
                    "Want to close the other process?",
                    self.get_kill_process_instructions(),
                    5,
                ),
                Suggestion(
                    "use-another-type-error",
                    "Can you use another port?",
                    "If you don't want to mess with the other process, then check whether"
                    + " you can configure your program to use another port.",
                    3,
                ),
            ]
        else:
            self.intro_text = "No specific information is available for this error."

    def get_kill_process_instructions(self):
        """Platform-specific shell instructions (run via '!' in Thonny's
        Shell) for finding and hard-killing the process holding the port."""
        s = (
            "Let's say you need port 5000. If you don't know which process is using it,"
            + " then enter following system command into Thonny's Shell:\n\n"
        )

        if running_on_windows():
            s += (
                "``!netstat -ano | findstr :5000``\n\n"
                + "You should see the process ID in the last column.\n\n"
            )
        else:
            # Linux / macOS both have lsof.
            s += (
                "``!lsof -i:5000``\n\n" + "You should see the process ID under the heading PID.\n\n"
            )

        s += (
            "Let's pretend the ID is 12345."
            " You can try hard-killing the process with following command:\n\n"
        )

        if running_on_windows():
            s += "``!tskill 12345``\n"
        else:
            s += (
                "``!kill -9 12345``\n\n"
                + "Both steps can be combined into single command:\n\n"
                + "``!kill -9 $(lsof -t -i:5000)``\n\n"
            )

        return s
class TypeErrorHelper(ErrorHelper):
    """Explains TypeErrors, with special handling for str + non-str concatenation."""

    def __init__(self, error_info):
        super().__init__(error_info)

        # Generic fallback advice, used unless a concatenation pattern matches.
        self.intro_text = (
            "Python was asked to do an operation with an object which " + "doesn't support it."
        )
        self.suggestions = [
            Suggestion(
                "step-to-find-type-error",
                "Did you expect another type?",
                "Step through your program to see why this type appears here.",
                3,
            ),
            Suggestion(
                "look-documentation-type-error",
                "Maybe you forgot some details about this operation?",
                "Look up the documentation or perform a web search with the error message.",
                2,
            ),
        ]

        # overwrite / add for special cases
        # something + str or str + something
        # Each entry: (regex over the message, whether str is the first operand).
        # group(1) must capture the *other* operand's type name.
        for r, string_first in [
            (r"unsupported operand type\(s\) for \+: '(.+?)' and 'str'", False),
            (r"^Can't convert '(.+?)' object to str implicitly$", True),  # Python 3.5
            (r"^must be str, not (.+)$", True),  # Python 3.6
            # Python 3.7+. The parentheses around 'not "..."' are literal in
            # the interpreter's message, so they must be escaped here -- the
            # original unescaped version turned them into a regex group and
            # never matched the actual message.
            (r'^can only concatenate str \(not "(.+?)"\) to str$', True),
        ]:
            m = re.match(r, error_info["message"], re.I)  # @UndefinedVariable
            if m is not None:
                self._bad_string_concatenation(m.group(1), string_first)
                return

        # TODO: other operations, when one side is string

    def _bad_string_concatenation(self, other_type_name, string_first):
        """Replace the generic intro and extend suggestions with advice for
        'str + X' / 'X + str' concatenation errors.

        other_type_name -- type name of the non-str operand
        string_first    -- True when the str operand came first
        """
        self.intro_text = "Your program is trying to put together " + (
            "a string and %s." if string_first else "%s and a string."
        ) % _get_phrase_for_object(other_type_name)

        self.suggestions.append(
            Suggestion(
                "convert-other-operand-to-string",
                "Did you mean to treat both sides as text and produce a string?",
                "In this case you should apply function `str` to the %s "
                % _get_phrase_for_object(other_type_name, False)
                + "in order to convert it to string first, eg:\n\n"
                + ("`'abc' + str(%s)`" if string_first else "`str(%s) + 'abc'`")
                % _get_sample_for_type(other_type_name),
                8,
            )
        )

        if other_type_name in ("float", "int"):
            self.suggestions.append(
                Suggestion(
                    "convert-other-operand-to-number",
                    "Did you mean to treat both sides as numbers and produce a sum?",
                    "In this case you should first convert the string to a number "
                    + "using either function `float` or `int`, eg:\n\n"
                    + ("`float('3.14') + 22`" if string_first else "`22 + float('3.14')`"),
                    7,
                )
            )
def _get_phrase_for_object(type_name, with_article=True):
friendly_names = {
"str": "a string",
"int": "an integer",
"float": "a float",
"list": "a list",
"tuple": "a tuple",
"dict": "a dictionary",
"set": "a set",
"bool": "a boolean",
}
result = friendly_names.get(type_name, "an object of type '%s'" % type_name)
if with_article:
return result
else:
_, rest = result.split(" ", maxsplit=1)
return rest
def _get_sample_for_type(type_name):
if type_name == "int":
return "42"
elif type_name == "float":
return "3.14"
elif type_name == "str":
return "'abc'"
elif type_name == "bytes":
return "b'abc'"
elif type_name == "list":
return "[1, 2, 3]"
elif type_name == "tuple":
return "(1, 2, 3)"
elif type_name == "set":
return "{1, 2, 3}"
elif type_name == "dict":
return "{1 : 'one', 2 : 'two'}"
else:
return "..."
def load_plugin():
    """Register every *ErrorHelper class in this module with the assistant.

    Called by Thonny's plugin loader.  Each helper is registered under its
    class name minus the "Helper" suffix (e.g. "SyntaxError"), which is the
    exception-type name it handles.
    """
    for name in globals():
        if name.endswith("ErrorHelper") and not name.startswith("_"):
            # "SyntaxErrorHelper" -> "SyntaxError"
            type_name = name[: -len("Helper")]
            add_error_helper(type_name, globals()[name])
import builtins
import re
import token
import tokenize
import os.path
from thonny.assistance import ErrorHelper, Suggestion, name_similarity, add_error_helper
from thonny import assistance
from thonny.misc_utils import running_on_linux, running_on_windows
class SyntaxErrorHelper(ErrorHelper):
    """Explains SyntaxErrors: unclosed strings, unbalanced brackets and
    missing/misplaced colons after block-introducing keywords."""

    def __init__(self, error_info):
        super().__init__(error_info)

        # Token stream of the offending file (when it can be re-tokenized)
        # and the tokenizer's own error, if tokenization itself failed.
        self.tokens = []
        self.token_error = None

        if self.error_info["message"] == "EOL while scanning string literal":
            self.intro_text = (
                "You haven't properly closed the string on line %s." % self.error_info["lineno"]
                + "\n(If you want a multi-line string, then surround it with"
                + " `'''` or `\"\"\"` at both ends.)"
            )
        elif self.error_info["message"] == "EOF while scanning triple-quoted string literal":
            # lineno is not useful, as it is at the end of the file and user probably
            # didn't want the string to end there
            self.intro_text = "You haven't properly closed a triple-quoted string"
        else:
            if self.error_info["filename"] and os.path.isfile(self.error_info["filename"]):
                with open(self.error_info["filename"], mode="rb") as fp:
                    try:
                        for t in tokenize.tokenize(fp.readline):
                            self.tokens.append(t)
                    except tokenize.TokenError as e:
                        self.token_error = e

                # Guarantee a terminating token so the index-based scans
                # below never run off the end of the list.
                if not self.tokens or self.tokens[-1].type not in [
                    token.ERRORTOKEN,
                    token.ENDMARKER,
                ]:
                    self.tokens.append(tokenize.TokenInfo(token.ERRORTOKEN, "", None, None, ""))
            else:
                self.tokens = []

            unbalanced = self._sug_unbalanced_parens()
            if unbalanced:
                self.intro_text = (
                    "Unbalanced parentheses, brackets or braces:\n\n" + unbalanced.body
                )
                self.intro_confidence = 5
            else:
                self.intro_text = "Python doesn't know how to read your program."
                if "^" in str(self.error_info):
                    self.intro_text += (
                        "\n\nSmall `^` in the original error message shows where it gave up,"
                        + " but the actual mistake can be before this."
                    )

        self.suggestions = [self._sug_missing_or_misplaced_colon()]

    def _sug_missing_or_misplaced_colon(self):
        """Suggest a missing/misplaced colon after a block-introducing keyword."""
        i = 0
        title = "Did you forget the colon?"
        relevance = 0
        body = ""
        while i < len(self.tokens) and self.tokens[i].type != token.ENDMARKER:
            t = self.tokens[i]
            if t.string in [
                "if",
                "elif",
                "else",
                "while",
                "for",
                "with",
                "try",
                "except",
                "finally",
                "class",
                "def",
            ]:
                keyword_pos = i
                # Scan forward to the colon that should terminate this
                # header, skipping over any bracketed sub-expressions.
                while (
                    self.tokens[i].type
                    not in [
                        token.NEWLINE,
                        token.ENDMARKER,
                        token.COLON,  # colon may be OP
                        token.RBRACE,
                    ]
                    and self.tokens[i].string != ":"
                ):
                    old_i = i
                    if self.tokens[i].string in "([{":
                        i = self._skip_braced_part(i)
                        assert i > old_i
                        if i == len(self.tokens):
                            return None
                    else:
                        i += 1

                if self.tokens[i].string != ":":
                    relevance = 9
                    body = "`%s` header must end with a colon." % t.string
                    break

                # Colon was present, but maybe it should have been right
                # after the keyword.
                if (
                    t.string in ["else", "try", "finally"]
                    and self.tokens[keyword_pos + 1].string != ":"
                ):
                    title = "Incorrect use of `%s`" % t.string
                    body = "Nothing is allowed between `%s` and colon." % t.string
                    relevance = 9
                    if (
                        self.tokens[keyword_pos + 1].type not in (token.NEWLINE, tokenize.COMMENT)
                        and t.string == "else"
                    ):
                        # NOTE(review): "conditon" is a typo in this
                        # user-facing message (should be "condition").
                        body = "If you want to specify a conditon, then use `elif` or nested `if`."
                    break

            i += 1

        return Suggestion("missing-or-misplaced-colon", title, body, relevance)

    def _sug_unbalanced_parens(self):
        """Turn the first bracket-balance problem (if any) into a Suggestion."""
        problem = self._find_first_braces_problem()
        if not problem:
            return None
        # NOTE(review): reuses the "missing-or-misplaced-colon" symbol; a
        # distinct id (e.g. "unbalanced-brackets") was probably intended.
        return Suggestion("missing-or-misplaced-colon", "Unbalanced brackets", problem[1], 8)

    def _sug_wrong_increment_op(self):
        # TODO: not implemented yet (C-style "++" etc.)
        pass

    def _sug_wrong_decrement_op(self):
        pass

    def _sug_wrong_comparison_op(self):
        pass

    def _sug_switched_assignment_sides(self):
        pass

    def _skip_braced_part(self, token_index):
        """Given the index of an opening bracket token, return the index just
        past its matching closer (or len(self.tokens) when unmatched)."""
        assert self.tokens[token_index].string in ["(", "[", "{"]
        level = 1
        token_index += 1
        while token_index < len(self.tokens):
            if self.tokens[token_index].string in ["(", "[", "{"]:
                level += 1
            elif self.tokens[token_index].string in [")", "]", "}"]:
                level -= 1

            token_index += 1

            if level <= 0:
                return token_index

        assert token_index == len(self.tokens)
        return token_index

    def _find_first_braces_problem(self):
        """Return (token, rst_message) for the first unbalanced bracket, else None."""
        # closers = {'(':')', '{':'}', '[':']'}
        openers = {")": "(", "}": "{", "]": "["}

        brace_stack = []
        for t in self.tokens:
            if t.string in ["(", "[", "{"]:
                brace_stack.append(t)
            elif t.string in [")", "]", "}"]:
                if not brace_stack:
                    # Closer without any opener on the stack.
                    return (
                        t,
                        "Found '`%s`' at `line %d <%s>`_ without preceding matching '`%s`'"
                        % (
                            t.string,
                            t.start[0],
                            assistance.format_file_url(
                                self.error_info["filename"], t.start[0], t.start[1]
                            ),
                            openers[t.string],
                        ),
                    )
                elif brace_stack[-1].string != openers[t.string]:
                    # Closer does not match the most recent opener.
                    return (
                        t,
                        "Found '`%s`' at `line %d <%s>`__ when last unmatched opener was '`%s`' at `line %d <%s>`__"
                        % (
                            t.string,
                            t.start[0],
                            assistance.format_file_url(
                                self.error_info["filename"], t.start[0], t.start[1]
                            ),
                            brace_stack[-1].string,
                            brace_stack[-1].start[0],
                            assistance.format_file_url(
                                self.error_info["filename"],
                                brace_stack[-1].start[0],
                                brace_stack[-1].start[1],
                            ),
                        ),
                    )
                else:
                    brace_stack.pop()

        if brace_stack:
            # Opener(s) never closed before EOF.
            return (
                brace_stack[-1],
                "'`%s`' at `line %d <%s>`_ is not closed by the end of the program"
                % (
                    brace_stack[-1].string,
                    brace_stack[-1].start[0],
                    assistance.format_file_url(
                        self.error_info["filename"],
                        brace_stack[-1].start[0],
                        brace_stack[-1].start[1],
                    ),
                ),
            )

        return None
class NameErrorHelper(ErrorHelper):
def __init__(self, error_info):
super().__init__(error_info)
names = re.findall(r"\'.*\'", error_info["message"])
assert len(names) == 1
self.name = names[0].strip("'")
self.intro_text = "Python doesn't know what `%s` stands for." % self.name
self.suggestions = [
self._sug_bad_spelling(),
self._sug_missing_quotes(),
self._sug_missing_import(),
self._sug_local_from_global(),
self._sug_not_defined_yet(),
]
def _sug_missing_quotes(self):
if self._is_attribute_value() or self._is_call_function() or self._is_subscript_value():
relevance = 0
else:
relevance = 5
return Suggestion(
"missing-quotes",
"Did you actually mean string (text)?",
'If you didn\'t mean a variable but literal text "%s", then surround it with quotes.'
% self.name,
relevance,
)
def _sug_bad_spelling(self):
# Yes, it would be more proper to consult builtins from the backend,
# but it's easier this way...
all_names = {name for name in dir(builtins) if not name.startswith("_")}
all_names |= {"pass", "break", "continue", "return", "yield"}
if self.last_frame.globals is not None:
all_names |= set(self.last_frame.globals.keys())
if self.last_frame.locals is not None:
all_names |= set(self.last_frame.locals.keys())
similar_names = {self.name}
if all_names:
relevance = 0
for name in all_names:
sim = name_similarity(name, self.name)
if sim > 4:
similar_names.add(name)
relevance = max(sim, relevance)
else:
relevance = 3
if len(similar_names) > 1:
body = "I found similar names. Are all of them spelled correctly?\n\n"
for name in sorted(similar_names, key=lambda x: x.lower()):
# TODO: add location info
body += "* `%s`\n\n" % name
else:
body = (
"Compare the name with corresponding definition / assignment / documentation."
+ " Don't forget that case of the letters matters!"
)
return Suggestion("bad-spelling-name", "Did you misspell it (somewhere)?", body, relevance)
def _sug_missing_import(self):
likely_importable_functions = {
"math": {"ceil", "floor", "sqrt", "sin", "cos", "degrees"},
"random": {"randint"},
"turtle": {
"left",
"right",
"forward",
"fd",
"goto",
"setpos",
"Turtle",
"penup",
"up",
"pendown",
"down",
"color",
"pencolor",
"fillcolor",
"begin_fill",
"end_fill",
"pensize",
"width",
},
"re": {"search", "match", "findall"},
"datetime": {"date", "time", "datetime", "today"},
"statistics": {
"mean",
"median",
"median_low",
"median_high",
"mode",
"pstdev",
"pvariance",
"stdev",
"variance",
},
"os": {"listdir"},
"time": {"time", "sleep"},
}
body = None
if self._is_call_function():
relevance = 5
for mod in likely_importable_functions:
if self.name in likely_importable_functions[mod]:
relevance += 3
body = (
"If you meant `%s` from module `%s`, then add\n\n`from %s import %s`\n\nto the beginning of your script."
% (self.name, mod, mod, self.name)
)
break
elif self._is_attribute_value():
relevance = 5
body = (
"If you meant module `%s`, then add `import %s` to the beginning of your script"
% (self.name, self.name)
)
if self.name in likely_importable_functions:
relevance += 3
elif self._is_subscript_value() and self.name != "argv":
relevance = 0
elif self.name == "pi":
body = "If you meant the constant π, then add `from math import pi` to the beginning of your script."
relevance = 8
elif self.name == "argv":
body = "If you meant the list with program arguments, then add `from sys import argv` to the beginning of your script."
relevance = 8
else:
relevance = 3
if body is None:
body = "Some functions/variables need to be imported before they can be used."
return Suggestion("missing-import", "Did you forget to import it?", body, relevance)
def _sug_local_from_global(self):
relevance = 0
body = None
if self.last_frame.code_name == "<module>" and self.last_frame_module_ast is not None:
function_names = set()
for node in ast.walk(self.last_frame_module_ast):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
if self.name in map(lambda x: x.arg, node.args.args):
function_names.add(node.name)
# TODO: varargs, kw, ...
declared_global = False
for localnode in ast.walk(node):
# print(node.name, localnode)
if (
isinstance(localnode, ast.Name)
and localnode.id == self.name
and isinstance(localnode.ctx, ast.Store)
):
function_names.add(node.name)
elif isinstance(localnode, ast.Global) and self.name in localnode.names:
declared_global = True
if node.name in function_names and declared_global:
function_names.remove(node.name)
if function_names:
relevance = 9
body = (
(
"Name `%s` defined in `%s` is not accessible in the global/module level."
% (self.name, " and ".join(function_names))
)
+ "\n\nIf you need that data at the global level, then consider changing the function so that it `return`-s the value."
)
return Suggestion(
"local-from-global",
"Are you trying to acces a local variable outside of the function?",
body,
relevance,
)
def _sug_not_defined_yet(self):
return Suggestion(
"not-defined-yet",
"Has Python executed the definition?",
(
"Don't forget that name becomes defined when corresponding definition ('=', 'def' or 'import') gets executed."
+ " If the definition comes later in code or is inside an if-statement, Python may not have executed it (yet)."
+ "\n\n"
+ "Make sure Python arrives to the definition before it arrives to this line. When in doubt, "
+ "`use the debugger <debuggers.rst>`_."
),
2,
)
def _sug_maybe_attribute(self):
"TODO:"
def _sug_synonym(self):
"TODO:"
def _is_call_function(self):
return self.name + "(" in (
self.error_info["line"].replace(" ", "").replace("\n", "").replace("\r", "")
)
def _is_subscript_value(self):
return self.name + "[" in (
self.error_info["line"].replace(" ", "").replace("\n", "").replace("\r", "")
)
def _is_attribute_value(self):
return self.name + "." in (
self.error_info["line"].replace(" ", "").replace("\n", "").replace("\r", "")
)
class AttributeErrorHelper(ErrorHelper):
def __init__(self, error_info):
super().__init__(error_info)
names = re.findall(r"\'.*?\'", error_info["message"])
assert len(names) == 2
self.type_name = names[0].strip("'")
self.att_name = names[1].strip("'")
self.intro_text = (
"Your program tries to "
+ ("call method " if self._is_call_function() else "access attribute ")
+ "`%s` of " % self.att_name
+ _get_phrase_for_object(self.type_name)
+ ", but this type doesn't have such "
+ ("method." if self._is_call_function() else "attribute.")
)
self.suggestions = [
self._sug_wrong_attribute_instead_of_len(),
self._sug_bad_spelling(),
self._sug_bad_type(),
]
def _sug_wrong_attribute_instead_of_len(self):
if self.type_name == "str":
goal = "length"
elif self.type_name == "bytes":
goal = "number of bytes"
elif self.type_name == "list":
goal = "number of elements"
elif self.type_name == "tuple":
goal = "number of elements"
elif self.type_name == "set":
goal = "number of elements"
elif self.type_name == "dict":
goal = "number of entries"
else:
return
return Suggestion(
"wrong-attribute-instead-of-len",
"Did you mean to ask the %s?" % goal,
"This can be done with function `len`, eg:\n\n`len(%s)`"
% _get_sample_for_type(self.type_name),
(9 if self.att_name.lower() in ("len", "length", "size") else 0),
)
def _sug_bad_spelling(self):
# TODO: compare with attributes of known types
return Suggestion(
"bad-spelling-attribute",
"Did you misspell the name?",
"Don't forget that case of the letters matters too!",
3,
)
def _sug_bad_type(self):
if self._is_call_function():
action = "call this function on"
else:
action = "ask this attribute from"
return Suggestion(
"wrong-type-attribute",
"Did you expect another type?",
"If you didn't mean %s %s, " % (action, _get_phrase_for_object(self.type_name))
+ "then step through your program to see "
+ "why this type appears here.",
3,
)
def _is_call_function(self):
return "." + self.att_name + "(" in (
self.error_info["line"].replace(" ", "").replace("\n", "").replace("\r", "")
)
class OSErrorHelper(ErrorHelper):
def __init__(self, error_info):
super().__init__(error_info)
if "Address already in use" in self.error_info["message"]:
self.intro_text = "Your programs tries to listen on a port which is already taken."
self.suggestions = [
Suggestion(
"kill-by-port-type-error",
"Want to close the other process?",
self.get_kill_process_instructions(),
5,
),
Suggestion(
"use-another-type-error",
"Can you use another port?",
"If you don't want to mess with the other process, then check whether"
+ " you can configure your program to use another port.",
3,
),
]
else:
self.intro_text = "No specific information is available for this error."
def get_kill_process_instructions(self):
s = (
"Let's say you need port 5000. If you don't know which process is using it,"
+ " then enter following system command into Thonny's Shell:\n\n"
)
if running_on_windows():
s += (
"``!netstat -ano | findstr :5000``\n\n"
+ "You should see the process ID in the last column.\n\n"
)
else:
s += (
"``!lsof -i:5000``\n\n" + "You should see the process ID under the heading PID.\n\n"
)
s += (
"Let's pretend the ID is 12345."
" You can try hard-killing the process with following command:\n\n"
)
if running_on_windows():
s += "``!tskill 12345``\n"
else:
s += (
"``!kill -9 12345``\n\n"
+ "Both steps can be combined into single command:\n\n"
+ "``!kill -9 $(lsof -t -i:5000)``\n\n"
)
return s
class TypeErrorHelper(ErrorHelper):
def __init__(self, error_info):
super().__init__(error_info)
self.intro_text = (
"Python was asked to do an operation with an object which " + "doesn't support it."
)
self.suggestions = [
Suggestion(
"step-to-find-type-error",
"Did you expect another type?",
"Step through your program to see why this type appears here.",
3,
),
Suggestion(
"look-documentation-type-error",
"Maybe you forgot some details about this operation?",
"Look up the documentation or perform a web search with the error message.",
2,
),
]
# overwrite / add for special cases
# something + str or str + something
for r, string_first in [
(r"unsupported operand type\(s\) for \+: '(.+?)' and 'str'", False),
(r"^Can't convert '(.+?)' object to str implicitly$", True), # Python 3.5
(r"^must be str, not (.+)$", True), # Python 3.6
(r'^can only concatenate str (not "(.+?)") to str$', True), # Python 3.7
]:
m = re.match(r, error_info["message"], re.I) # @UndefinedVariable
if m is not None:
self._bad_string_concatenation(m.group(1), string_first)
return
# TODO: other operations, when one side is string
def _bad_string_concatenation(self, other_type_name, string_first):
self.intro_text = "Your program is trying to put together " + (
"a string and %s." if string_first else "%s and a string."
) % _get_phrase_for_object(other_type_name)
self.suggestions.append(
Suggestion(
"convert-other-operand-to-string",
"Did you mean to treat both sides as text and produce a string?",
"In this case you should apply function `str` to the %s "
% _get_phrase_for_object(other_type_name, False)
+ "in order to convert it to string first, eg:\n\n"
+ ("`'abc' + str(%s)`" if string_first else "`str(%s) + 'abc'`")
% _get_sample_for_type(other_type_name),
8,
)
)
if other_type_name in ("float", "int"):
self.suggestions.append(
Suggestion(
"convert-other-operand-to-number",
"Did you mean to treat both sides as numbers and produce a sum?",
"In this case you should first convert the string to a number "
+ "using either function `float` or `int`, eg:\n\n"
+ ("`float('3.14') + 22`" if string_first else "`22 + float('3.14')`"),
7,
)
)
def _get_phrase_for_object(type_name, with_article=True):
friendly_names = {
"str": "a string",
"int": "an integer",
"float": "a float",
"list": "a list",
"tuple": "a tuple",
"dict": "a dictionary",
"set": "a set",
"bool": "a boolean",
}
result = friendly_names.get(type_name, "an object of type '%s'" % type_name)
if with_article:
return result
else:
_, rest = result.split(" ", maxsplit=1)
return rest
def _get_sample_for_type(type_name):
if type_name == "int":
return "42"
elif type_name == "float":
return "3.14"
elif type_name == "str":
return "'abc'"
elif type_name == "bytes":
return "b'abc'"
elif type_name == "list":
return "[1, 2, 3]"
elif type_name == "tuple":
return "(1, 2, 3)"
elif type_name == "set":
return "{1, 2, 3}"
elif type_name == "dict":
return "{1 : 'one', 2 : 'two'}"
else:
return "..."
def load_plugin():
for name in globals():
if name.endswith("ErrorHelper") and not name.startswith("_"):
type_name = name[: -len("Helper")]
add_error_helper(type_name, globals()[name]) | 0.466116 | 0.203668 |
import sys
from pathlib import Path
import logging
import os
from file_converter_worker import FileConverterWorker
from PyQt6.QtCore import Qt, QThread
from PyQt6.QtWidgets import QApplication, QMainWindow, QFileDialog, QMessageBox
from ui.MainWindow import Ui_MainWindow
logging.basicConfig(level=logging.DEBUG)
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.converted_path = 'converted_pdfs'
self.dir_files = []
self.files_to_convert = []
self.converted_files = []
self.selected_dir = None
self.setupUi(self)
self.show()
self.dirSelectButton.clicked.connect(self.select_folder)
self.selectAllCheckbox.stateChanged.connect(
self.select_all_state_change)
self.dirList.itemSelectionChanged.connect(
self.update_selected_items_label)
self.convertButton.clicked.connect(self.click_convert_button)
self.convertedList.itemDoubleClicked.connect(self.open_file)
def open_file(self, item):
for path in self.converted_files:
if item.text() == path.name:
logging.debug(f"File Path to Open: {Path(path)}")
os.system(f"open '{Path(path)}'")
def select_folder(self):
if self.dirEdit.text().strip() == "":
self.selected_dir = QFileDialog.getExistingDirectory(
self, "Select Folder", str(Path.home()))
else:
self.selected_dir = str(Path(self.dirEdit.text()))
logging.debug(f"Selected Folder: {self.selected_dir}")
self.dirEdit.setText(self.selected_dir)
self.selectedDirLabel.setText(
f"Files in: /{Path(self.selected_dir).stem}")
self.update_files_list(self.selected_dir)
def update_files_list(self, path_str):
dir_content_generator = Path(path_str).iterdir()
self.dir_files = [x for x in dir_content_generator if x.is_file() and x.suffix.lower() in [
'.tif', '.tiff']]
logging.debug(f"Selected Folder Files: {self.dir_files}")
self.update_selected_dir_file_list()
def update_selected_dir_file_list(self):
self.dirList.clear()
self.dirList.addItems([x.name for x in self.dir_files])
def select_all_state_change(self):
state = self.selectAllCheckbox.checkState()
logging.debug(f"Checkbox State: {state}")
if state == Qt.CheckState.Checked:
self.dirList.selectAll()
elif state == Qt.CheckState.Unchecked:
self.dirList.clearSelection()
def update_selected_items_label(self):
num_selected = len(self.dirList.selectedItems())
self.numSelectedLabel.setText(f"{num_selected} files selected")
def click_convert_button(self):
self.progressBar.setValue(0)
self.progressLabel.setText("0%")
self.numConvertedLabel.setText("0 files converted")
self.convertedList.clear()
selected = [item.text() for item in self.dirList.selectedItems()]
self.files_to_convert = [
file for file in self.dir_files if file.name in selected]
logging.debug(f"Selected Items: text={self.files_to_convert}")
if not (self.files_to_convert[0].parent / self.converted_path).is_dir():
Path.mkdir(self.files_to_convert[0].parent / self.converted_path)
self._convert_files_thread()
def _convert_files_thread(self):
logging.debug("Entered convert_files_thread...")
self._thread = QThread()
self._file_converter_worker = FileConverterWorker(
self.files_to_convert, self.converted_path)
self._file_converter_worker.moveToThread(self._thread)
self._thread.started.connect(self._file_converter_worker.convert_files)
logging.debug("running convert_files worker...")
self._file_converter_worker.converted_file.connect(
self._update_state_when_file_converted)
self._file_converter_worker.progress.connect(
self._update_state_progress)
self._file_converter_worker.finished.connect(
self._update_state_finish_converting)
self._thread.finished.connect(self._thread.deleteLater)
self._thread.start()
def _update_state_when_file_converted(self, new_file_path: str):
self.statusbar.showMessage(f"converted {new_file_path.name}")
self.convertedList.addItem(new_file_path.name)
self.converted_files.append(new_file_path)
def _update_state_progress(self, index: int):
percent = int(index / len(self.files_to_convert) * 100)
self.progressBar.setValue(percent)
self.progressLabel.setText(f"{percent}%")
self.numConvertedLabel.setText(f"{index} files converted")
def _update_state_finish_converting(self):
self.statusbar.showMessage("conversion complete...")
self._thread.quit()
self._thread.wait()
msg = QMessageBox.information(
self, "Info", "File conversion is complete.")
def main():
app = QApplication(sys.argv)
window = MainWindow()
sys.exit(app.exec())
if __name__ == "__main__":
main() | tif2pdf.py | import sys
from pathlib import Path
import logging
import os
from file_converter_worker import FileConverterWorker
from PyQt6.QtCore import Qt, QThread
from PyQt6.QtWidgets import QApplication, QMainWindow, QFileDialog, QMessageBox
from ui.MainWindow import Ui_MainWindow
logging.basicConfig(level=logging.DEBUG)
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.converted_path = 'converted_pdfs'
self.dir_files = []
self.files_to_convert = []
self.converted_files = []
self.selected_dir = None
self.setupUi(self)
self.show()
self.dirSelectButton.clicked.connect(self.select_folder)
self.selectAllCheckbox.stateChanged.connect(
self.select_all_state_change)
self.dirList.itemSelectionChanged.connect(
self.update_selected_items_label)
self.convertButton.clicked.connect(self.click_convert_button)
self.convertedList.itemDoubleClicked.connect(self.open_file)
def open_file(self, item):
for path in self.converted_files:
if item.text() == path.name:
logging.debug(f"File Path to Open: {Path(path)}")
os.system(f"open '{Path(path)}'")
def select_folder(self):
if self.dirEdit.text().strip() == "":
self.selected_dir = QFileDialog.getExistingDirectory(
self, "Select Folder", str(Path.home()))
else:
self.selected_dir = str(Path(self.dirEdit.text()))
logging.debug(f"Selected Folder: {self.selected_dir}")
self.dirEdit.setText(self.selected_dir)
self.selectedDirLabel.setText(
f"Files in: /{Path(self.selected_dir).stem}")
self.update_files_list(self.selected_dir)
def update_files_list(self, path_str):
dir_content_generator = Path(path_str).iterdir()
self.dir_files = [x for x in dir_content_generator if x.is_file() and x.suffix.lower() in [
'.tif', '.tiff']]
logging.debug(f"Selected Folder Files: {self.dir_files}")
self.update_selected_dir_file_list()
def update_selected_dir_file_list(self):
self.dirList.clear()
self.dirList.addItems([x.name for x in self.dir_files])
def select_all_state_change(self):
state = self.selectAllCheckbox.checkState()
logging.debug(f"Checkbox State: {state}")
if state == Qt.CheckState.Checked:
self.dirList.selectAll()
elif state == Qt.CheckState.Unchecked:
self.dirList.clearSelection()
def update_selected_items_label(self):
num_selected = len(self.dirList.selectedItems())
self.numSelectedLabel.setText(f"{num_selected} files selected")
def click_convert_button(self):
self.progressBar.setValue(0)
self.progressLabel.setText("0%")
self.numConvertedLabel.setText("0 files converted")
self.convertedList.clear()
selected = [item.text() for item in self.dirList.selectedItems()]
self.files_to_convert = [
file for file in self.dir_files if file.name in selected]
logging.debug(f"Selected Items: text={self.files_to_convert}")
if not (self.files_to_convert[0].parent / self.converted_path).is_dir():
Path.mkdir(self.files_to_convert[0].parent / self.converted_path)
self._convert_files_thread()
def _convert_files_thread(self):
logging.debug("Entered convert_files_thread...")
self._thread = QThread()
self._file_converter_worker = FileConverterWorker(
self.files_to_convert, self.converted_path)
self._file_converter_worker.moveToThread(self._thread)
self._thread.started.connect(self._file_converter_worker.convert_files)
logging.debug("running convert_files worker...")
self._file_converter_worker.converted_file.connect(
self._update_state_when_file_converted)
self._file_converter_worker.progress.connect(
self._update_state_progress)
self._file_converter_worker.finished.connect(
self._update_state_finish_converting)
self._thread.finished.connect(self._thread.deleteLater)
self._thread.start()
def _update_state_when_file_converted(self, new_file_path: str):
self.statusbar.showMessage(f"converted {new_file_path.name}")
self.convertedList.addItem(new_file_path.name)
self.converted_files.append(new_file_path)
def _update_state_progress(self, index: int):
percent = int(index / len(self.files_to_convert) * 100)
self.progressBar.setValue(percent)
self.progressLabel.setText(f"{percent}%")
self.numConvertedLabel.setText(f"{index} files converted")
def _update_state_finish_converting(self):
self.statusbar.showMessage("conversion complete...")
self._thread.quit()
self._thread.wait()
msg = QMessageBox.information(
self, "Info", "File conversion is complete.")
def main():
app = QApplication(sys.argv)
window = MainWindow()
sys.exit(app.exec())
if __name__ == "__main__":
main() | 0.221182 | 0.084871 |
import tg
import time
from mailtemplates.lib import MailTemplatesError
from mailtemplates.lib import TemplateFiller
from mailtemplates.lib import send_email
from tg.util.webtest import test_context
from tgext.asyncjob.queue import AsyncJobQueue
from tgext.mailer import get_mailer
from tgext.pluggable import app_model
from mailtemplates import model
from pyquery import PyQuery as pq
from .base import configure_app, create_app, flush_db_changes
import re
import mock
find_urls = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
class MailTemplatesControllerTests(object):
def setup(self):
self.app = create_app(self.app_config, False)
m1 = model.provider.create(model.MailModel, dict(name=u'Email', usage=u'Usage'))
model.provider.create(model.TemplateTranslation, dict(language=u'EN', mail_model=m1, subject=u'Subject',
body=u'''<div>${body}</div>'''))
m2 = model.provider.create(model.MailModel, dict(name=u'TranslateEmail', usage=u'Usage'))
model.provider.create(model.TemplateTranslation, dict(language=u'IT', mail_model=m2, subject=u'Subject',
body=u'''<py:extends href="mailtemplates.templates.md_rich_email_base">
<py:block name="a">${mail_title}</py:block>
altro testo qui dentro
</py:extends>'''))
model.provider.create(model.TemplateTranslation, dict(language=u'EN', mail_model=m2, subject=u'soggetto',
body=u'''<div><py:block name="a">${mail_title}</py:block>
other text
</div>'''))
flush_db_changes()
self.body_formatted = "<div>${body}</div>"
def test_index(self):
resp = self.app.get('/')
assert 'HELLO' in resp.text
def test_mailtemplates(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(mail_model_id=mail_model._id))
translation = translation[0]
resp = self.app.get('/mailtemplates', extra_environ={'REMOTE_USER': 'manager'})
assert mail_model.name in resp, resp
assert translation.language in resp, resp
assert self.body_formatted in resp, resp
assert translation.subject in resp, resp
def test_new_translation(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
resp = self.app.get('/mailtemplates/new_translation?model_id=' + str(mail_model._id),
extra_environ={'REMOTE_USER': 'manager'})
d = pq(resp.body)
assert d('#model_id').val() == str(mail_model._id), (d('#model_id').val(), str(mail_model._id))
def test_edit_translation(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(mail_model_id=mail_model._id))
translation = translation[0]
resp = self.app.get('/mailtemplates/edit_translation?translation_id=' + str(translation._id),
extra_environ={'REMOTE_USER': 'manager'})
d = pq(resp.body)
assert d('#body').text() == translation.body, (d('#body').text(), translation.body)
assert d('#subject').val() == translation.subject, (d('#subject').val(), translation.subject)
assert d('#language').val() == translation.language, (d('#language').val(), translation.language)
def test_edit_non_existent_translation(self):
resp = self.app.get('/mailtemplates/edit_translation', params={'translation_id': 999},
extra_environ={'REMOTE_USER': 'manager'},
status=404)
def test_create_translation(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
resp = self.app.get('/mailtemplates/create_translation', params={'model_id': mail_model._id,
'language': 'IT',
'body': '<div>This is a body</div>',
'subject': 'my_subject'},
extra_environ={
'REMOTE_USER': 'manager'}, status=302)
resp = resp.follow(extra_environ={'REMOTE_USER': 'manager'}, status=200)
__, translation = model.provider.query(model.TemplateTranslation, filters={'subject': 'my_subject'})
assert translation, translation
def test_create_translation_no_model(self):
resp = self.app.get('/mailtemplates/create_translation', params={'model_id': 100,
'language': 'JR',
'body': '<div>This is a body</div>',
'subject': 'subject'},
extra_environ={
'REMOTE_USER': 'manager'}, status=404)
def test_update_translation(self):
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(subject=u'Subject'))
translation = translation[0]
resp = self.app.get('/mailtemplates/update_translation', params={'translation_id': translation._id,
'language': 'EN',
'subject': 'Subject'},
extra_environ={
'REMOTE_USER': 'manager'}, status=302)
resp = resp.follow(extra_environ={'REMOTE_USER': 'manager'}, status=200)
__, translation = model.provider.query(model.TemplateTranslation, filters={'_id': translation._id})
assert translation, translation
def test_update_translation_no_translation(self):
resp = self.app.get('/mailtemplates/update_translation', params={'translation_id': 200},
extra_environ={
'REMOTE_USER': 'manager'}, status=404)
def test_update_translation_already_in(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
model.provider.create(model.TemplateTranslation,
dict(language=u'FR',
mail_model=mail_model,
subject=u'sub',
body=u'''<div></div>'''))
flush_db_changes()
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(subject=u'Subject'))
translation = translation[0]
resp = self.app.get('/mailtemplates/update_translation',
params={'translation_id': translation._id,
'body': '<div>This is a body</div>',
'language': 'FR',
'subject': 'Subject'},
extra_environ={
'REMOTE_USER': 'manager'}, status=302)
resp = resp.follow(extra_environ={'REMOTE_USER': 'manager'})
def test_new_model(self):
resp = self.app.get('/mailtemplates/new_model', extra_environ={'REMOTE_USER': 'manager'},
status=200)
assert tg.config['_mailtemplates']['default_language'] in resp, resp
def test_create_model(self):
resp = self.app.get('/mailtemplates/create_model', params={'name': u'Model', 'usage': 'usage1',
'language': 'IT',
'body': '<div>This is a body</div>',
'subject': 'subject'},
extra_environ={'REMOTE_USER': 'manager'}, status=302)
resp = resp.follow(extra_environ={'REMOTE_USER': 'manager'}, status=200)
__, mail_model = model.provider.query(model.MailModel, filters={'name': u'Model', 'usage': 'usage1'})
assert mail_model[0].name == 'Model', mail_model[0].name
def test_test_email(self):
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(language='EN'))
translation = translation[0]
resp = self.app.get('/mailtemplates/test_email', params=dict(translation_id=translation._id, language='EN'),
extra_environ={'REMOTE_USER': 'manager'}, status=200)
assert 'Send Test Email' in resp, resp
def test_send_test_email(self):
with test_context(self.app):
app_globals = tg.app_globals._current_obj()
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(language='EN'))
translation = translation[0]
resp = self.app.get('/mailtemplates/send_test_email',
params=dict(translation_id=translation._id, language=translation.language,
body=translation.body, subject=translation.subject,
email='<EMAIL>'),
extra_environ={'REMOTE_USER': 'manager'}, status=200)
assert 'Test email sent to <EMAIL>' in resp, resp
assert app_globals.asyncjob_queue.queue.qsize() > 0, app_globals.asyncjob_queue.queue.qsize()
def test_edit_description(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
resp = self.app.get('/mailtemplates/edit_description', params={'model_id': mail_model._id},
extra_environ={'REMOTE_USER': 'manager'}, status=200)
def test_edit_description_no_model(self):
resp = self.app.get('/mailtemplates/edit_description', params={'model_id': 200},
extra_environ={'REMOTE_USER': 'manager'}, status=404)
def test_update_description(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
resp = self.app.get('/mailtemplates/update_description', params={'model_id': mail_model._id,
'description': 'new description'},
extra_environ={'REMOTE_USER': 'manager'}, status=302)
resp = resp.follow(extra_environ={'REMOTE_USER': 'manager'}, status=200)
assert 'Model description edited.' in resp, resp
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
assert mail_model.usage == 'new description', mail_model.usage
def test_update_description_no_desc(self):
resp = self.app.get('/mailtemplates/update_description', params={'model_id': 200,
'description': 'new description'},
extra_environ={'REMOTE_USER': 'manager'}, status=404)
def test_validate_template(self):
resp = self.app.get('/mailtemplates/validate_template', params={'language': 'EN',
'body': '<div>${body}</div>'},
extra_environ={'REMOTE_USER': 'manager'}, status=200)
def test_validate_template_edit(self):
resp = self.app.get('/mailtemplates/validate_template_edit', params={'language': 'EN',
'body': '<div>${body}</div>'},
extra_environ={'REMOTE_USER': 'manager'}, status=200)
def test_validate_template_model(self):
resp = self.app.get('/mailtemplates/validate_template_model', params={'language': 'EN',
'body': '<div>${body}</div>',
'name': 'name',
'usage': 'usage'},
extra_environ={'REMOTE_USER': 'manager'}, status=200)
def test_send_email_async(self):
with test_context(self.app):
app_globals = tg.app_globals._current_obj()
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
send_email(recipients=['<EMAIL>'], sender='<NAME> <<EMAIL>>',
mail_model_name=mail_model.name, data=dict(body='body'), send_async=True
)
assert app_globals.asyncjob_queue.queue.qsize() > 0, app_globals.asyncjob_queue.queue.qsize()
@mock.patch('mailtemplates.lib._get_request', return_value=None)
def test_send_email(self, _):
with test_context(self.app):
app_globals = tg.app_globals._current_obj()
mailer = get_mailer(app_globals)
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
send_email(recipients=['<EMAIL>'], sender='<NAME> <<EMAIL>>',
mail_model_name=mail_model.name, data=dict(body='body'))
assert len(mailer.outbox) > 0, mailer.outbox
@mock.patch('mailtemplates.lib._get_request', return_value=None)
def test_send_email_recipients_not_list(self, _):
with test_context(self.app):
app_globals = tg.app_globals._current_obj()
mailer = get_mailer(app_globals)
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
send_email(recipients='<EMAIL>', sender='<NAME> <<EMAIL>>',
mail_model_name=mail_model.name, data=dict(body='body'))
assert len(mailer.outbox) > 0, mailer.outbox
def test_send_email_no_model(self):
try:
send_email(recipients=['<EMAIL>'], sender='<NAME> <<EMAIL>>',
mail_model_name='No model', data=dict(body='body'))
except MailTemplatesError as e:
assert 'Mail model \'No model\' not found' in str(e)
def test_send_email_no_translation(self):
try:
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
send_email(recipients=['<EMAIL>'], sender='<NAME> <<EMAIL>>',
mail_model_name=mail_model.name, translation='RU', data=dict(body='body'))
except MailTemplatesError as e:
assert 'Translation for this mail model not found' in str(e)
def test_template_filler(self):
t = TemplateFiller(name='name')
assert str(t.prop) == 'prop', t.prop
assert str(t['attr']) == 'attr', t['attr']
@mock.patch('mailtemplates.lib._get_request', return_value=None)
def test_kajiki_with_context(self, _):
with test_context(self.app):
send_email(
recipients=['<EMAIL>'],
sender='<NAME> <<EMAIL>>',
translation='IT',
mail_model_name=u'TranslateEmail',
data=dict(body='body', mail_title='titolo mail'),
send_async=False,
)
def test_kajiki_with_context_async(self):
# tgext.asyncjob can't start an asyncjob without a context.
with test_context(self.app):
send_email(
recipients=['<EMAIL>'],
sender='<NAME> <<EMAIL>>',
translation='IT',
mail_model_name=u'TranslateEmail',
data=dict(body='body', mail_title='titolo mail'),
send_async=True
)
# TODO: test tgext.celery integration:
class TestMailTemplatesControllerSQLA(MailTemplatesControllerTests):
@classmethod
def setupClass(cls):
cls.app_config = configure_app('sqlalchemy')
class TestMailTemplatesControllerMing(MailTemplatesControllerTests):
@classmethod
def setupClass(cls):
cls.app_config = configure_app('ming') | tests/test_controller.py | import tg
import time
from mailtemplates.lib import MailTemplatesError
from mailtemplates.lib import TemplateFiller
from mailtemplates.lib import send_email
from tg.util.webtest import test_context
from tgext.asyncjob.queue import AsyncJobQueue
from tgext.mailer import get_mailer
from tgext.pluggable import app_model
from mailtemplates import model
from pyquery import PyQuery as pq
from .base import configure_app, create_app, flush_db_changes
import re
import mock
find_urls = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
class MailTemplatesControllerTests(object):
def setup(self):
self.app = create_app(self.app_config, False)
m1 = model.provider.create(model.MailModel, dict(name=u'Email', usage=u'Usage'))
model.provider.create(model.TemplateTranslation, dict(language=u'EN', mail_model=m1, subject=u'Subject',
body=u'''<div>${body}</div>'''))
m2 = model.provider.create(model.MailModel, dict(name=u'TranslateEmail', usage=u'Usage'))
model.provider.create(model.TemplateTranslation, dict(language=u'IT', mail_model=m2, subject=u'Subject',
body=u'''<py:extends href="mailtemplates.templates.md_rich_email_base">
<py:block name="a">${mail_title}</py:block>
altro testo qui dentro
</py:extends>'''))
model.provider.create(model.TemplateTranslation, dict(language=u'EN', mail_model=m2, subject=u'soggetto',
body=u'''<div><py:block name="a">${mail_title}</py:block>
other text
</div>'''))
flush_db_changes()
self.body_formatted = "<div>${body}</div>"
def test_index(self):
resp = self.app.get('/')
assert 'HELLO' in resp.text
def test_mailtemplates(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(mail_model_id=mail_model._id))
translation = translation[0]
resp = self.app.get('/mailtemplates', extra_environ={'REMOTE_USER': 'manager'})
assert mail_model.name in resp, resp
assert translation.language in resp, resp
assert self.body_formatted in resp, resp
assert translation.subject in resp, resp
def test_new_translation(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
resp = self.app.get('/mailtemplates/new_translation?model_id=' + str(mail_model._id),
extra_environ={'REMOTE_USER': 'manager'})
d = pq(resp.body)
assert d('#model_id').val() == str(mail_model._id), (d('#model_id').val(), str(mail_model._id))
def test_edit_translation(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(mail_model_id=mail_model._id))
translation = translation[0]
resp = self.app.get('/mailtemplates/edit_translation?translation_id=' + str(translation._id),
extra_environ={'REMOTE_USER': 'manager'})
d = pq(resp.body)
assert d('#body').text() == translation.body, (d('#body').text(), translation.body)
assert d('#subject').val() == translation.subject, (d('#subject').val(), translation.subject)
assert d('#language').val() == translation.language, (d('#language').val(), translation.language)
def test_edit_non_existent_translation(self):
resp = self.app.get('/mailtemplates/edit_translation', params={'translation_id': 999},
extra_environ={'REMOTE_USER': 'manager'},
status=404)
def test_create_translation(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
resp = self.app.get('/mailtemplates/create_translation', params={'model_id': mail_model._id,
'language': 'IT',
'body': '<div>This is a body</div>',
'subject': 'my_subject'},
extra_environ={
'REMOTE_USER': 'manager'}, status=302)
resp = resp.follow(extra_environ={'REMOTE_USER': 'manager'}, status=200)
__, translation = model.provider.query(model.TemplateTranslation, filters={'subject': 'my_subject'})
assert translation, translation
def test_create_translation_no_model(self):
resp = self.app.get('/mailtemplates/create_translation', params={'model_id': 100,
'language': 'JR',
'body': '<div>This is a body</div>',
'subject': 'subject'},
extra_environ={
'REMOTE_USER': 'manager'}, status=404)
def test_update_translation(self):
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(subject=u'Subject'))
translation = translation[0]
resp = self.app.get('/mailtemplates/update_translation', params={'translation_id': translation._id,
'language': 'EN',
'subject': 'Subject'},
extra_environ={
'REMOTE_USER': 'manager'}, status=302)
resp = resp.follow(extra_environ={'REMOTE_USER': 'manager'}, status=200)
__, translation = model.provider.query(model.TemplateTranslation, filters={'_id': translation._id})
assert translation, translation
def test_update_translation_no_translation(self):
resp = self.app.get('/mailtemplates/update_translation', params={'translation_id': 200},
extra_environ={
'REMOTE_USER': 'manager'}, status=404)
def test_update_translation_already_in(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
model.provider.create(model.TemplateTranslation,
dict(language=u'FR',
mail_model=mail_model,
subject=u'sub',
body=u'''<div></div>'''))
flush_db_changes()
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(subject=u'Subject'))
translation = translation[0]
resp = self.app.get('/mailtemplates/update_translation',
params={'translation_id': translation._id,
'body': '<div>This is a body</div>',
'language': 'FR',
'subject': 'Subject'},
extra_environ={
'REMOTE_USER': 'manager'}, status=302)
resp = resp.follow(extra_environ={'REMOTE_USER': 'manager'})
def test_new_model(self):
resp = self.app.get('/mailtemplates/new_model', extra_environ={'REMOTE_USER': 'manager'},
status=200)
assert tg.config['_mailtemplates']['default_language'] in resp, resp
def test_create_model(self):
resp = self.app.get('/mailtemplates/create_model', params={'name': u'Model', 'usage': 'usage1',
'language': 'IT',
'body': '<div>This is a body</div>',
'subject': 'subject'},
extra_environ={'REMOTE_USER': 'manager'}, status=302)
resp = resp.follow(extra_environ={'REMOTE_USER': 'manager'}, status=200)
__, mail_model = model.provider.query(model.MailModel, filters={'name': u'Model', 'usage': 'usage1'})
assert mail_model[0].name == 'Model', mail_model[0].name
def test_test_email(self):
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(language='EN'))
translation = translation[0]
resp = self.app.get('/mailtemplates/test_email', params=dict(translation_id=translation._id, language='EN'),
extra_environ={'REMOTE_USER': 'manager'}, status=200)
assert 'Send Test Email' in resp, resp
def test_send_test_email(self):
with test_context(self.app):
app_globals = tg.app_globals._current_obj()
__, translation = model.provider.query(model.TemplateTranslation,
filters=dict(language='EN'))
translation = translation[0]
resp = self.app.get('/mailtemplates/send_test_email',
params=dict(translation_id=translation._id, language=translation.language,
body=translation.body, subject=translation.subject,
email='<EMAIL>'),
extra_environ={'REMOTE_USER': 'manager'}, status=200)
assert 'Test email sent to <EMAIL>' in resp, resp
assert app_globals.asyncjob_queue.queue.qsize() > 0, app_globals.asyncjob_queue.queue.qsize()
def test_edit_description(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
resp = self.app.get('/mailtemplates/edit_description', params={'model_id': mail_model._id},
extra_environ={'REMOTE_USER': 'manager'}, status=200)
def test_edit_description_no_model(self):
resp = self.app.get('/mailtemplates/edit_description', params={'model_id': 200},
extra_environ={'REMOTE_USER': 'manager'}, status=404)
def test_update_description(self):
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
resp = self.app.get('/mailtemplates/update_description', params={'model_id': mail_model._id,
'description': 'new description'},
extra_environ={'REMOTE_USER': 'manager'}, status=302)
resp = resp.follow(extra_environ={'REMOTE_USER': 'manager'}, status=200)
assert 'Model description edited.' in resp, resp
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
assert mail_model.usage == 'new description', mail_model.usage
def test_update_description_no_desc(self):
resp = self.app.get('/mailtemplates/update_description', params={'model_id': 200,
'description': 'new description'},
extra_environ={'REMOTE_USER': 'manager'}, status=404)
def test_validate_template(self):
resp = self.app.get('/mailtemplates/validate_template', params={'language': 'EN',
'body': '<div>${body}</div>'},
extra_environ={'REMOTE_USER': 'manager'}, status=200)
def test_validate_template_edit(self):
resp = self.app.get('/mailtemplates/validate_template_edit', params={'language': 'EN',
'body': '<div>${body}</div>'},
extra_environ={'REMOTE_USER': 'manager'}, status=200)
def test_validate_template_model(self):
resp = self.app.get('/mailtemplates/validate_template_model', params={'language': 'EN',
'body': '<div>${body}</div>',
'name': 'name',
'usage': 'usage'},
extra_environ={'REMOTE_USER': 'manager'}, status=200)
def test_send_email_async(self):
with test_context(self.app):
app_globals = tg.app_globals._current_obj()
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
send_email(recipients=['<EMAIL>'], sender='<NAME> <<EMAIL>>',
mail_model_name=mail_model.name, data=dict(body='body'), send_async=True
)
assert app_globals.asyncjob_queue.queue.qsize() > 0, app_globals.asyncjob_queue.queue.qsize()
@mock.patch('mailtemplates.lib._get_request', return_value=None)
def test_send_email(self, _):
with test_context(self.app):
app_globals = tg.app_globals._current_obj()
mailer = get_mailer(app_globals)
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
send_email(recipients=['<EMAIL>'], sender='<NAME> <<EMAIL>>',
mail_model_name=mail_model.name, data=dict(body='body'))
assert len(mailer.outbox) > 0, mailer.outbox
@mock.patch('mailtemplates.lib._get_request', return_value=None)
def test_send_email_recipients_not_list(self, _):
with test_context(self.app):
app_globals = tg.app_globals._current_obj()
mailer = get_mailer(app_globals)
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
send_email(recipients='<EMAIL>', sender='<NAME> <<EMAIL>>',
mail_model_name=mail_model.name, data=dict(body='body'))
assert len(mailer.outbox) > 0, mailer.outbox
def test_send_email_no_model(self):
try:
send_email(recipients=['<EMAIL>'], sender='<NAME> <<EMAIL>>',
mail_model_name='No model', data=dict(body='body'))
except MailTemplatesError as e:
assert 'Mail model \'No model\' not found' in str(e)
def test_send_email_no_translation(self):
try:
__, mail_model = model.provider.query(model.MailModel, filters=dict(name=u'Email'))
mail_model = mail_model[0]
send_email(recipients=['<EMAIL>'], sender='<NAME> <<EMAIL>>',
mail_model_name=mail_model.name, translation='RU', data=dict(body='body'))
except MailTemplatesError as e:
assert 'Translation for this mail model not found' in str(e)
def test_template_filler(self):
t = TemplateFiller(name='name')
assert str(t.prop) == 'prop', t.prop
assert str(t['attr']) == 'attr', t['attr']
@mock.patch('mailtemplates.lib._get_request', return_value=None)
def test_kajiki_with_context(self, _):
with test_context(self.app):
send_email(
recipients=['<EMAIL>'],
sender='<NAME> <<EMAIL>>',
translation='IT',
mail_model_name=u'TranslateEmail',
data=dict(body='body', mail_title='titolo mail'),
send_async=False,
)
def test_kajiki_with_context_async(self):
# tgext.asyncjob can't start an asyncjob without a context.
with test_context(self.app):
send_email(
recipients=['<EMAIL>'],
sender='<NAME> <<EMAIL>>',
translation='IT',
mail_model_name=u'TranslateEmail',
data=dict(body='body', mail_title='titolo mail'),
send_async=True
)
# TODO: test tgext.celery integration:
class TestMailTemplatesControllerSQLA(MailTemplatesControllerTests):
@classmethod
def setupClass(cls):
cls.app_config = configure_app('sqlalchemy')
class TestMailTemplatesControllerMing(MailTemplatesControllerTests):
@classmethod
def setupClass(cls):
cls.app_config = configure_app('ming') | 0.366136 | 0.086825 |
class PID:
""" Simple PID control.
This class implements a simplistic PID control algorithm. When first
instantiated all the gain variables are set to zero, so calling
the method GenOut will just return zero.
"""
def __init__(self):
# initialze gains
self.Kp = 0
self.Kd = 0
self.Ki = 0
self.dt = 0
self.Initialize()
def SetKp(self, invar):
""" Set proportional gain. """
self.Kp = invar
def SetKi(self, invar):
""" Set integral gain. """
self.Ki = invar
def SetKd(self, invar):
""" Set derivative gain. """
self.Kd = invar
def SetPrevErr(self, preverr):
""" Set previous error value. """
self.prev_err = preverr
def Initialize(self):
# initialize delta t variables
self.currtm = None
self.prevtm = None
self.prev_err = 0
# term result variables
self.Cp = 0
self.Ci = 0
self.Cd = 0
def GenOut(self, error, time=None):
""" Performs a PID computation and returns a control value based on
the elapsed time (dt) and the error signal from a summing junction
(the error parameter).
"""
if time is None:
self.currtm = get_time() # get t
else:
self.currtm = time
# at first call, we don't have a valid prevtime and therefore cannot
# comput a valid dt. Just set prev_err and prevtm and return with 0
# (i.e. don't do something in the first step).
if self.prevtm is None:
self.prev_err = error
self.prevtm = self.currtm
return 0
dt = self.currtm - self.prevtm # get delta t
de = error - self.prev_err # get delta error
self.Cp = self.Kp * error # proportional term
self.Ci += error * dt # integral term
self.Cd = 0
if dt > 0: # no div by zero
self.Cd = de/dt # derivative term
self.prevtm = self.currtm # save t for next pass
self.prev_err = error # save t-1 error
self.dt = dt
# sum the terms and return the result
return self.Cp + (self.Ki * self.Ci) + (self.Kd * self.Cd) | src/blmc/pid.py |
class PID:
""" Simple PID control.
This class implements a simplistic PID control algorithm. When first
instantiated all the gain variables are set to zero, so calling
the method GenOut will just return zero.
"""
def __init__(self):
# initialze gains
self.Kp = 0
self.Kd = 0
self.Ki = 0
self.dt = 0
self.Initialize()
def SetKp(self, invar):
""" Set proportional gain. """
self.Kp = invar
def SetKi(self, invar):
""" Set integral gain. """
self.Ki = invar
def SetKd(self, invar):
""" Set derivative gain. """
self.Kd = invar
def SetPrevErr(self, preverr):
""" Set previous error value. """
self.prev_err = preverr
def Initialize(self):
# initialize delta t variables
self.currtm = None
self.prevtm = None
self.prev_err = 0
# term result variables
self.Cp = 0
self.Ci = 0
self.Cd = 0
def GenOut(self, error, time=None):
""" Performs a PID computation and returns a control value based on
the elapsed time (dt) and the error signal from a summing junction
(the error parameter).
"""
if time is None:
self.currtm = get_time() # get t
else:
self.currtm = time
# at first call, we don't have a valid prevtime and therefore cannot
# comput a valid dt. Just set prev_err and prevtm and return with 0
# (i.e. don't do something in the first step).
if self.prevtm is None:
self.prev_err = error
self.prevtm = self.currtm
return 0
dt = self.currtm - self.prevtm # get delta t
de = error - self.prev_err # get delta error
self.Cp = self.Kp * error # proportional term
self.Ci += error * dt # integral term
self.Cd = 0
if dt > 0: # no div by zero
self.Cd = de/dt # derivative term
self.prevtm = self.currtm # save t for next pass
self.prev_err = error # save t-1 error
self.dt = dt
# sum the terms and return the result
return self.Cp + (self.Ki * self.Ci) + (self.Kd * self.Cd) | 0.747616 | 0.507202 |
from __future__ import print_function
import collections
import logging
import os
from datetime import datetime, timedelta
from glob import glob
from airflow import models
from airflow.operators.bash_operator import BashOperator
from airflow.operators.email_operator import EmailOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.sensors import ExternalTaskSensor
from google.cloud import bigquery
from ethereumetl_airflow.bigquery_utils import create_view, share_dataset_all_users_read
from ethereumetl_airflow.common import read_json_file, read_file
from ethereumetl_airflow.parse.parse_logic import ref_regex, parse, create_dataset
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
dags_folder = os.environ.get('DAGS_FOLDER', '/home/airflow/gcs/dags')
def build_parse_dag(
dag_id,
dataset_folder,
parse_destination_dataset_project_id,
notification_emails=None,
parse_start_date=datetime(2018, 7, 1),
schedule_interval='0 0 * * *',
parse_all_partitions=None,
send_success_email=False
):
logging.info('parse_all_partitions is {}'.format(parse_all_partitions))
if parse_all_partitions:
dag_id = dag_id + '_FULL'
if 'ethereum_kovan_parse' in dag_id:
SOURCE_PROJECT_ID = 'public-data-finance'
SOURCE_DATASET_NAME = 'crypto_ethereum_kovan'
PARTITION_DAG_ID = 'ethereum_kovan_partition_dag'
else:
SOURCE_PROJECT_ID = 'bigquery-public-data'
SOURCE_DATASET_NAME = 'crypto_ethereum'
PARTITION_DAG_ID = 'ethereum_partition_dag'
default_dag_args = {
'depends_on_past': True,
'start_date': parse_start_date,
'email_on_failure': True,
'email_on_retry': False,
'retries': 5,
'retry_delay': timedelta(minutes=5)
}
if notification_emails and len(notification_emails) > 0:
default_dag_args['email'] = [email.strip() for email in notification_emails.split(',')]
dag = models.DAG(
dag_id,
catchup=False,
schedule_interval=schedule_interval,
default_args=default_dag_args)
validation_error = None
try:
validate_definition_files(dataset_folder)
except ValueError as e:
validation_error = e
# This prevents failing all dags as they are constructed in a loop in ethereum_parse_dag.py
if validation_error is not None:
def raise_validation_error(ds, **kwargs):
raise validation_error
validation_error_operator = PythonOperator(
task_id='validation_error',
python_callable=raise_validation_error,
provide_context=True,
execution_timeout=timedelta(minutes=10),
dag=dag
)
return dag
def create_parse_task(table_definition):
def parse_task(ds, **kwargs):
client = bigquery.Client()
parse(
bigquery_client=client,
table_definition=table_definition,
ds=ds,
source_project_id=SOURCE_PROJECT_ID,
source_dataset_name=SOURCE_DATASET_NAME,
destination_project_id=parse_destination_dataset_project_id,
sqls_folder=os.path.join(dags_folder, 'resources/stages/parse/sqls'),
parse_all_partitions=parse_all_partitions
)
table_name = table_definition['table']['table_name']
parsing_operator = PythonOperator(
task_id=table_name,
python_callable=parse_task,
provide_context=True,
execution_timeout=timedelta(minutes=60),
dag=dag
)
contract_address = table_definition['parser']['contract_address']
if contract_address is not None:
ref_dependencies = ref_regex.findall(table_definition['parser']['contract_address'])
else:
ref_dependencies = []
return parsing_operator, ref_dependencies
def create_add_view_task(dataset_name, view_name, sql):
def create_view_task(ds, **kwargs):
client = bigquery.Client()
dest_table_name = view_name
dest_table_ref = create_dataset(client, dataset_name, parse_destination_dataset_project_id).table(dest_table_name)
print('View sql: \n' + sql)
create_view(client, sql, dest_table_ref)
create_view_operator = PythonOperator(
task_id=f'create_view_{view_name}',
python_callable=create_view_task,
provide_context=True,
execution_timeout=timedelta(minutes=10),
dag=dag
)
return create_view_operator
def create_share_dataset_task(dataset_name):
def share_dataset_task(**kwargs):
if parse_destination_dataset_project_id != 'blockchain-etl':
logging.info('Skipping sharing dataset.')
else:
client = bigquery.Client()
share_dataset_all_users_read(client, f'{parse_destination_dataset_project_id}.{dataset_name}')
share_dataset_all_users_read(client, f'{parse_destination_dataset_project_id}-internal.{dataset_name}')
share_dataset_operator = PythonOperator(
task_id='share_dataset',
python_callable=share_dataset_task,
provide_context=True,
execution_timeout=timedelta(minutes=10),
dag=dag
)
return share_dataset_operator
wait_for_ethereum_load_dag_task = ExternalTaskSensor(
task_id='wait_for_ethereum_partition_dag',
external_dag_id=PARTITION_DAG_ID,
external_task_id='done',
execution_delta=timedelta(minutes=30),
priority_weight=0,
mode='reschedule',
poke_interval=5 * 60,
timeout=60 * 60 * 12,
dag=dag)
json_files = get_list_of_files(dataset_folder, '*.json')
logging.info(json_files)
all_parse_tasks = {}
task_dependencies = {}
for json_file in json_files:
table_definition = read_json_file(json_file)
task, dependencies = create_parse_task(table_definition)
wait_for_ethereum_load_dag_task >> task
all_parse_tasks[task.task_id] = task
task_dependencies[task.task_id] = dependencies
checkpoint_task = BashOperator(
task_id='parse_all_checkpoint',
bash_command='echo parse_all_checkpoint',
priority_weight=1000,
dag=dag
)
for task, dependencies in task_dependencies.items():
for dependency in dependencies:
if dependency not in all_parse_tasks:
raise ValueError(
'Table {} is not found in the the dataset. Check your ref() in contract_address field.'.format(
dependency))
all_parse_tasks[dependency] >> all_parse_tasks[task]
all_parse_tasks[task] >> checkpoint_task
final_tasks = [checkpoint_task]
dataset_name = os.path.basename(dataset_folder)
full_dataset_name = 'ethereum_' + dataset_name
share_dataset_task = create_share_dataset_task(full_dataset_name)
checkpoint_task >> share_dataset_task
final_tasks.append(share_dataset_task)
# Create views
sql_files = get_list_of_files(dataset_folder, '*.sql')
logging.info(sql_files)
for sql_file in sql_files:
sql = read_file(sql_file)
base_name = os.path.basename(sql_file)
view_name = os.path.splitext(base_name)[0]
create_view_task = create_add_view_task(full_dataset_name, view_name, sql)
checkpoint_task >> create_view_task
final_tasks.append(create_view_task)
if notification_emails and len(notification_emails) > 0 and send_success_email:
send_email_task = EmailOperator(
task_id='send_email',
to=[email.strip() for email in notification_emails.split(',')],
subject='Ethereum ETL Airflow Parse DAG Succeeded',
html_content='Ethereum ETL Airflow Parse DAG Succeeded for {}'.format(dag_id),
dag=dag
)
for final_task in final_tasks:
final_task >> send_email_task
return dag
def get_list_of_files(dataset_folder, filter='*.json'):
logging.info('get_list_of_files')
logging.info(dataset_folder)
logging.info(os.path.join(dataset_folder, filter))
return [f for f in glob(os.path.join(dataset_folder, filter))]
def validate_definition_files(dataset_folder):
json_files = get_list_of_files(dataset_folder, '*.json')
dataset_folder_name = dataset_folder.split('/')[-1]
all_lowercase_table_names = []
for json_file in json_files:
file_name = json_file.split('/')[-1].replace('.json', '')
table_definition = read_json_file(json_file)
table = table_definition.get('table')
if not table:
raise ValueError(f'table is empty in file {json_file}')
dataset_name = table.get('dataset_name')
if not dataset_name:
raise ValueError(f'dataset_name is empty in file {json_file}')
if dataset_folder_name != dataset_name:
raise ValueError(f'dataset_name {dataset_name} is not equal to dataset_folder_name {dataset_folder_name}')
table_name = table.get('table_name')
if not table_name:
raise ValueError(f'table_name is empty in file {json_file}')
if file_name != table_name:
raise ValueError(f'file_name {file_name} doest match the table_name {table_name}')
all_lowercase_table_names.append(table_name.lower())
table_name_counts = collections.defaultdict(lambda: 0)
for table_name in all_lowercase_table_names:
table_name_counts[table_name] += 1
non_unique_table_names = [name for name, count in table_name_counts.items() if count > 1]
if len(non_unique_table_names) > 0:
raise ValueError(f'The following table names are not unique {",".join(non_unique_table_names)}') | dags/ethereumetl_airflow/build_parse_dag.py | from __future__ import print_function
import collections
import logging
import os
from datetime import datetime, timedelta
from glob import glob
from airflow import models
from airflow.operators.bash_operator import BashOperator
from airflow.operators.email_operator import EmailOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.sensors import ExternalTaskSensor
from google.cloud import bigquery
from ethereumetl_airflow.bigquery_utils import create_view, share_dataset_all_users_read
from ethereumetl_airflow.common import read_json_file, read_file
from ethereumetl_airflow.parse.parse_logic import ref_regex, parse, create_dataset
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
dags_folder = os.environ.get('DAGS_FOLDER', '/home/airflow/gcs/dags')
def build_parse_dag(
dag_id,
dataset_folder,
parse_destination_dataset_project_id,
notification_emails=None,
parse_start_date=datetime(2018, 7, 1),
schedule_interval='0 0 * * *',
parse_all_partitions=None,
send_success_email=False
):
logging.info('parse_all_partitions is {}'.format(parse_all_partitions))
if parse_all_partitions:
dag_id = dag_id + '_FULL'
if 'ethereum_kovan_parse' in dag_id:
SOURCE_PROJECT_ID = 'public-data-finance'
SOURCE_DATASET_NAME = 'crypto_ethereum_kovan'
PARTITION_DAG_ID = 'ethereum_kovan_partition_dag'
else:
SOURCE_PROJECT_ID = 'bigquery-public-data'
SOURCE_DATASET_NAME = 'crypto_ethereum'
PARTITION_DAG_ID = 'ethereum_partition_dag'
default_dag_args = {
'depends_on_past': True,
'start_date': parse_start_date,
'email_on_failure': True,
'email_on_retry': False,
'retries': 5,
'retry_delay': timedelta(minutes=5)
}
if notification_emails and len(notification_emails) > 0:
default_dag_args['email'] = [email.strip() for email in notification_emails.split(',')]
dag = models.DAG(
dag_id,
catchup=False,
schedule_interval=schedule_interval,
default_args=default_dag_args)
validation_error = None
try:
validate_definition_files(dataset_folder)
except ValueError as e:
validation_error = e
# This prevents failing all dags as they are constructed in a loop in ethereum_parse_dag.py
if validation_error is not None:
def raise_validation_error(ds, **kwargs):
raise validation_error
validation_error_operator = PythonOperator(
task_id='validation_error',
python_callable=raise_validation_error,
provide_context=True,
execution_timeout=timedelta(minutes=10),
dag=dag
)
return dag
def create_parse_task(table_definition):
def parse_task(ds, **kwargs):
client = bigquery.Client()
parse(
bigquery_client=client,
table_definition=table_definition,
ds=ds,
source_project_id=SOURCE_PROJECT_ID,
source_dataset_name=SOURCE_DATASET_NAME,
destination_project_id=parse_destination_dataset_project_id,
sqls_folder=os.path.join(dags_folder, 'resources/stages/parse/sqls'),
parse_all_partitions=parse_all_partitions
)
table_name = table_definition['table']['table_name']
parsing_operator = PythonOperator(
task_id=table_name,
python_callable=parse_task,
provide_context=True,
execution_timeout=timedelta(minutes=60),
dag=dag
)
contract_address = table_definition['parser']['contract_address']
if contract_address is not None:
ref_dependencies = ref_regex.findall(table_definition['parser']['contract_address'])
else:
ref_dependencies = []
return parsing_operator, ref_dependencies
def create_add_view_task(dataset_name, view_name, sql):
def create_view_task(ds, **kwargs):
client = bigquery.Client()
dest_table_name = view_name
dest_table_ref = create_dataset(client, dataset_name, parse_destination_dataset_project_id).table(dest_table_name)
print('View sql: \n' + sql)
create_view(client, sql, dest_table_ref)
create_view_operator = PythonOperator(
task_id=f'create_view_{view_name}',
python_callable=create_view_task,
provide_context=True,
execution_timeout=timedelta(minutes=10),
dag=dag
)
return create_view_operator
def create_share_dataset_task(dataset_name):
def share_dataset_task(**kwargs):
if parse_destination_dataset_project_id != 'blockchain-etl':
logging.info('Skipping sharing dataset.')
else:
client = bigquery.Client()
share_dataset_all_users_read(client, f'{parse_destination_dataset_project_id}.{dataset_name}')
share_dataset_all_users_read(client, f'{parse_destination_dataset_project_id}-internal.{dataset_name}')
share_dataset_operator = PythonOperator(
task_id='share_dataset',
python_callable=share_dataset_task,
provide_context=True,
execution_timeout=timedelta(minutes=10),
dag=dag
)
return share_dataset_operator
wait_for_ethereum_load_dag_task = ExternalTaskSensor(
task_id='wait_for_ethereum_partition_dag',
external_dag_id=PARTITION_DAG_ID,
external_task_id='done',
execution_delta=timedelta(minutes=30),
priority_weight=0,
mode='reschedule',
poke_interval=5 * 60,
timeout=60 * 60 * 12,
dag=dag)
json_files = get_list_of_files(dataset_folder, '*.json')
logging.info(json_files)
all_parse_tasks = {}
task_dependencies = {}
for json_file in json_files:
table_definition = read_json_file(json_file)
task, dependencies = create_parse_task(table_definition)
wait_for_ethereum_load_dag_task >> task
all_parse_tasks[task.task_id] = task
task_dependencies[task.task_id] = dependencies
checkpoint_task = BashOperator(
task_id='parse_all_checkpoint',
bash_command='echo parse_all_checkpoint',
priority_weight=1000,
dag=dag
)
for task, dependencies in task_dependencies.items():
for dependency in dependencies:
if dependency not in all_parse_tasks:
raise ValueError(
'Table {} is not found in the the dataset. Check your ref() in contract_address field.'.format(
dependency))
all_parse_tasks[dependency] >> all_parse_tasks[task]
all_parse_tasks[task] >> checkpoint_task
final_tasks = [checkpoint_task]
dataset_name = os.path.basename(dataset_folder)
full_dataset_name = 'ethereum_' + dataset_name
share_dataset_task = create_share_dataset_task(full_dataset_name)
checkpoint_task >> share_dataset_task
final_tasks.append(share_dataset_task)
# Create views
sql_files = get_list_of_files(dataset_folder, '*.sql')
logging.info(sql_files)
for sql_file in sql_files:
sql = read_file(sql_file)
base_name = os.path.basename(sql_file)
view_name = os.path.splitext(base_name)[0]
create_view_task = create_add_view_task(full_dataset_name, view_name, sql)
checkpoint_task >> create_view_task
final_tasks.append(create_view_task)
if notification_emails and len(notification_emails) > 0 and send_success_email:
send_email_task = EmailOperator(
task_id='send_email',
to=[email.strip() for email in notification_emails.split(',')],
subject='Ethereum ETL Airflow Parse DAG Succeeded',
html_content='Ethereum ETL Airflow Parse DAG Succeeded for {}'.format(dag_id),
dag=dag
)
for final_task in final_tasks:
final_task >> send_email_task
return dag
def get_list_of_files(dataset_folder, filter='*.json'):
logging.info('get_list_of_files')
logging.info(dataset_folder)
logging.info(os.path.join(dataset_folder, filter))
return [f for f in glob(os.path.join(dataset_folder, filter))]
def validate_definition_files(dataset_folder):
json_files = get_list_of_files(dataset_folder, '*.json')
dataset_folder_name = dataset_folder.split('/')[-1]
all_lowercase_table_names = []
for json_file in json_files:
file_name = json_file.split('/')[-1].replace('.json', '')
table_definition = read_json_file(json_file)
table = table_definition.get('table')
if not table:
raise ValueError(f'table is empty in file {json_file}')
dataset_name = table.get('dataset_name')
if not dataset_name:
raise ValueError(f'dataset_name is empty in file {json_file}')
if dataset_folder_name != dataset_name:
raise ValueError(f'dataset_name {dataset_name} is not equal to dataset_folder_name {dataset_folder_name}')
table_name = table.get('table_name')
if not table_name:
raise ValueError(f'table_name is empty in file {json_file}')
if file_name != table_name:
raise ValueError(f'file_name {file_name} doest match the table_name {table_name}')
all_lowercase_table_names.append(table_name.lower())
table_name_counts = collections.defaultdict(lambda: 0)
for table_name in all_lowercase_table_names:
table_name_counts[table_name] += 1
non_unique_table_names = [name for name, count in table_name_counts.items() if count > 1]
if len(non_unique_table_names) > 0:
raise ValueError(f'The following table names are not unique {",".join(non_unique_table_names)}') | 0.415847 | 0.126003 |
import re
from edgedb.lang import _testbase as tb
from edgedb.lang.graphql import generate_source as gql_to_source
from edgedb.lang.graphql.parser import parser as gql_parser
from edgedb.lang.graphql.parser.errors import (GraphQLParserError,
GraphQLUniquenessError,
UnterminatedStringError,
InvalidStringTokenError)
class GraphQLSyntaxTest(tb.BaseSyntaxTest):
    # Base class for GraphQL syntax round-trip tests.
    #
    # NOTE(review): in the test classes derived from this one, each test
    # method's *docstring* is the GraphQL source under test (evidenced by the
    # @tb.must_fail line/col positions pointing into the docstring text) —
    # those docstrings are test payload, not documentation.

    # Filter applied when comparing re-generated source with the original:
    # presumably strips insignificant whitespace, commas and #-comments
    # (see tb.BaseSyntaxTest for the exact use — TODO confirm).
    re_filter = re.compile(r'''[\s,]+|(\#.*?\n)''')
    # Environment flag name that enables parser debug output for these tests.
    parser_debug_flag = 'DEBUG_GRAPHQL'
    # Lexer name used when dumping the test source as highlighted markup.
    markup_dump_lexer = 'graphql'
    # Code generator used to turn the parsed AST back into GraphQL source.
    ast_to_source = gql_to_source

    def get_parser(self, *, spec):
        """Return a fresh GraphQL parser instance for a single test run."""
        return gql_parser.GraphQLParser()
class TestGraphQLParser(GraphQLSyntaxTest):
    # A completely empty document parses without error.
    def test_graphql_syntax_empty01(self):
        """"""
    # A document consisting of a single vertical-tab character (\v) is
    # rejected immediately, at 1:1.
    @tb.must_fail(GraphQLParserError, 'Unexpected', line=1, col=1)
    def test_graphql_syntax_empty02(self):
        """\v"""
    # A document consisting of a single form-feed character (\f) is
    # rejected immediately, at 1:1.
    @tb.must_fail(GraphQLParserError, 'Unexpected', line=1, col=1)
    def test_graphql_syntax_empty03(self):
        """\f"""
    # A no-break space (U+00A0) is not acceptable input; rejected at 1:1.
    @tb.must_fail(GraphQLParserError, 'Unexpected', line=1, col=1)
    def test_graphql_syntax_empty04(self):
        """\xa0"""
    # CRLF advances exactly one line (the error for the stray ';' is
    # reported at 2:1, not 3:1), and ';' itself is not valid GraphQL.
    @tb.must_fail(GraphQLParserError, 'Unexpected', line=2, col=1)
    def test_graphql_syntax_empty05(self):
        """\r\n;"""
    # A lone opening '"' with no closing quote is an unterminated string.
    @tb.must_fail(UnterminatedStringError, line=1, col=2)
    def test_graphql_syntax_empty06(self):
        '''"'''
    # A string may not span lines: the first lone '"' is already
    # unterminated when the line ends (error at 2:10, just past it).
    @tb.must_fail(UnterminatedStringError, line=2, col=10)
    def test_graphql_syntax_empty07(self):
        """
        "
        "
        """
    # A free-standing '...' (fragment-spread punctuator with nothing to
    # attach it to) is rejected at 1:1.
    @tb.must_fail(GraphQLParserError, 'Unexpected token', line=1, col=1)
    def test_graphql_syntax_empty08(self):
        """..."""
    # NOTE: unlike the tests below, this docstring is NOT a raw string, so
    # Python's \b puts a literal backspace control byte into the GraphQL
    # source — an invalid character inside a string (rejected at 2:22).
    @tb.must_fail(InvalidStringTokenError, line=2, col=22)
    def test_graphql_syntax_string01(self):
        """
        { field(arg:"\b") }
        """
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string02(self):
R"""
{ field(arg:"\x") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string03(self):
R"""
{ field(arg:"\u1") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string04(self):
R"""
{ field(arg:"\u0XX1") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string05(self):
R"""
{ field(arg:"\uXXXX") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=25)
def test_graphql_syntax_string06(self):
R"""
{ field(arg:"foo\uFXXX") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string07(self):
R"""
{ field(arg:"\uXXXF") }
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=2, col=34)
def test_graphql_syntax_string08(self):
R"""
{ field(arg:"\uFEFF\n") };
"""
@tb.must_fail(UnterminatedStringError, line=2, col=29)
def test_graphql_syntax_string09(self):
"""
{ field(arg:"foo') }
"""
@tb.must_fail(UnterminatedStringError, line=3, col=23)
def test_graphql_syntax_string10(self):
r"""
{ field(
arg:"foo \
) }
"""
def test_graphql_syntax_string11(self):
r"""
{ field(arg: "\\/ \\\/") }
% OK %
{ field(arg: "\\/ \\/") }
"""
def test_graphql_syntax_string12(self):
r"""
{ field(arg: "\\\\x") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=25)
def test_graphql_syntax_string13(self):
r"""
{ field(arg: "\\\x") }
"""
def test_graphql_syntax_string14(self):
r"""
{ field(arg: "\\'") }
"""
def test_graphql_syntax_string15(self):
r"""
{ field(arg: "\\\n \\\\n") }
"""
def test_graphql_syntax_short01(self):
"""{id}"""
def test_graphql_syntax_short02(self):
"""
{id, name, description}
"""
@tb.must_fail(GraphQLParserError, 'short form is not allowed here',
line=2, col=9)
def test_graphql_syntax_short03(self):
"""
{id}
{name}
"""
@tb.must_fail(GraphQLParserError, 'short form is not allowed here',
line=3, col=9)
def test_graphql_syntax_short04(self):
"""
query {id}
{name}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=18)
def test_graphql_syntax_short05(self):
"""
{ field: {} }
"""
def test_graphql_syntax_field01(self):
"""
{
id
}
"""
def test_graphql_syntax_field02(self):
"""
{
foo: id
}
"""
def test_graphql_syntax_field03(self):
"""
{
name(q: "bar")
}
"""
def test_graphql_syntax_field04(self):
"""
{
foo: id(q: 42)
}
"""
def test_graphql_syntax_field05(self):
"""
{
foo: name(q: 42, w: "bar")
}
"""
def test_graphql_syntax_field06(self):
"""
{
foo: name (q: 42, w: "bar") @skip(if: true)
}
"""
def test_graphql_syntax_field07(self):
"""
{
foo: name (q: 42, w: "bar") @skip(if: false), @include(if: true)
}
"""
def test_graphql_syntax_inline_fragment01(self):
"""
{
...{
foo
}
}
"""
def test_graphql_syntax_inline_fragment02(self):
"""
{
... @skip(if: true) {
foo
}
}
"""
def test_graphql_syntax_inline_fragment03(self):
"""
{
... @skip(if: true), @include(if: true) {
foo
}
}
"""
def test_graphql_syntax_inline_fragment04(self):
"""
{
... on User {
foo
}
}
"""
def test_graphql_syntax_inline_fragment05(self):
"""
{
... on User @skip(if: true), @include(if: true) {
foo
}
}
"""
def test_graphql_syntax_fragment01(self):
"""
fragment friendFields on User {
id
name
profilePic(size: 50)
}
{ ... friendFields }
"""
def test_graphql_syntax_fragment02(self):
"""
fragment friendFields on User @skip(if: false), @include(if: true) {
id
name
profilePic(size: 50)
}
{ ... friendFields }
"""
def test_graphql_syntax_fragment03(self):
"""
fragment someFields on User { id }
{
...someFields @skip(if: true)
}
"""
def test_graphql_syntax_fragment04(self):
"""
fragment someFields on User { id }
{
...someFields @skip(if: true), @include(if: false)
}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=3, col=28)
def test_graphql_syntax_fragment05(self):
"""
{ ...MissingOn }
fragment MissingOn Type {name}
"""
@tb.must_fail(GraphQLParserError, 'undefined fragment', line=2, col=10)
def test_graphql_syntax_fragment06(self):
"""
{...Missing}
"""
@tb.must_fail(GraphQLParserError, 'unused fragment', line=2, col=9)
def test_graphql_syntax_fragment07(self):
"""
fragment Missing on Type {name}
"""
@tb.must_fail(GraphQLParserError, 'cycle in fragment definitions',
line=2, col=9)
def test_graphql_syntax_fragment08(self):
"""
fragment cyclceFrag on Type {
...cyclceFrag
}
{... cyclceFrag}
"""
@tb.must_fail(GraphQLParserError, 'cycle in fragment definitions',
line=2, col=9)
def test_graphql_syntax_fragment09(self):
"""
fragment cyclceFrag on Type {
...otherFrag
}
fragment otherFrag on Type {
...cyclceFrag
}
{... cyclceFrag}
"""
@tb.must_fail(GraphQLParserError, 'cycle in fragment definitions',
line=2, col=9)
def test_graphql_syntax_fragment10(self):
"""
fragment A on Type {...B}
fragment B on Type {...C}
fragment C on Type {...D}
fragment D on Type {...A}
{... C}
"""
def test_graphql_syntax_query01(self):
"""
query getZuckProfile {
id
name
}
"""
def test_graphql_syntax_query02(self):
"""
query getZuckProfile($devicePicSize: Int) {
id
name
}
"""
def test_graphql_syntax_query03(self):
"""
query getZuckProfile($devicePicSize: Int) @skip(if: true) {
id
name
}
"""
def test_graphql_syntax_query04(self):
"""
query noFragments {
user(id: 4) {
friends(first: 10) {
id
name
profilePic(size: 50)
}
mutualFriends(first: 10) {
id
name
profilePic(size: 50)
}
}
}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=23)
def test_graphql_syntax_query05(self):
r"""
query myquery on type { field }
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=2, col=32)
def test_graphql_syntax_query06(self):
r"""
query myquery { field };
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=2, col=25)
def test_graphql_syntax_query07(self):
r"""
query myQuery { \a }
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=9)
def test_graphql_syntax_query08(self):
"""
notanoperation Foo { field }
"""
@tb.must_fail(GraphQLUniquenessError,
r'operation with name \S+ already exists',
line=3, col=9)
def test_graphql_syntax_query09(self):
"""
query myQuery { id }
query myQuery { id }
"""
@tb.must_fail(GraphQLParserError, 'unnamed operation is not allowed here',
line=2, col=9)
def test_graphql_syntax_query10(self):
"""
query { id }
query myQuery { id }
"""
def test_graphql_syntax_mutation01(self):
"""
mutation {
likeStory(storyID: 12345) {
story {
likeCount
}
}
}
"""
def test_graphql_syntax_mutation02(self):
"""
mutation ($storyId: Int) {
likeStory(storyID: $storyId) {
story {
likeCount
}
}
}
"""
def test_graphql_syntax_mutation03(self):
"""
mutation ($storyId: Int, $likes: Int) @include(if: $likes) {
likeStory(storyID: $storyId, likeCount: $likes) {
story {
likeCount
}
}
}
"""
@tb.must_fail(GraphQLUniquenessError, 'operation', line=3, col=9)
def test_graphql_syntax_mutation04(self):
"""
mutation myQuery { id }
query myQuery { id }
"""
def test_graphql_syntax_subscription01(self):
"""
subscription {
id
name
}
"""
@tb.must_fail(GraphQLUniquenessError, 'operation', line=3, col=9)
def test_graphql_syntax_subscription02(self):
"""
mutation myQuery { id }
subscription myQuery { id }
"""
def test_graphql_syntax_values01(self):
"""
{
user(id: 4) {
friends(first: 10) {
id
name
profilePic(size: 50)
}
}
}
"""
def test_graphql_syntax_values02(self):
"""
{
foo(id: 4) {
id
bar(x: 23.1, y: -42.1, z: -999)
}
}
"""
def test_graphql_syntax_values03(self):
"""
{
foo(id: 4) {
id
bar(x: 2.31e-08, y: -4.21e+33, z: -9e+12)
}
}
"""
def test_graphql_syntax_values04(self):
# graphql escapes: \", \\, \/, \b, \f, \n, \r, \t
r"""
{
foo(id: 4) {
id
bar(name: "\"something\"",
more: "",
description: "\\\/\b\f\n\r\t 'blah' спам")
}
}
% OK %
{
foo(id: 4) {
id
bar(name: "\"something\"",
more: "",
description: "\\/\b\f\n\r\t 'blah' спам")
}
}
"""
def test_graphql_syntax_values05(self):
r"""
{
foo(id: 4) {
id
bar(param: MOBILE_WEB)
}
}
"""
def test_graphql_syntax_values06(self):
r"""
{
foo(id: 4) {
id
bar(array: [])
}
}
"""
def test_graphql_syntax_values07(self):
r"""
{
foo(id: 4) {
id
bar(array: [1, "two", 3])
}
}
"""
def test_graphql_syntax_values08(self):
r"""
{
foo(id: 4) {
id
bar(array: {})
}
}
"""
def test_graphql_syntax_values09(self):
r"""
{
foo(id: 4) {
id
bar(map: {
home: "416 123 4567"
work: "416 123 4567"
})
}
}
"""
def test_graphql_syntax_values10(self):
r"""
{
foo(id: 4) {
id
bar(map: {
messy: [1, "two", [], [3, {}, 4]]
home: "416 123 4567"
work: "416 123 4567"
nested: {
deeper: [{
stuff: 42
}, {
spam: "ham"
}]
}
})
}
}
"""
def test_graphql_syntax_values11(self):
"""
query getZuckProfile($devicePicSize: Int = 42) {
user(id: 4) {
id
name
profilePic(size: $devicePicSize)
}
}
"""
def test_graphql_syntax_values12(self):
r"""
query myQuery($special: Int = 42) {
foo(id: 4) {
id
bar(map: {
messy: [1, "two", [], [3, {}, 4]]
home: "416 123 4567"
work: "416 123 4567"
nested: {
deeper: [{
stuff: $special
}, {
spam: "ham"
}]
}
})
}
}
"""
def test_graphql_syntax_values13(self):
r"""
{
foo(id: null) {
id
bar(param: NULL)
}
}
"""
def test_graphql_syntax_values14(self):
r"""
{
foo(id: NULL) {
id
bar(param: null)
}
}
"""
def test_graphql_syntax_values15(self):
r"""
query myQuery($var: Int) {
field(complex: { a: { b: [ $var ] } })
}
"""
def test_graphql_syntax_values16(self):
r"""
query Foo($x: Complex = { a: { b: [ "var" ] } }) {
field
}
"""
@tb.must_fail(GraphQLParserError, r"undefined variable '\$var'",
line=2, col=45)
def test_graphql_syntax_values17(self):
r"""
query Foo($x: Complex = { a: { b: [ $var ] } }) {
field
}
"""
def test_graphql_syntax_values18(self):
r"""
{
fieldWithNullableStringInput(input: null)
}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=3, col=49)
def test_graphql_syntax_values19(self):
r"""
{
fieldWithNullableStringInput(input: .123)
}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=3, col=49)
def test_graphql_syntax_values20(self):
r"""
{
fieldWithNullableStringInput(input: 0123)
}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=3, col=49)
def test_graphql_syntax_values21(self):
r"""
{
fieldWithNullableStringInput(input: +123)
}
"""
def test_graphql_syntax_values22(self):
r"""
{
foo(bar: ["spam", "ham"]) {
id
name
}
}
"""
def test_graphql_syntax_var01(self):
r"""
query ($name: String!) {
User(name: $name) {
id
name
}
}
"""
def test_graphql_syntax_var02(self):
r"""
query A($atOtherHomes: Boolean) {
...HouseTrainedFragment
}
query B($atOtherHomes: Boolean) {
...HouseTrainedFragment
}
fragment HouseTrainedFragment on Base {
dog {
isHousetrained(atOtherHomes: $atOtherHomes)
}
}
"""
@tb.must_fail(GraphQLParserError, r"undefined variable '\$var'",
line=3, col=49)
def test_graphql_syntax_scope01(self):
r"""
{
fieldWithNullableStringInput(input: $var)
}
"""
def test_graphql_syntax_scope02(self):
r"""
fragment goodVar on User {name(first: $var)}
query ($var: String) {
fieldWithNullableStringInput(input: $var)
... goodVar
}
"""
@tb.must_fail(GraphQLParserError, r"undefined variable '\$bad'",
line=3, col=46)
def test_graphql_syntax_scope03(self):
r"""
fragment goodVar on User {name(first: $var)}
fragment badVar on User {name(first: $bad)}
query ($var: String) {
fieldWithNullableStringInput(input: $var)
... goodVar
... badVar
}
"""
@tb.must_fail(GraphQLParserError, r"undefined variable '\$bad'",
line=10, col=53)
def test_graphql_syntax_scope04(self):
r"""
fragment goodVar on User {
name(first: $var)
... midVar
}
fragment midVar on User {
id
... badVar
}
fragment badVar on User {description(first: $bad)}
query ($var: String) {
fieldWithNullableStringInput(input: $var)
... goodVar
}
"""
def test_graphql_syntax_scope05(self):
r"""
fragment goodVar on User {
name(first: $var)
... midVar
}
fragment midVar on User {
id
... badVar
}
fragment badVar on User {description(first: $bad)}
query ($var: String, $bad: String) {
fieldWithNullableStringInput(input: $var)
... goodVar
}
"""
@tb.must_fail(GraphQLParserError, r"undefined variable '\$bad'",
line=10, col=53)
def test_graphql_syntax_scope06(self):
r"""
fragment goodVar on User {
name(first: $var)
... midVar
}
fragment midVar on User {
id
... badVar
}
fragment badVar on User {description(first: $bad)}
query goodQuery ($var: String, $bad: String) {
fieldWithNullableStringInput(input: $var)
... goodVar
}
query badQuery {
... midVar
}
"""
def test_graphql_syntax_names01(self):
r"""
{
on
fragment
query
mutation
subscription
true
false
null
}
"""
def test_graphql_syntax_names02(self):
r"""
{
on: on_ok
fragment: fragment_ok
query: query_ok
mutation: mutation_ok
subscription: subscription_ok
true: true_ok
false: false_ok
null: null_ok
}
"""
def test_graphql_syntax_names03(self):
r"""
{
on_ok: on
fragment_ok: fragment
query_ok: query
mutation_ok: mutation
subscription_ok: subscription
true_ok: true
false_ok: false
null_ok: null
}
"""
def test_graphql_syntax_names04(self):
r"""
{
foo(someObj: {
on: 42
fragment: 42
query: 42
mutation: 42
subscription: 42
true: 42
false: 42
null: 42
}) {
id
}
}
"""
def test_graphql_syntax_names05(self):
r"""
{
foo(
on: 42
fragment: 42
query: 42
mutation: 42
subscription: 42
true: 42
false: 42
null: 42
) {
id
}
}
"""
def test_graphql_syntax_names06(self):
r"""
fragment name_on on on {id}
fragment name_fragment on fragment {id}
fragment name_query on query {id}
fragment name_mutation on mutation {id}
fragment name_subscription on subscription {id}
fragment name_true on true {id}
fragment name_false on false {id}
fragment name_null on null {id}
{
... name_on
... name_fragment
... name_query
... name_mutation
... name_subscription
... name_true
... name_false
... name_null
}
"""
def test_graphql_syntax_names07(self):
r"""
fragment fragment on fragmentFoo {id}
fragment query on queryFoo {id}
fragment mutation on mutationFoo {id}
fragment subscription on subscriptionFoo {id}
fragment true on trueFoo {id}
fragment false on falseFoo {id}
fragment null on nullFoo {id}
{
... fragment
... query
... mutation
... subscription
... true
... false
... null
}
"""
def test_graphql_syntax_names08(self):
r"""
query A { ... on on {id} }
query B { ... on fragment {id} }
query C { ... on query {id} }
query D { ... on mutation {id} }
query E { ... on subscription {id} }
query F { ... on true {id} }
query G { ... on false {id} }
query H { ... on null {id} }
"""
def test_graphql_syntax_names09(self):
r"""
# fragment not_on on Foo {name}
# fragment fragment on Foo {name}
# fragment query on Foo {name}
# fragment mutation on Foo {name}
# fragment subscription on Foo {name}
# fragment true on Foo {name}
fragment false on Foo {name}
fragment null on Foo {name}
# query A { ... not_on on on {id} }
# query B { ... fragment on fragmentFoo {id} }
# query C { ... query on queryFoo {id} }
# query D { ... mutation on mutationFoo {id} }
# query E { ... subscription on subscriptionFoo {id} }
# query F { ... true on trueFoo {id} }
query G { ... false on falseFoo {id} }
query H { ... null on nullFoo {id} }
"""
def test_graphql_syntax_names10(self):
r"""
query (
$on: on = on
$fragment: fragment = fragment
$query: query = query
$mutation: mutation = mutation
$subscription: subscription = subscription
$true: true = true
$false: false = false
$null: null = NULL
) {
id
}
"""
def test_graphql_syntax_names11(self):
r"""
fragment someFragment on Foo {id}
query A { ...someFragment @on }
query B { ...someFragment @fragment }
query C { ...someFragment @query }
query D { ...someFragment @mutation }
query E { ...someFragment @subscription }
query F { ...someFragment @true }
query G { ...someFragment @false }
query H { ...someFragment @null }
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=21)
def test_graphql_syntax_names12(self):
r"""
{ ... on on on {id} }
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=18)
def test_graphql_syntax_names13(self):
r"""
fragment on on on {id}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=18)
def test_graphql_syntax_names14(self):
r"""
{ ... on }
"""
@tb.must_fail(GraphQLUniquenessError, 'variabledefinition', line=2, col=32)
def test_graphql_syntax_names15(self):
r"""
query myQuery($x: Int, $x: Int) { id }
"""
@tb.must_fail(GraphQLUniquenessError, 'variabledefinition', line=2, col=32)
def test_graphql_syntax_names16(self):
r"""
query myQuery($x: Int, $x: Float) { id }
"""
@tb.must_fail(GraphQLUniquenessError, 'argument', line=3, col=23)
def test_graphql_syntax_names17(self):
r"""
{
foo(x: 1, x: 2)
}
"""
@tb.must_fail(GraphQLUniquenessError, 'argument', line=3, col=23)
def test_graphql_syntax_names18(self):
r"""
{
foo(x: 1, x: "one")
}
"""
def test_graphql_syntax_comments01(self):
"""
# some comment
query noFragments {
user(id: 4) {
friends(first: 10) { # end of line comment
# user id
id
# full name
name
# avatar
profilePic(size: 50)
}
mutualFriends(
# commenting on arguments
first: 10
) {
id
name
profilePic(size: 50)
}
}
}
""" | tests/test_graphql_syntax.py |
import re
from edgedb.lang import _testbase as tb
from edgedb.lang.graphql import generate_source as gql_to_source
from edgedb.lang.graphql.parser import parser as gql_parser
from edgedb.lang.graphql.parser.errors import (GraphQLParserError,
GraphQLUniquenessError,
UnterminatedStringError,
InvalidStringTokenError)
class GraphQLSyntaxTest(tb.BaseSyntaxTest):
    # Base harness for GraphQL syntax tests.  Judging by the test subclass,
    # each test method's docstring is the GraphQL source under test, and
    # @tb.must_fail expectations reference line/column positions within that
    # docstring — TODO confirm against tb.BaseSyntaxTest.

    # Filter applied when comparing round-tripped source: strips runs of
    # whitespace/commas and "#...\n" line comments.
    re_filter = re.compile(r'''[\s,]+|(\#.*?\n)''')
    # Environment flag name that switches on parser debug output.
    parser_debug_flag = 'DEBUG_GRAPHQL'
    # Lexer name used when dumping highlighted markup (presumably a
    # Pygments lexer id — verify in tb.BaseSyntaxTest).
    markup_dump_lexer = 'graphql'
    # Callable used to regenerate GraphQL source text from a parsed AST.
    ast_to_source = gql_to_source

    def get_parser(self, *, spec):
        # Return a fresh parser; `spec` is part of the base-class interface
        # and is unused here.
        return gql_parser.GraphQLParser()
class TestGraphQLParser(GraphQLSyntaxTest):
def test_graphql_syntax_empty01(self):
""""""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=1, col=1)
def test_graphql_syntax_empty02(self):
"""\v"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=1, col=1)
def test_graphql_syntax_empty03(self):
"""\f"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=1, col=1)
def test_graphql_syntax_empty04(self):
"""\xa0"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=2, col=1)
def test_graphql_syntax_empty05(self):
"""\r\n;"""
@tb.must_fail(UnterminatedStringError, line=1, col=2)
def test_graphql_syntax_empty06(self):
'''"'''
@tb.must_fail(UnterminatedStringError, line=2, col=10)
def test_graphql_syntax_empty07(self):
"""
"
"
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=1, col=1)
def test_graphql_syntax_empty08(self):
"""..."""
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string01(self):
"""
{ field(arg:"\b") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string02(self):
R"""
{ field(arg:"\x") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string03(self):
R"""
{ field(arg:"\u1") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string04(self):
R"""
{ field(arg:"\u0XX1") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string05(self):
R"""
{ field(arg:"\uXXXX") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=25)
def test_graphql_syntax_string06(self):
R"""
{ field(arg:"foo\uFXXX") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=22)
def test_graphql_syntax_string07(self):
R"""
{ field(arg:"\uXXXF") }
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=2, col=34)
def test_graphql_syntax_string08(self):
R"""
{ field(arg:"\uFEFF\n") };
"""
@tb.must_fail(UnterminatedStringError, line=2, col=29)
def test_graphql_syntax_string09(self):
"""
{ field(arg:"foo') }
"""
@tb.must_fail(UnterminatedStringError, line=3, col=23)
def test_graphql_syntax_string10(self):
r"""
{ field(
arg:"foo \
) }
"""
def test_graphql_syntax_string11(self):
r"""
{ field(arg: "\\/ \\\/") }
% OK %
{ field(arg: "\\/ \\/") }
"""
def test_graphql_syntax_string12(self):
r"""
{ field(arg: "\\\\x") }
"""
@tb.must_fail(InvalidStringTokenError, line=2, col=25)
def test_graphql_syntax_string13(self):
r"""
{ field(arg: "\\\x") }
"""
def test_graphql_syntax_string14(self):
r"""
{ field(arg: "\\'") }
"""
def test_graphql_syntax_string15(self):
r"""
{ field(arg: "\\\n \\\\n") }
"""
def test_graphql_syntax_short01(self):
"""{id}"""
def test_graphql_syntax_short02(self):
"""
{id, name, description}
"""
@tb.must_fail(GraphQLParserError, 'short form is not allowed here',
line=2, col=9)
def test_graphql_syntax_short03(self):
"""
{id}
{name}
"""
@tb.must_fail(GraphQLParserError, 'short form is not allowed here',
line=3, col=9)
def test_graphql_syntax_short04(self):
"""
query {id}
{name}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=18)
def test_graphql_syntax_short05(self):
"""
{ field: {} }
"""
def test_graphql_syntax_field01(self):
"""
{
id
}
"""
def test_graphql_syntax_field02(self):
"""
{
foo: id
}
"""
def test_graphql_syntax_field03(self):
"""
{
name(q: "bar")
}
"""
def test_graphql_syntax_field04(self):
"""
{
foo: id(q: 42)
}
"""
def test_graphql_syntax_field05(self):
"""
{
foo: name(q: 42, w: "bar")
}
"""
def test_graphql_syntax_field06(self):
"""
{
foo: name (q: 42, w: "bar") @skip(if: true)
}
"""
def test_graphql_syntax_field07(self):
"""
{
foo: name (q: 42, w: "bar") @skip(if: false), @include(if: true)
}
"""
def test_graphql_syntax_inline_fragment01(self):
"""
{
...{
foo
}
}
"""
def test_graphql_syntax_inline_fragment02(self):
"""
{
... @skip(if: true) {
foo
}
}
"""
def test_graphql_syntax_inline_fragment03(self):
"""
{
... @skip(if: true), @include(if: true) {
foo
}
}
"""
def test_graphql_syntax_inline_fragment04(self):
"""
{
... on User {
foo
}
}
"""
def test_graphql_syntax_inline_fragment05(self):
"""
{
... on User @skip(if: true), @include(if: true) {
foo
}
}
"""
def test_graphql_syntax_fragment01(self):
"""
fragment friendFields on User {
id
name
profilePic(size: 50)
}
{ ... friendFields }
"""
def test_graphql_syntax_fragment02(self):
"""
fragment friendFields on User @skip(if: false), @include(if: true) {
id
name
profilePic(size: 50)
}
{ ... friendFields }
"""
def test_graphql_syntax_fragment03(self):
"""
fragment someFields on User { id }
{
...someFields @skip(if: true)
}
"""
def test_graphql_syntax_fragment04(self):
"""
fragment someFields on User { id }
{
...someFields @skip(if: true), @include(if: false)
}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=3, col=28)
def test_graphql_syntax_fragment05(self):
"""
{ ...MissingOn }
fragment MissingOn Type {name}
"""
@tb.must_fail(GraphQLParserError, 'undefined fragment', line=2, col=10)
def test_graphql_syntax_fragment06(self):
"""
{...Missing}
"""
@tb.must_fail(GraphQLParserError, 'unused fragment', line=2, col=9)
def test_graphql_syntax_fragment07(self):
"""
fragment Missing on Type {name}
"""
@tb.must_fail(GraphQLParserError, 'cycle in fragment definitions',
line=2, col=9)
def test_graphql_syntax_fragment08(self):
"""
fragment cyclceFrag on Type {
...cyclceFrag
}
{... cyclceFrag}
"""
@tb.must_fail(GraphQLParserError, 'cycle in fragment definitions',
line=2, col=9)
def test_graphql_syntax_fragment09(self):
"""
fragment cyclceFrag on Type {
...otherFrag
}
fragment otherFrag on Type {
...cyclceFrag
}
{... cyclceFrag}
"""
@tb.must_fail(GraphQLParserError, 'cycle in fragment definitions',
line=2, col=9)
def test_graphql_syntax_fragment10(self):
"""
fragment A on Type {...B}
fragment B on Type {...C}
fragment C on Type {...D}
fragment D on Type {...A}
{... C}
"""
def test_graphql_syntax_query01(self):
"""
query getZuckProfile {
id
name
}
"""
def test_graphql_syntax_query02(self):
"""
query getZuckProfile($devicePicSize: Int) {
id
name
}
"""
def test_graphql_syntax_query03(self):
"""
query getZuckProfile($devicePicSize: Int) @skip(if: true) {
id
name
}
"""
def test_graphql_syntax_query04(self):
"""
query noFragments {
user(id: 4) {
friends(first: 10) {
id
name
profilePic(size: 50)
}
mutualFriends(first: 10) {
id
name
profilePic(size: 50)
}
}
}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=23)
def test_graphql_syntax_query05(self):
r"""
query myquery on type { field }
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=2, col=32)
def test_graphql_syntax_query06(self):
r"""
query myquery { field };
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=2, col=25)
def test_graphql_syntax_query07(self):
r"""
query myQuery { \a }
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=9)
def test_graphql_syntax_query08(self):
"""
notanoperation Foo { field }
"""
@tb.must_fail(GraphQLUniquenessError,
r'operation with name \S+ already exists',
line=3, col=9)
def test_graphql_syntax_query09(self):
"""
query myQuery { id }
query myQuery { id }
"""
@tb.must_fail(GraphQLParserError, 'unnamed operation is not allowed here',
line=2, col=9)
def test_graphql_syntax_query10(self):
"""
query { id }
query myQuery { id }
"""
def test_graphql_syntax_mutation01(self):
"""
mutation {
likeStory(storyID: 12345) {
story {
likeCount
}
}
}
"""
def test_graphql_syntax_mutation02(self):
"""
mutation ($storyId: Int) {
likeStory(storyID: $storyId) {
story {
likeCount
}
}
}
"""
def test_graphql_syntax_mutation03(self):
"""
mutation ($storyId: Int, $likes: Int) @include(if: $likes) {
likeStory(storyID: $storyId, likeCount: $likes) {
story {
likeCount
}
}
}
"""
@tb.must_fail(GraphQLUniquenessError, 'operation', line=3, col=9)
def test_graphql_syntax_mutation04(self):
"""
mutation myQuery { id }
query myQuery { id }
"""
def test_graphql_syntax_subscription01(self):
"""
subscription {
id
name
}
"""
@tb.must_fail(GraphQLUniquenessError, 'operation', line=3, col=9)
def test_graphql_syntax_subscription02(self):
"""
mutation myQuery { id }
subscription myQuery { id }
"""
def test_graphql_syntax_values01(self):
"""
{
user(id: 4) {
friends(first: 10) {
id
name
profilePic(size: 50)
}
}
}
"""
def test_graphql_syntax_values02(self):
"""
{
foo(id: 4) {
id
bar(x: 23.1, y: -42.1, z: -999)
}
}
"""
def test_graphql_syntax_values03(self):
"""
{
foo(id: 4) {
id
bar(x: 2.31e-08, y: -4.21e+33, z: -9e+12)
}
}
"""
def test_graphql_syntax_values04(self):
# graphql escapes: \", \\, \/, \b, \f, \n, \r, \t
r"""
{
foo(id: 4) {
id
bar(name: "\"something\"",
more: "",
description: "\\\/\b\f\n\r\t 'blah' спам")
}
}
% OK %
{
foo(id: 4) {
id
bar(name: "\"something\"",
more: "",
description: "\\/\b\f\n\r\t 'blah' спам")
}
}
"""
def test_graphql_syntax_values05(self):
r"""
{
foo(id: 4) {
id
bar(param: MOBILE_WEB)
}
}
"""
def test_graphql_syntax_values06(self):
r"""
{
foo(id: 4) {
id
bar(array: [])
}
}
"""
def test_graphql_syntax_values07(self):
r"""
{
foo(id: 4) {
id
bar(array: [1, "two", 3])
}
}
"""
def test_graphql_syntax_values08(self):
r"""
{
foo(id: 4) {
id
bar(array: {})
}
}
"""
def test_graphql_syntax_values09(self):
r"""
{
foo(id: 4) {
id
bar(map: {
home: "416 123 4567"
work: "416 123 4567"
})
}
}
"""
def test_graphql_syntax_values10(self):
r"""
{
foo(id: 4) {
id
bar(map: {
messy: [1, "two", [], [3, {}, 4]]
home: "416 123 4567"
work: "416 123 4567"
nested: {
deeper: [{
stuff: 42
}, {
spam: "ham"
}]
}
})
}
}
"""
def test_graphql_syntax_values11(self):
"""
query getZuckProfile($devicePicSize: Int = 42) {
user(id: 4) {
id
name
profilePic(size: $devicePicSize)
}
}
"""
def test_graphql_syntax_values12(self):
r"""
query myQuery($special: Int = 42) {
foo(id: 4) {
id
bar(map: {
messy: [1, "two", [], [3, {}, 4]]
home: "416 123 4567"
work: "416 123 4567"
nested: {
deeper: [{
stuff: $special
}, {
spam: "ham"
}]
}
})
}
}
"""
def test_graphql_syntax_values13(self):
r"""
{
foo(id: null) {
id
bar(param: NULL)
}
}
"""
def test_graphql_syntax_values14(self):
r"""
{
foo(id: NULL) {
id
bar(param: null)
}
}
"""
def test_graphql_syntax_values15(self):
r"""
query myQuery($var: Int) {
field(complex: { a: { b: [ $var ] } })
}
"""
def test_graphql_syntax_values16(self):
r"""
query Foo($x: Complex = { a: { b: [ "var" ] } }) {
field
}
"""
@tb.must_fail(GraphQLParserError, r"undefined variable '\$var'",
line=2, col=45)
def test_graphql_syntax_values17(self):
r"""
query Foo($x: Complex = { a: { b: [ $var ] } }) {
field
}
"""
def test_graphql_syntax_values18(self):
r"""
{
fieldWithNullableStringInput(input: null)
}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=3, col=49)
def test_graphql_syntax_values19(self):
r"""
{
fieldWithNullableStringInput(input: .123)
}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=3, col=49)
def test_graphql_syntax_values20(self):
r"""
{
fieldWithNullableStringInput(input: 0123)
}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected', line=3, col=49)
def test_graphql_syntax_values21(self):
r"""
{
fieldWithNullableStringInput(input: +123)
}
"""
def test_graphql_syntax_values22(self):
r"""
{
foo(bar: ["spam", "ham"]) {
id
name
}
}
"""
def test_graphql_syntax_var01(self):
r"""
query ($name: String!) {
User(name: $name) {
id
name
}
}
"""
def test_graphql_syntax_var02(self):
r"""
query A($atOtherHomes: Boolean) {
...HouseTrainedFragment
}
query B($atOtherHomes: Boolean) {
...HouseTrainedFragment
}
fragment HouseTrainedFragment on Base {
dog {
isHousetrained(atOtherHomes: $atOtherHomes)
}
}
"""
@tb.must_fail(GraphQLParserError, r"undefined variable '\$var'",
line=3, col=49)
def test_graphql_syntax_scope01(self):
r"""
{
fieldWithNullableStringInput(input: $var)
}
"""
def test_graphql_syntax_scope02(self):
r"""
fragment goodVar on User {name(first: $var)}
query ($var: String) {
fieldWithNullableStringInput(input: $var)
... goodVar
}
"""
@tb.must_fail(GraphQLParserError, r"undefined variable '\$bad'",
line=3, col=46)
def test_graphql_syntax_scope03(self):
r"""
fragment goodVar on User {name(first: $var)}
fragment badVar on User {name(first: $bad)}
query ($var: String) {
fieldWithNullableStringInput(input: $var)
... goodVar
... badVar
}
"""
@tb.must_fail(GraphQLParserError, r"undefined variable '\$bad'",
line=10, col=53)
def test_graphql_syntax_scope04(self):
r"""
fragment goodVar on User {
name(first: $var)
... midVar
}
fragment midVar on User {
id
... badVar
}
fragment badVar on User {description(first: $bad)}
query ($var: String) {
fieldWithNullableStringInput(input: $var)
... goodVar
}
"""
def test_graphql_syntax_scope05(self):
r"""
fragment goodVar on User {
name(first: $var)
... midVar
}
fragment midVar on User {
id
... badVar
}
fragment badVar on User {description(first: $bad)}
query ($var: String, $bad: String) {
fieldWithNullableStringInput(input: $var)
... goodVar
}
"""
@tb.must_fail(GraphQLParserError, r"undefined variable '\$bad'",
line=10, col=53)
def test_graphql_syntax_scope06(self):
r"""
fragment goodVar on User {
name(first: $var)
... midVar
}
fragment midVar on User {
id
... badVar
}
fragment badVar on User {description(first: $bad)}
query goodQuery ($var: String, $bad: String) {
fieldWithNullableStringInput(input: $var)
... goodVar
}
query badQuery {
... midVar
}
"""
def test_graphql_syntax_names01(self):
r"""
{
on
fragment
query
mutation
subscription
true
false
null
}
"""
def test_graphql_syntax_names02(self):
r"""
{
on: on_ok
fragment: fragment_ok
query: query_ok
mutation: mutation_ok
subscription: subscription_ok
true: true_ok
false: false_ok
null: null_ok
}
"""
def test_graphql_syntax_names03(self):
r"""
{
on_ok: on
fragment_ok: fragment
query_ok: query
mutation_ok: mutation
subscription_ok: subscription
true_ok: true
false_ok: false
null_ok: null
}
"""
def test_graphql_syntax_names04(self):
r"""
{
foo(someObj: {
on: 42
fragment: 42
query: 42
mutation: 42
subscription: 42
true: 42
false: 42
null: 42
}) {
id
}
}
"""
def test_graphql_syntax_names05(self):
r"""
{
foo(
on: 42
fragment: 42
query: 42
mutation: 42
subscription: 42
true: 42
false: 42
null: 42
) {
id
}
}
"""
def test_graphql_syntax_names06(self):
r"""
fragment name_on on on {id}
fragment name_fragment on fragment {id}
fragment name_query on query {id}
fragment name_mutation on mutation {id}
fragment name_subscription on subscription {id}
fragment name_true on true {id}
fragment name_false on false {id}
fragment name_null on null {id}
{
... name_on
... name_fragment
... name_query
... name_mutation
... name_subscription
... name_true
... name_false
... name_null
}
"""
def test_graphql_syntax_names07(self):
r"""
fragment fragment on fragmentFoo {id}
fragment query on queryFoo {id}
fragment mutation on mutationFoo {id}
fragment subscription on subscriptionFoo {id}
fragment true on trueFoo {id}
fragment false on falseFoo {id}
fragment null on nullFoo {id}
{
... fragment
... query
... mutation
... subscription
... true
... false
... null
}
"""
def test_graphql_syntax_names08(self):
r"""
query A { ... on on {id} }
query B { ... on fragment {id} }
query C { ... on query {id} }
query D { ... on mutation {id} }
query E { ... on subscription {id} }
query F { ... on true {id} }
query G { ... on false {id} }
query H { ... on null {id} }
"""
def test_graphql_syntax_names09(self):
r"""
# fragment not_on on Foo {name}
# fragment fragment on Foo {name}
# fragment query on Foo {name}
# fragment mutation on Foo {name}
# fragment subscription on Foo {name}
# fragment true on Foo {name}
fragment false on Foo {name}
fragment null on Foo {name}
# query A { ... not_on on on {id} }
# query B { ... fragment on fragmentFoo {id} }
# query C { ... query on queryFoo {id} }
# query D { ... mutation on mutationFoo {id} }
# query E { ... subscription on subscriptionFoo {id} }
# query F { ... true on trueFoo {id} }
query G { ... false on falseFoo {id} }
query H { ... null on nullFoo {id} }
"""
def test_graphql_syntax_names10(self):
r"""
query (
$on: on = on
$fragment: fragment = fragment
$query: query = query
$mutation: mutation = mutation
$subscription: subscription = subscription
$true: true = true
$false: false = false
$null: null = NULL
) {
id
}
"""
def test_graphql_syntax_names11(self):
r"""
fragment someFragment on Foo {id}
query A { ...someFragment @on }
query B { ...someFragment @fragment }
query C { ...someFragment @query }
query D { ...someFragment @mutation }
query E { ...someFragment @subscription }
query F { ...someFragment @true }
query G { ...someFragment @false }
query H { ...someFragment @null }
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=21)
def test_graphql_syntax_names12(self):
r"""
{ ... on on on {id} }
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=18)
def test_graphql_syntax_names13(self):
r"""
fragment on on on {id}
"""
@tb.must_fail(GraphQLParserError, 'Unexpected token', line=2, col=18)
def test_graphql_syntax_names14(self):
r"""
{ ... on }
"""
@tb.must_fail(GraphQLUniquenessError, 'variabledefinition', line=2, col=32)
def test_graphql_syntax_names15(self):
r"""
query myQuery($x: Int, $x: Int) { id }
"""
@tb.must_fail(GraphQLUniquenessError, 'variabledefinition', line=2, col=32)
def test_graphql_syntax_names16(self):
r"""
query myQuery($x: Int, $x: Float) { id }
"""
@tb.must_fail(GraphQLUniquenessError, 'argument', line=3, col=23)
def test_graphql_syntax_names17(self):
r"""
{
foo(x: 1, x: 2)
}
"""
@tb.must_fail(GraphQLUniquenessError, 'argument', line=3, col=23)
def test_graphql_syntax_names18(self):
r"""
{
foo(x: 1, x: "one")
}
"""
def test_graphql_syntax_comments01(self):
"""
# some comment
query noFragments {
user(id: 4) {
friends(first: 10) { # end of line comment
# user id
id
# full name
name
# avatar
profilePic(size: 50)
}
mutualFriends(
# commenting on arguments
first: 10
) {
id
name
profilePic(size: 50)
}
}
}
""" | 0.460046 | 0.121895 |
from numpy import s_, array
from numpy.random import normal
from exotk.priors import PriorSet, UP, NP
from .core import *
from .extcore import map_uv_to_qq
from .lpf import CLPF
from .lpfsd import LPFSD
class LPFMD(CLPF):
def __init__(self, passband, lctype='relative', use_ldtk=False, n_threads=1, pipeline='gc'):
self.lpf1 = l1 = LPFSD(passband, lctype, use_ldtk=False, pipeline=pipeline, night=1, n_threads=n_threads)
self.lpf2 = l2 = LPFSD(passband, lctype, use_ldtk=False, pipeline=pipeline, night=2, n_threads=n_threads)
super().__init__((l1,l2), constant_k=False, noise='red', use_ldtk=False)
l1._sgp = l2._sgp = self.ps.ndim
l1.slgp = l2.slgp = s_[l1._sgp:l1._sgp+4]
self.priors[:7] = l1.priors[:7]
self.priors.extend(l1.priors[-4:])
self.ps = PriorSet(self.priors)
self.filters = l1.filters
self.passband = l1.passband
self.use_ldtk = use_ldtk
if use_ldtk:
self.sc = LDPSetCreator([4150,100], [4.6,0.2], [-0.14,0.16], self.filters)
self.lp = self.sc.create_profiles(2000)
self.lp.resample_linear_z()
self.lp.set_uncertainty_multiplier(2)
@property
def times(self):
return [lpf.times for lpf in self.lpfs]
@property
def fluxes(self):
return [lpf.fluxes for lpf in self.lpfs]
def compute_baseline(self, pv):
return [lpf.compute_baseline(pv) for lpf in self.lpfs]
def compute_transit(self, pv):
return [lpf.compute_transit(pv) for lpf in self.lpfs]
def compute_lc_model(self, pv):
return [lpf.compute_lc_model(pv) for lpf in self.lpfs]
def fit_ldc(self, pvpop, emul=1.):
pvt = pvpop.copy()
uv, uve = self.lp.coeffs_qd()
us = array([normal(um, emul*ue, size=pvt.shape[0]) for um,ue in zip(uv[:,0],uve[:,0])]).T
vs = array([normal(vm, emul*ve, size=pvt.shape[0]) for vm,ve in zip(uv[:,1],uve[:,1])]).T
q1s, q2s = map_uv_to_qq(us, vs)
pvt[:, self.uq1] = q1s
pvt[:, self.uq2] = q2s
return pvt | src/lpfmd.py | from numpy import s_, array
from numpy.random import normal
from exotk.priors import PriorSet, UP, NP
from .core import *
from .extcore import map_uv_to_qq
from .lpf import CLPF
from .lpfsd import LPFSD
class LPFMD(CLPF):
def __init__(self, passband, lctype='relative', use_ldtk=False, n_threads=1, pipeline='gc'):
self.lpf1 = l1 = LPFSD(passband, lctype, use_ldtk=False, pipeline=pipeline, night=1, n_threads=n_threads)
self.lpf2 = l2 = LPFSD(passband, lctype, use_ldtk=False, pipeline=pipeline, night=2, n_threads=n_threads)
super().__init__((l1,l2), constant_k=False, noise='red', use_ldtk=False)
l1._sgp = l2._sgp = self.ps.ndim
l1.slgp = l2.slgp = s_[l1._sgp:l1._sgp+4]
self.priors[:7] = l1.priors[:7]
self.priors.extend(l1.priors[-4:])
self.ps = PriorSet(self.priors)
self.filters = l1.filters
self.passband = l1.passband
self.use_ldtk = use_ldtk
if use_ldtk:
self.sc = LDPSetCreator([4150,100], [4.6,0.2], [-0.14,0.16], self.filters)
self.lp = self.sc.create_profiles(2000)
self.lp.resample_linear_z()
self.lp.set_uncertainty_multiplier(2)
@property
def times(self):
return [lpf.times for lpf in self.lpfs]
@property
def fluxes(self):
return [lpf.fluxes for lpf in self.lpfs]
def compute_baseline(self, pv):
return [lpf.compute_baseline(pv) for lpf in self.lpfs]
def compute_transit(self, pv):
return [lpf.compute_transit(pv) for lpf in self.lpfs]
def compute_lc_model(self, pv):
return [lpf.compute_lc_model(pv) for lpf in self.lpfs]
def fit_ldc(self, pvpop, emul=1.):
pvt = pvpop.copy()
uv, uve = self.lp.coeffs_qd()
us = array([normal(um, emul*ue, size=pvt.shape[0]) for um,ue in zip(uv[:,0],uve[:,0])]).T
vs = array([normal(vm, emul*ve, size=pvt.shape[0]) for vm,ve in zip(uv[:,1],uve[:,1])]).T
q1s, q2s = map_uv_to_qq(us, vs)
pvt[:, self.uq1] = q1s
pvt[:, self.uq2] = q2s
return pvt | 0.754734 | 0.198064 |
from pymongo import MongoClient, errors
import sys, datetime
# added for future
params = {
'db_name': 'AWESOME_DS',
'maxSevSelDelay': 1,
'log_fl': "/tmp/mongo_instance.live.log"
}
class mongod_instance:
def __init__(self, conn_cfg=params):
self.client = MongoClient(serverSelectionTimeoutMS=conn_cfg.get('maxSevSelDelay', 1))
self.check_mongod_running()
self.db = self.client[conn_cfg.get("db_name")]
self.log_fl = conn_cfg.get("log_fl")
with open(self.log_fl, "a") as log_fl:
log_fl.write("New instance created at %s\n" % str(datetime.datetime.now()))
def check_mongod_running(self, conn_cfg=params):
try:
self.client.server_info()
except errors.ServerSelectionTimeoutError as err:
print("MongoDB instance has not started or is dead..!")
self.client = err
sys.exit(-2)
return self.client
def get_mongod_db(self, conn_cfg=params):
return self.db
def is_alive(self):
self.check_mongod_running()
return True
def reset(self):
for collection in self.db.collection_names():
tbl = self.db[collection]
tbl.drop()
class mongod_table:
'''
Expects MongoD database object and table name tbl_nm as a string
'''
def __init__(self, mongod_obj, tbl_nm, source):
if mongod_obj.is_alive():
self.db_obj = mongod_obj.get_mongod_db()
self.tbl = self.db_obj.get_collection(tbl_nm)
self.tbl_str = tbl_nm
self.log_fl = mongod_obj.log_fl
self.source = source
def get_table(self):
return self.tbl
def __str__(self):
return self.tbl_str
'''
Creates works only when the table tbl_nm does not exist
Once created, it adds the document specified by doc(JSON expected) to the table
Caution: Exception handling done per row, does not stop if there is a bad record.
'''
def add_data(self, doc):
fail_count = 0
failed_keys = []
for key in doc.keys():
try:
self.tbl.insert(doc.get(key))
except Exception:
fail_count += 1
failed_keys.append(key)
failed_keys.append("\n")
print("Data added successfully with %d insert failures" % fail_count)
if fail_count:
with open(self.log_fl, "a") as log:
log.write("\n".join(failed_keys))
return 0
'''
Expects MongoD table object and a query in dict/BSON format
Returns a cursor which is an iterable
'''
def query(self, query_obj=None, cols = []):
if query_obj:
query_obj.update({'source' : self.source})
else:
query_obj = {'source' : self.source}
col_dict = {}
if len(cols):
for col in cols:
col_dict[col] = 1
try:
assert(self.check_tbl_exist())
except AssertionError as e:
print("Table does not exist in the database")
print(e)
sys.exit(-2)
if col_dict:
return self.tbl.find(query_obj, col_dict)
else:
return self.tbl.find(query_obj)
def check_tbl_exist(self):
if self.tbl_str in self.db_obj.collection_names():
return True
else:
return False
def drop_table(self):
if self.check_tbl_exist():
self.tbl.drop()
else:
print("Table does not exist in the database")
sys.exit(-2)
'''
This method will iterate through the cursor object that find returns
'''
def result_iterator(cursor_obj):
try:
assert(cursor_obj.count() != 0)
except AssertionError as e:
print("Empty cursor. No records found.")
sys.exit(-2)
result_obj = {}
while cursor_obj.alive:
obj = cursor_obj.next()
key = obj.get("_id")
obj.pop("_id")
result_obj[key] = obj
return result_obj
'''
Easy access method to return result in the form of gid : column
'''
def key_val_converter(cursor_obj, col_nm):
result = {}
while cursor_obj.alive:
next_obj = cursor_obj.next()
result[next_obj.get("_id")] = next_obj.get(col_nm)
# result[next_obj.get("gid")] = next_obj.get(col_nm)
return result
def __main__():
client = mongod_instance()
print(client.is_alive())
exif_tbl_obj = mongod_table(client, 'exif_tab', 'flickr_giraffe')
cursor = exif_tbl_obj.query(query_obj=None, cols=['long', 'lat'])
print(cursor.count())
print(result_iterator(cursor))
if __name__ == "__main__":
__main__()
# client = mongod_instance()
# client.reset() | script/mongod_helper.py | from pymongo import MongoClient, errors
import sys, datetime
# added for future
params = {
'db_name': 'AWESOME_DS',
'maxSevSelDelay': 1,
'log_fl': "/tmp/mongo_instance.live.log"
}
class mongod_instance:
def __init__(self, conn_cfg=params):
self.client = MongoClient(serverSelectionTimeoutMS=conn_cfg.get('maxSevSelDelay', 1))
self.check_mongod_running()
self.db = self.client[conn_cfg.get("db_name")]
self.log_fl = conn_cfg.get("log_fl")
with open(self.log_fl, "a") as log_fl:
log_fl.write("New instance created at %s\n" % str(datetime.datetime.now()))
def check_mongod_running(self, conn_cfg=params):
try:
self.client.server_info()
except errors.ServerSelectionTimeoutError as err:
print("MongoDB instance has not started or is dead..!")
self.client = err
sys.exit(-2)
return self.client
def get_mongod_db(self, conn_cfg=params):
return self.db
def is_alive(self):
self.check_mongod_running()
return True
def reset(self):
for collection in self.db.collection_names():
tbl = self.db[collection]
tbl.drop()
class mongod_table:
'''
Expects MongoD database object and table name tbl_nm as a string
'''
def __init__(self, mongod_obj, tbl_nm, source):
if mongod_obj.is_alive():
self.db_obj = mongod_obj.get_mongod_db()
self.tbl = self.db_obj.get_collection(tbl_nm)
self.tbl_str = tbl_nm
self.log_fl = mongod_obj.log_fl
self.source = source
def get_table(self):
return self.tbl
def __str__(self):
return self.tbl_str
'''
Creates works only when the table tbl_nm does not exist
Once created, it adds the document specified by doc(JSON expected) to the table
Caution: Exception handling done per row, does not stop if there is a bad record.
'''
def add_data(self, doc):
fail_count = 0
failed_keys = []
for key in doc.keys():
try:
self.tbl.insert(doc.get(key))
except Exception:
fail_count += 1
failed_keys.append(key)
failed_keys.append("\n")
print("Data added successfully with %d insert failures" % fail_count)
if fail_count:
with open(self.log_fl, "a") as log:
log.write("\n".join(failed_keys))
return 0
'''
Expects MongoD table object and a query in dict/BSON format
Returns a cursor which is an iterable
'''
def query(self, query_obj=None, cols = []):
if query_obj:
query_obj.update({'source' : self.source})
else:
query_obj = {'source' : self.source}
col_dict = {}
if len(cols):
for col in cols:
col_dict[col] = 1
try:
assert(self.check_tbl_exist())
except AssertionError as e:
print("Table does not exist in the database")
print(e)
sys.exit(-2)
if col_dict:
return self.tbl.find(query_obj, col_dict)
else:
return self.tbl.find(query_obj)
def check_tbl_exist(self):
if self.tbl_str in self.db_obj.collection_names():
return True
else:
return False
def drop_table(self):
if self.check_tbl_exist():
self.tbl.drop()
else:
print("Table does not exist in the database")
sys.exit(-2)
'''
This method will iterate through the cursor object that find returns
'''
def result_iterator(cursor_obj):
try:
assert(cursor_obj.count() != 0)
except AssertionError as e:
print("Empty cursor. No records found.")
sys.exit(-2)
result_obj = {}
while cursor_obj.alive:
obj = cursor_obj.next()
key = obj.get("_id")
obj.pop("_id")
result_obj[key] = obj
return result_obj
'''
Easy access method to return result in the form of gid : column
'''
def key_val_converter(cursor_obj, col_nm):
result = {}
while cursor_obj.alive:
next_obj = cursor_obj.next()
result[next_obj.get("_id")] = next_obj.get(col_nm)
# result[next_obj.get("gid")] = next_obj.get(col_nm)
return result
def __main__():
client = mongod_instance()
print(client.is_alive())
exif_tbl_obj = mongod_table(client, 'exif_tab', 'flickr_giraffe')
cursor = exif_tbl_obj.query(query_obj=None, cols=['long', 'lat'])
print(cursor.count())
print(result_iterator(cursor))
if __name__ == "__main__":
__main__()
# client = mongod_instance()
# client.reset() | 0.253584 | 0.113506 |
from __future__ import unicode_literals
from django.template import defaultfilters
from django.db import models
# Create your models here.
CLASIFICACION_CHOICES = (
(0, "Articulo"),
(1, "Presentación"),
(2, "Libro"),
)
# ______________________________________________________________________
class Pais(models.Model):
pais = models.CharField(max_length=15)
slug = models.SlugField(unique=True, null=False, blank=False, default="url-separado-por-guiones")
def __unicode__(self):
return self.pais
def save(self, *args, **kwargs):
if not self.id:
self.slug = defaultfilters.slugify(self.pais)
super(Pais, self).save(*args, **kwargs)
# .....
class Meta:
verbose_name_plural = 'Paises'
# ______________________________________________________________________
class Tematica(models.Model):
nombreTematica = models.CharField(max_length=100)
slug = models.SlugField(unique=True, null=False, blank=False, default="url-separado-por-guiones")
def save(self, *args, **kwargs):
if not self.id:
self.slug = defaultfilters.slugify(self.nombreTematica)
super(Tematica, self).save(*args, **kwargs)
def __unicode__(self): # __str__ para python 3
return self.nombreTematica
# ______________________________________________________________________
class Autor(models.Model):
# Un autor tiene un nombre, un apellido y un email
nombre = models.CharField(max_length=20)
apellido = models.CharField(max_length=20)
puesto = models.CharField(max_length=150)
institucion = models.CharField(max_length=100)
paisResidencia = models.ForeignKey(Pais)
cv = models.URLField(default='http://www.redue-alcue.org')
email = models.EmailField(blank=True) # La BBDD aceptara valores vacios para este atributo
tematicas = models.ManyToManyField(Tematica)
def __unicode__(self): # __str__ para python 3
cadena = "%s %s" % (self.nombre, self.apellido)
return cadena
# .....
class Meta:
verbose_name_plural = 'Autores'
# ______________________________________________________________________
class Evento(models.Model):
evento = models.CharField(max_length=150)
slug = models.SlugField(unique=True, null=False, blank=False, default="url-separado-por-guiones")
def __unicode__(self): # __str__ para python 3
return self.evento
# ______________________________________________________________________
class Libro(models.Model):
titulo = models.CharField(max_length=250)
edicion = models.CharField(max_length=150)
anoPublicacion = models.PositiveSmallIntegerField(null=False, blank=False)
portada = models.ImageField(
upload_to='portadas/') # carpeta llamada portadas, donde guardara las imagenes de portadas de libros,
# al final la imagen tendra que cargarse en: media/portadas/
slug = models.SlugField(unique=True, null=False, blank=False, default="url-separado-por-guiones")
def save(self, *args, **kwargs):
if not self.id:
self.slug = defaultfilters.slugify(self.titulo)
super(Libro, self).save(*args, **kwargs)
def __unicode__(self): # __str__ para python 3
return self.titulo
# ______________________________________________________________________
class Contenido(models.Model):
# un material tiene Titulo,Descripcion,Ano de Publicacion,Tematica,Evento,Pais,Video, AUTOR, etc
autores = models.ManyToManyField(Autor)
tematica = models.ForeignKey(Tematica)
titulo = models.CharField(max_length=200) # el atributo nombre tendra maximo 150 caracteres
slug = models.SlugField(max_length=80, default="url-separado-por-guiones")
descripcion = models.TextField(max_length=1000, blank=True)
tipo = models.IntegerField(choices=CLASIFICACION_CHOICES, default=0)
evento = models.ForeignKey(Evento)
libro = models.ForeignKey(Libro)
anoPublicacion = models.PositiveIntegerField(null=True)
pais = models.ManyToManyField(Pais)
timestamp = models.DateTimeField(auto_now=True) # fecha en que se publico
issuu = models.TextField(max_length=250, default="codigo para insertar issuu (Embeded)")
portada = models.ImageField(
upload_to='portadas/') # carpeta llamada portadas, donde guardara las imagenes de portadas de libros,
# al final la imagen tendra que cargarse en: media/portadas/
descarga = models.URLField(default="http://www.redue-alcue.org")
video = models.URLField(default="http://www.youtube.com")
destacar = models.BooleanField(default=False)
def __unicode__(self): # __str__ para python 3
return self.titulo | home/models.py | from __future__ import unicode_literals
from django.template import defaultfilters
from django.db import models
# Create your models here.
CLASIFICACION_CHOICES = (
(0, "Articulo"),
(1, "Presentación"),
(2, "Libro"),
)
# ______________________________________________________________________
class Pais(models.Model):
pais = models.CharField(max_length=15)
slug = models.SlugField(unique=True, null=False, blank=False, default="url-separado-por-guiones")
def __unicode__(self):
return self.pais
def save(self, *args, **kwargs):
if not self.id:
self.slug = defaultfilters.slugify(self.pais)
super(Pais, self).save(*args, **kwargs)
# .....
class Meta:
verbose_name_plural = 'Paises'
# ______________________________________________________________________
class Tematica(models.Model):
nombreTematica = models.CharField(max_length=100)
slug = models.SlugField(unique=True, null=False, blank=False, default="url-separado-por-guiones")
def save(self, *args, **kwargs):
if not self.id:
self.slug = defaultfilters.slugify(self.nombreTematica)
super(Tematica, self).save(*args, **kwargs)
def __unicode__(self): # __str__ para python 3
return self.nombreTematica
# ______________________________________________________________________
class Autor(models.Model):
# Un autor tiene un nombre, un apellido y un email
nombre = models.CharField(max_length=20)
apellido = models.CharField(max_length=20)
puesto = models.CharField(max_length=150)
institucion = models.CharField(max_length=100)
paisResidencia = models.ForeignKey(Pais)
cv = models.URLField(default='http://www.redue-alcue.org')
email = models.EmailField(blank=True) # La BBDD aceptara valores vacios para este atributo
tematicas = models.ManyToManyField(Tematica)
def __unicode__(self): # __str__ para python 3
cadena = "%s %s" % (self.nombre, self.apellido)
return cadena
# .....
class Meta:
verbose_name_plural = 'Autores'
# ______________________________________________________________________
class Evento(models.Model):
evento = models.CharField(max_length=150)
slug = models.SlugField(unique=True, null=False, blank=False, default="url-separado-por-guiones")
def __unicode__(self): # __str__ para python 3
return self.evento
# ______________________________________________________________________
class Libro(models.Model):
titulo = models.CharField(max_length=250)
edicion = models.CharField(max_length=150)
anoPublicacion = models.PositiveSmallIntegerField(null=False, blank=False)
portada = models.ImageField(
upload_to='portadas/') # carpeta llamada portadas, donde guardara las imagenes de portadas de libros,
# al final la imagen tendra que cargarse en: media/portadas/
slug = models.SlugField(unique=True, null=False, blank=False, default="url-separado-por-guiones")
def save(self, *args, **kwargs):
if not self.id:
self.slug = defaultfilters.slugify(self.titulo)
super(Libro, self).save(*args, **kwargs)
def __unicode__(self): # __str__ para python 3
return self.titulo
# ______________________________________________________________________
class Contenido(models.Model):
# un material tiene Titulo,Descripcion,Ano de Publicacion,Tematica,Evento,Pais,Video, AUTOR, etc
autores = models.ManyToManyField(Autor)
tematica = models.ForeignKey(Tematica)
titulo = models.CharField(max_length=200) # el atributo nombre tendra maximo 150 caracteres
slug = models.SlugField(max_length=80, default="url-separado-por-guiones")
descripcion = models.TextField(max_length=1000, blank=True)
tipo = models.IntegerField(choices=CLASIFICACION_CHOICES, default=0)
evento = models.ForeignKey(Evento)
libro = models.ForeignKey(Libro)
anoPublicacion = models.PositiveIntegerField(null=True)
pais = models.ManyToManyField(Pais)
timestamp = models.DateTimeField(auto_now=True) # fecha en que se publico
issuu = models.TextField(max_length=250, default="codigo para insertar issuu (Embeded)")
portada = models.ImageField(
upload_to='portadas/') # carpeta llamada portadas, donde guardara las imagenes de portadas de libros,
# al final la imagen tendra que cargarse en: media/portadas/
descarga = models.URLField(default="http://www.redue-alcue.org")
video = models.URLField(default="http://www.youtube.com")
destacar = models.BooleanField(default=False)
def __unicode__(self): # __str__ para python 3
return self.titulo | 0.494385 | 0.137851 |
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from chirpstack_api.as_pb.external.api import gatewayProfile_pb2 as chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class GatewayProfileServiceStub(object):
"""GatewayProfileService is the service managing the gateway-profiles.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Create = channel.unary_unary(
'/api.GatewayProfileService/Create',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileResponse.FromString,
)
self.Get = channel.unary_unary(
'/api.GatewayProfileService/Get',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileResponse.FromString,
)
self.Update = channel.unary_unary(
'/api.GatewayProfileService/Update',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.UpdateGatewayProfileRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.Delete = channel.unary_unary(
'/api.GatewayProfileService/Delete',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.DeleteGatewayProfileRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.List = channel.unary_unary(
'/api.GatewayProfileService/List',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesResponse.FromString,
)
class GatewayProfileServiceServicer(object):
"""GatewayProfileService is the service managing the gateway-profiles.
"""
def Create(self, request, context):
"""Create creates the given gateway-profile.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
"""Get returns the gateway-profile matching the given id.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Update updates the given gateway-profile.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Delete deletes the gateway-profile matching the given id.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""List returns the existing gateway-profiles.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GatewayProfileServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileResponse.SerializeToString,
),
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileResponse.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.UpdateGatewayProfileRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.DeleteGatewayProfileRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'api.GatewayProfileService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class GatewayProfileService(object):
"""GatewayProfileService is the service managing the gateway-profiles.
"""
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.GatewayProfileService/Create',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.GatewayProfileService/Get',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.GatewayProfileService/Update',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.UpdateGatewayProfileRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.GatewayProfileService/Delete',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.DeleteGatewayProfileRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.GatewayProfileService/List',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | python/src/chirpstack_api/as_pb/external/api/gatewayProfile_pb2_grpc.py | """Client and server classes corresponding to protobuf-defined services."""
import grpc
from chirpstack_api.as_pb.external.api import gatewayProfile_pb2 as chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class GatewayProfileServiceStub(object):
"""GatewayProfileService is the service managing the gateway-profiles.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Create = channel.unary_unary(
'/api.GatewayProfileService/Create',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileResponse.FromString,
)
self.Get = channel.unary_unary(
'/api.GatewayProfileService/Get',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileResponse.FromString,
)
self.Update = channel.unary_unary(
'/api.GatewayProfileService/Update',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.UpdateGatewayProfileRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.Delete = channel.unary_unary(
'/api.GatewayProfileService/Delete',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.DeleteGatewayProfileRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.List = channel.unary_unary(
'/api.GatewayProfileService/List',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesResponse.FromString,
)
class GatewayProfileServiceServicer(object):
"""GatewayProfileService is the service managing the gateway-profiles.
"""
def Create(self, request, context):
"""Create creates the given gateway-profile.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
"""Get returns the gateway-profile matching the given id.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Update updates the given gateway-profile.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Delete deletes the gateway-profile matching the given id.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""List returns the existing gateway-profiles.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GatewayProfileServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileResponse.SerializeToString,
),
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileResponse.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.UpdateGatewayProfileRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.DeleteGatewayProfileRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'api.GatewayProfileService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class GatewayProfileService(object):
"""GatewayProfileService is the service managing the gateway-profiles.
"""
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.GatewayProfileService/Create',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.CreateGatewayProfileResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.GatewayProfileService/Get',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.GetGatewayProfileResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.GatewayProfileService/Update',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.UpdateGatewayProfileRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.GatewayProfileService/Delete',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.DeleteGatewayProfileRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.GatewayProfileService/List',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_gatewayProfile__pb2.ListGatewayProfilesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | 0.658308 | 0.101056 |
import os
import textwrap
from urllib.parse import urlparse
from ...utils import parse_readable_size
def _remove_nones(cfg):
return dict((k, v) for k, v in cfg.items() if v is not None)
def _get_local_app_module(mod_name):
return __name__.rsplit('.', 1)[0] + '.' + mod_name.rsplit('.', 1)[-1]
class SecurityConfig:
def __init__(self, cert_file=None, key_file=None):
self._cert_file = cert_file
self._key_file = key_file
def build(self):
return dict(cert_file=self._cert_file, key_file=self._key_file)
class AppFileConfig:
def __init__(self, source, file_type=None, visibility=None,
size=None, timestamp=None):
self._source = source
self._file_type = file_type
self._visibility = visibility
self._size = size
self._timestamp = timestamp
def build(self):
if all(v is None for v in (self._file_type, self._visibility, self._size, self._timestamp)):
return self._source
else:
return _remove_nones(dict(
source=self._source, type=self._file_type, visibility=self._visibility,
size=self._size, timestamp=self._timestamp
))
class AppContainerConfig:
def __init__(self, cpu=None, memory=None, env=None, files=None, script=None):
self._cpu = cpu
if memory is not None:
real_mem, is_percent = parse_readable_size(memory)
assert not is_percent
self._memory = real_mem
else:
self._memory = None
self._env = env
self._script = script
self._files = files
self.add_default_envs()
def build_script(self):
return self._script
def add_default_envs(self):
pass
def add_env(self, k, v):
if self._env is None:
self._env = dict()
self._env[k] = v
def build(self):
return _remove_nones(dict(
resources=dict(
vcores=self._cpu,
memory=f'{self._memory // 1024 ** 2} MiB' if self._memory else None,
),
env=self._env,
script=self.build_script(),
files=dict((k, v.build()) for k, v in self._files.items()) if self._files else None,
))
class AppMasterConfig(AppContainerConfig):
def __init__(self, security=None, **kwargs):
super().__init__(**kwargs)
self._security = security
def build(self):
d = super().build()
if self._security is not None:
d['security'] = self._security.build()
return d
class AppServiceConfig(AppContainerConfig):
def __init__(self, instances=1, depends=None, allow_failures=False,
max_restarts=0, **kwargs):
super().__init__(**kwargs)
if isinstance(depends, str):
depends = [depends]
self._allow_failures = allow_failures
self._depends = depends or []
self._max_restarts = max_restarts
self._instances = instances
def build(self):
d = super().build()
d.update(dict(
instances=self._instances,
depends=self._depends,
allow_failures=self._allow_failures,
max_restarts=self._max_restarts,
))
return d
class MarsServiceConfig(AppServiceConfig):
service_name = None
def __init__(self, environment, modules=None, cmd_tmpl=None, cpu=None, memory=None,
log_config=None, extra_args=None, **kwargs):
files = kwargs.pop('files', dict())
kwargs['files'] = files
parsed = urlparse(environment)
self._env_scheme = parsed.scheme
if parsed.scheme:
import mars
self._source_path = os.path.dirname(os.path.dirname(os.path.abspath(mars.__file__)))
self._env_path = environment[len(parsed.scheme) + 3:]
self._path_environ = os.environ['PATH']
else:
self._source_path = None
self._env_path = environment
self._path_environ = None
self._cmd_tmpl = cmd_tmpl or '"{executable}"'
if not self._env_scheme:
files['mars_env'] = AppFileConfig(environment)
self._log_config = log_config
if log_config:
files['logging.conf'] = AppFileConfig(log_config)
self._modules = modules.split(',') if isinstance(modules, str) else modules
self._extra_args = extra_args or ''
cpu = cpu or 1
memory = memory or '1 GiB'
super().__init__(cpu=cpu, memory=memory, **kwargs)
def add_default_envs(self):
if self._cpu:
self.add_env('MKL_NUM_THREADS', str(self._cpu))
self.add_env('MARS_CPU_TOTAL', str(self._cpu))
self.add_env('MARS_USE_PROCESS_STAT', '1')
if self._memory:
self.add_env('MARS_MEMORY_TOTAL', str(int(self._memory)))
if self._modules:
self.add_env('MARS_LOAD_MODULES', ','.join(self._modules))
if self._path_environ:
self.add_env('MARS_YARN_PATH', self._path_environ)
if self._source_path:
self.add_env('MARS_SOURCE_PATH', self._source_path)
def build_script(self):
bash_lines = [textwrap.dedent("""
#!/bin/bash
if [[ "$YARN_CONTAINER_RUNTIME_TYPE" == "docker" ]]; then
export MARS_USE_CGROUP_STAT=1
else
export MARS_USE_PROCESS_STAT=1
fi
if [[ -n $MARS_SOURCE_PATH ]]; then export PYTHONPATH=$PYTHONPATH:$MARS_SOURCE_PATH; fi
if [[ -n $MARS_YARN_PATH ]]; then export PATH=$MARS_YARN_PATH:$PATH; fi
""").strip()]
if not self._env_scheme:
bash_lines.append('source mars_env/bin/activate')
python_executable = 'mars_env/bin/python'
elif self._env_scheme == 'conda':
bash_lines.append(f'conda activate "{self._env_path}"')
python_executable = 'python'
elif self._env_scheme == 'venv':
bash_lines.append(f'source "{self._env_path}/bin/activate"')
python_executable = self._env_path + '/bin/python'
else: # pragma: no cover
python_executable = self._env_path
cmd = self._cmd_tmpl.format(executable=python_executable)
bash_lines.append(f'{cmd} -m {_get_local_app_module(self.service_name)} {self._extra_args} > /tmp/{self.service_name}.stdout.log 2> /tmp/{self.service_name}.stderr.log')
return '\n'.join(bash_lines) + '\n'
class MarsSupervisorConfig(MarsServiceConfig):
service_name = 'mars.supervisor'
web_service_name = 'mars.web'
class MarsWorkerConfig(MarsServiceConfig):
service_name = 'mars.worker'
def __init__(self, environment, worker_cache_mem=None, spill_dirs=None, **kwargs):
kwargs['depends'] = MarsSupervisorConfig.service_name
super().__init__(environment, **kwargs)
if worker_cache_mem:
self.add_env('MARS_CACHE_MEM_SIZE', worker_cache_mem)
if spill_dirs:
self.add_env('MARS_SPILL_DIRS',
spill_dirs if isinstance(spill_dirs, str) else ':'.join(spill_dirs))
class MarsApplicationConfig:
def __init__(self, name=None, queue=None, file_systems=None, master=None,
supervisor_config=None, worker_config=None):
self._name = name
self._queue = queue or 'default'
self._file_systems = file_systems or []
self._master = master or AppMasterConfig(cpu=1, memory='512 MiB')
self._supervisor_config = supervisor_config
self._worker_config = worker_config
def build(self):
services = _remove_nones({
MarsSupervisorConfig.service_name: self._supervisor_config.build() if self._supervisor_config else None,
MarsWorkerConfig.service_name: self._worker_config.build() if self._worker_config else None,
})
return dict(
name=self._name,
queue=self._queue,
file_systems=self._file_systems,
master=self._master.build() if self._master else None,
services=services,
) | mars/deploy/yarn/config.py |
import os
import textwrap
from urllib.parse import urlparse
from ...utils import parse_readable_size
def _remove_nones(cfg):
return dict((k, v) for k, v in cfg.items() if v is not None)
def _get_local_app_module(mod_name):
return __name__.rsplit('.', 1)[0] + '.' + mod_name.rsplit('.', 1)[-1]
class SecurityConfig:
def __init__(self, cert_file=None, key_file=None):
self._cert_file = cert_file
self._key_file = key_file
def build(self):
return dict(cert_file=self._cert_file, key_file=self._key_file)
class AppFileConfig:
def __init__(self, source, file_type=None, visibility=None,
size=None, timestamp=None):
self._source = source
self._file_type = file_type
self._visibility = visibility
self._size = size
self._timestamp = timestamp
def build(self):
if all(v is None for v in (self._file_type, self._visibility, self._size, self._timestamp)):
return self._source
else:
return _remove_nones(dict(
source=self._source, type=self._file_type, visibility=self._visibility,
size=self._size, timestamp=self._timestamp
))
class AppContainerConfig:
def __init__(self, cpu=None, memory=None, env=None, files=None, script=None):
self._cpu = cpu
if memory is not None:
real_mem, is_percent = parse_readable_size(memory)
assert not is_percent
self._memory = real_mem
else:
self._memory = None
self._env = env
self._script = script
self._files = files
self.add_default_envs()
def build_script(self):
return self._script
def add_default_envs(self):
pass
def add_env(self, k, v):
if self._env is None:
self._env = dict()
self._env[k] = v
def build(self):
return _remove_nones(dict(
resources=dict(
vcores=self._cpu,
memory=f'{self._memory // 1024 ** 2} MiB' if self._memory else None,
),
env=self._env,
script=self.build_script(),
files=dict((k, v.build()) for k, v in self._files.items()) if self._files else None,
))
class AppMasterConfig(AppContainerConfig):
def __init__(self, security=None, **kwargs):
super().__init__(**kwargs)
self._security = security
def build(self):
d = super().build()
if self._security is not None:
d['security'] = self._security.build()
return d
class AppServiceConfig(AppContainerConfig):
def __init__(self, instances=1, depends=None, allow_failures=False,
max_restarts=0, **kwargs):
super().__init__(**kwargs)
if isinstance(depends, str):
depends = [depends]
self._allow_failures = allow_failures
self._depends = depends or []
self._max_restarts = max_restarts
self._instances = instances
def build(self):
d = super().build()
d.update(dict(
instances=self._instances,
depends=self._depends,
allow_failures=self._allow_failures,
max_restarts=self._max_restarts,
))
return d
class MarsServiceConfig(AppServiceConfig):
service_name = None
def __init__(self, environment, modules=None, cmd_tmpl=None, cpu=None, memory=None,
log_config=None, extra_args=None, **kwargs):
files = kwargs.pop('files', dict())
kwargs['files'] = files
parsed = urlparse(environment)
self._env_scheme = parsed.scheme
if parsed.scheme:
import mars
self._source_path = os.path.dirname(os.path.dirname(os.path.abspath(mars.__file__)))
self._env_path = environment[len(parsed.scheme) + 3:]
self._path_environ = os.environ['PATH']
else:
self._source_path = None
self._env_path = environment
self._path_environ = None
self._cmd_tmpl = cmd_tmpl or '"{executable}"'
if not self._env_scheme:
files['mars_env'] = AppFileConfig(environment)
self._log_config = log_config
if log_config:
files['logging.conf'] = AppFileConfig(log_config)
self._modules = modules.split(',') if isinstance(modules, str) else modules
self._extra_args = extra_args or ''
cpu = cpu or 1
memory = memory or '1 GiB'
super().__init__(cpu=cpu, memory=memory, **kwargs)
def add_default_envs(self):
if self._cpu:
self.add_env('MKL_NUM_THREADS', str(self._cpu))
self.add_env('MARS_CPU_TOTAL', str(self._cpu))
self.add_env('MARS_USE_PROCESS_STAT', '1')
if self._memory:
self.add_env('MARS_MEMORY_TOTAL', str(int(self._memory)))
if self._modules:
self.add_env('MARS_LOAD_MODULES', ','.join(self._modules))
if self._path_environ:
self.add_env('MARS_YARN_PATH', self._path_environ)
if self._source_path:
self.add_env('MARS_SOURCE_PATH', self._source_path)
def build_script(self):
bash_lines = [textwrap.dedent("""
#!/bin/bash
if [[ "$YARN_CONTAINER_RUNTIME_TYPE" == "docker" ]]; then
export MARS_USE_CGROUP_STAT=1
else
export MARS_USE_PROCESS_STAT=1
fi
if [[ -n $MARS_SOURCE_PATH ]]; then export PYTHONPATH=$PYTHONPATH:$MARS_SOURCE_PATH; fi
if [[ -n $MARS_YARN_PATH ]]; then export PATH=$MARS_YARN_PATH:$PATH; fi
""").strip()]
if not self._env_scheme:
bash_lines.append('source mars_env/bin/activate')
python_executable = 'mars_env/bin/python'
elif self._env_scheme == 'conda':
bash_lines.append(f'conda activate "{self._env_path}"')
python_executable = 'python'
elif self._env_scheme == 'venv':
bash_lines.append(f'source "{self._env_path}/bin/activate"')
python_executable = self._env_path + '/bin/python'
else: # pragma: no cover
python_executable = self._env_path
cmd = self._cmd_tmpl.format(executable=python_executable)
bash_lines.append(f'{cmd} -m {_get_local_app_module(self.service_name)} {self._extra_args} > /tmp/{self.service_name}.stdout.log 2> /tmp/{self.service_name}.stderr.log')
return '\n'.join(bash_lines) + '\n'
class MarsSupervisorConfig(MarsServiceConfig):
service_name = 'mars.supervisor'
web_service_name = 'mars.web'
class MarsWorkerConfig(MarsServiceConfig):
service_name = 'mars.worker'
def __init__(self, environment, worker_cache_mem=None, spill_dirs=None, **kwargs):
kwargs['depends'] = MarsSupervisorConfig.service_name
super().__init__(environment, **kwargs)
if worker_cache_mem:
self.add_env('MARS_CACHE_MEM_SIZE', worker_cache_mem)
if spill_dirs:
self.add_env('MARS_SPILL_DIRS',
spill_dirs if isinstance(spill_dirs, str) else ':'.join(spill_dirs))
class MarsApplicationConfig:
def __init__(self, name=None, queue=None, file_systems=None, master=None,
supervisor_config=None, worker_config=None):
self._name = name
self._queue = queue or 'default'
self._file_systems = file_systems or []
self._master = master or AppMasterConfig(cpu=1, memory='512 MiB')
self._supervisor_config = supervisor_config
self._worker_config = worker_config
def build(self):
services = _remove_nones({
MarsSupervisorConfig.service_name: self._supervisor_config.build() if self._supervisor_config else None,
MarsWorkerConfig.service_name: self._worker_config.build() if self._worker_config else None,
})
return dict(
name=self._name,
queue=self._queue,
file_systems=self._file_systems,
master=self._master.build() if self._master else None,
services=services,
) | 0.449634 | 0.07343 |
from optparse import OptionParser
import numpy as np
import pandas as pd
from scipy.stats import wilcoxon
def main():
usage = "%prog"
parser = OptionParser(usage=usage)
#parser.add_option('--keyword', dest='key', default=None,
# help='Keyword argument: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
amazon_df = pd.read_csv('amazon.csv', header=0, index_col=0)
framing_df = pd.read_csv('framing.csv', header=0, index_col=0)
yelp_df = pd.read_csv('yelp.csv', header=0, index_col=0)
twitter_df = pd.read_csv('twitter.csv', header=0, index_col=0)
datasets = {'framing': framing_df, 'amazon': amazon_df, 'yelp': yelp_df, 'twitter': twitter_df}
keys = ['framing', 'amazon', 'yelp', 'twitter']
print("rest vs cal")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'pcc', 'acc', 'cshift', 'platt']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'cal')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("worse than cal")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'pcc', 'acc', 'cshift', 'platt']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'cal')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) > 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("better than cc")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'pcc', 'acc', 'cshift', 'platt', 'cal']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'cc')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) < 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("better than pcc")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'acc', 'cshift', 'platt', 'cal']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'cc')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) < 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("better than acc")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'pcc', 'cshift', 'platt', 'cal']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'acc')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) < 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("worse than ACC")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'pcc', 'cshift', 'platt', 'cal']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'acc')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) > 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("worse than platt")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'pcc', 'acc', 'cshift', 'cal']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'platt')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) > 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("L1 better than L2")
for method in ['cc', 'pcc', 'acc', 'cshift', 'platt', 'cal']:
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for n_train in n_train_vals:
vals1 = df[(df.Penalty == 'l1') & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == 'l2') & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) < 0 and pval < 0.05/48:
print(k, method, n_train, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("L2 better than L1")
for method in ['cc', 'pcc', 'acc', 'cshift', 'platt', 'cal']:
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for n_train in n_train_vals:
vals1 = df[(df.Penalty == 'l1') & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == 'l2') & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) > 0 and pval < 0.05/48:
print(k, method, n_train, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
if __name__ == '__main__':
main() | post/do_tests.py | from optparse import OptionParser
import numpy as np
import pandas as pd
from scipy.stats import wilcoxon
def main():
usage = "%prog"
parser = OptionParser(usage=usage)
#parser.add_option('--keyword', dest='key', default=None,
# help='Keyword argument: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
amazon_df = pd.read_csv('amazon.csv', header=0, index_col=0)
framing_df = pd.read_csv('framing.csv', header=0, index_col=0)
yelp_df = pd.read_csv('yelp.csv', header=0, index_col=0)
twitter_df = pd.read_csv('twitter.csv', header=0, index_col=0)
datasets = {'framing': framing_df, 'amazon': amazon_df, 'yelp': yelp_df, 'twitter': twitter_df}
keys = ['framing', 'amazon', 'yelp', 'twitter']
print("rest vs cal")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'pcc', 'acc', 'cshift', 'platt']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'cal')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("worse than cal")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'pcc', 'acc', 'cshift', 'platt']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'cal')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) > 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("better than cc")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'pcc', 'acc', 'cshift', 'platt', 'cal']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'cc')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) < 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("better than pcc")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'acc', 'cshift', 'platt', 'cal']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'cc')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) < 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("better than acc")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'pcc', 'cshift', 'platt', 'cal']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'acc')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) < 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("worse than ACC")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'pcc', 'cshift', 'platt', 'cal']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'acc')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) > 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("worse than platt")
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for penalty in ['l1']:
for n_train in n_train_vals:
for method in ['nontest', 'cc', 'pcc', 'acc', 'cshift', 'cal']:
vals1 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == penalty) & (df.n_train == n_train) & (df.method == 'platt')].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) > 0 and pval < 0.05/48:
print(k, penalty, n_train, method, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("L1 better than L2")
for method in ['cc', 'pcc', 'acc', 'cshift', 'platt', 'cal']:
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for n_train in n_train_vals:
vals1 = df[(df.Penalty == 'l1') & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == 'l2') & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) < 0 and pval < 0.05/48:
print(k, method, n_train, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
print("L2 better than L1")
for method in ['cc', 'pcc', 'acc', 'cshift', 'platt', 'cal']:
for k in keys:
df = datasets[k]
n_train_vals = list(set(df['n_train'].values))
n_train_vals.sort()
for n_train in n_train_vals:
vals1 = df[(df.Penalty == 'l1') & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
vals2 = df[(df.Penalty == 'l2') & (df.n_train == n_train) & (df.method == method)].values[0, 4:]
test_result = wilcoxon(vals1, vals2)
pval = test_result[1]
if np.mean(vals1) - np.mean(vals2) > 0 and pval < 0.05/48:
print(k, method, n_train, len(vals1), len(vals2), np.mean(vals1) - np.mean(vals2), pval)
if __name__ == '__main__':
main() | 0.238196 | 0.237046 |
import numpy as np
import os
from functools import partial
try:
import nibabel as nib
import cifti
except ModuleNotFoundError:
raise Exception('Please install nibabel and cifti in your work station')
def load_brainimg(imgpath, ismask=False):
    """
    Load brain image identified by its suffix.

    The supporting suffixes are as follows:
        Nifti:      .nii.gz, .nii
        freesurfer: .mgz, .mgh
        gifti:      .func.gii, .shape.gii
        cifti:      .dscalar.nii, .dlabel.nii, .dtseries.nii

    Parameters
    ----------
    imgpath : str
        Brain image data path.
    ismask : bool
        False (default): the data is reshaped/transposed into a 4D
        (n_vol, x, y, z)-style array; True: the data is kept in its
        native (mask) layout with fewer trailing axes.

    Returns
    -------
    brain_img : array
        Data of brain image.
    header : header
        Header of brain image.
    """
    imgname = os.path.basename(imgpath)
    # CIFTI names also end in '.nii', so they must be matched first.
    if imgname.endswith('dscalar.nii') or imgname.endswith('dlabel.nii') or imgname.endswith('dtseries.nii'):
        brain_img, header = cifti.read(imgpath)
        if not ismask:
            brain_img = brain_img[..., None, None]
        else:
            brain_img = brain_img[..., None]
    elif ('nii.gz' in imgname) or (imgname.split('.')[-1] == 'nii'):
        # Load once and reuse for both data and header (the original
        # implementation read the file from disk twice).
        img = nib.load(imgpath)
        brain_img = img.get_data()
        if not ismask:
            # (x, y, z, t) -> (t, x, y, z)
            brain_img = np.transpose(brain_img, (3, 0, 1, 2))
        header = img.header
    elif imgname.endswith('mgz') or imgname.endswith('mgh'):
        img = nib.freesurfer.load(imgpath)
        brain_img = img.get_data()
        if not ismask:
            if brain_img.ndim == 3:
                # Promote a single volume to 4D before the transpose.
                brain_img = brain_img[..., None]
            brain_img = np.transpose(brain_img, (3, 0, 1, 2))
        header = img.header
    elif imgname.endswith('gii'):
        assert not imgname.endswith('surf.gii'), "surf.gii is a geometry file, not an array activation."
        img = nib.load(imgpath)
        brain_img = img.darrays[0].data
        if not ismask:
            brain_img = brain_img[None, :, None, None]
        else:
            brain_img = brain_img[None, :, None]
        header = img.header
    else:
        raise Exception('Not support this format of brain image data, please contact with author to update this function.')
    return brain_img, header
def save_brainimg(imgpath, data, header):
    """
    Save brain image identified by its suffix.

    The supporting suffixes are as follows:
        Nifti:      .nii.gz
        freesurfer: .mgz, .mgh
        cifti:      .dscalar.nii, .dlabel.nii, .dtseries.nii

    Note that due to ways to store gifti image are differ from other images,
    we didn't support to save data as a gifti image.

    Parameters
    ----------
    imgpath : str
        Brain image path to be saved.
    data : ndarray
        Brain image data matrix, volume-first (n_vol, x, y, z) for the
        volumetric formats.
    header : header
        Brain image header.
    """
    imgname = os.path.basename(imgpath)
    # Everything after the first '.' is treated as the format suffix.
    suffix_parts = imgname.split('.')[1:]
    assert len(suffix_parts) < 4, "Please rename your brain image file for too many . in your filename."
    suffix = '.'.join(suffix_parts)
    if suffix == 'nii.gz':
        # (t, x, y, z) -> (x, y, z, t) before writing.
        outimg = nib.Nifti1Image(np.transpose(data, (1, 2, 3, 0)), None, header)
        nib.save(outimg, imgpath)
    elif suffix in ('mgz', 'mgh'):
        outimg = nib.MGHImage(np.transpose(data, (1, 2, 3, 0)), None, header)
        nib.save(outimg, imgpath)
    elif suffix in ('dscalar.nii', 'dlabel.nii', 'dtseries.nii'):
        # Drop the two trailing singleton axes added at load time.
        scalar_data = data[..., 0, 0]
        map_names = [''] * scalar_data.shape[0]
        bm_full = header[1]
        cifti.write(imgpath, scalar_data, (cifti.Scalar.from_names(map_names), bm_full))
    else:
        raise Exception('Not support this format of brain image data, please contact with author to update this function.')
def extract_brain_activation(brainimg, mask, roilabels, method='mean'):
    """
    Extract brain activation from ROI.

    Parameters
    ----------
    brainimg : array
        A 4D brain image array with the first dimension correspond to pictures and the rest 3D correspond to brain images
    mask : array
        A 3D brain image array with the same size as the rest 3D of brainimg.
    roilabels : list, array
        ROI labels
    method : str
        Method to integrate activation from each ROI, by default is 'mean'.
        One of 'mean', 'std', 'max', 'voxel'.

    Returns
    -------
    roisignals : list
        Extracted brain activation.
        Each element in the list is the extracted activation of the roilabels.
        Due to different label may contain different number of activation voxels,
        the output activation could not stored as numpy array list.
    """
    if method == 'mean':
        calc_way = partial(np.mean, axis=1)
    elif method == 'std':
        calc_way = partial(np.std, axis=1)
    elif method == 'max':
        calc_way = partial(np.max, axis=1)
    elif method == 'voxel':
        # 'voxel' keeps the raw per-voxel values instead of aggregating.
        calc_way = np.array
    else:
        # NOTE(review): '' inside a single-quoted literal is string
        # concatenation, not an escaped quote -- this message renders as
        # "We havent support ...". Should be "haven't" (escaped or
        # double-quoted).
        raise Exception('We haven''t support this method, please contact authors to implement.')
    assert brainimg.shape[1:] == mask.shape, "brainimg and mask are mismatched."
    roisignals = []
    for i, lbl in enumerate(roilabels):
        # Boolean-index every voxel carrying this label, then aggregate
        # across the voxel axis. (Loop index i is unused.)
        roisignals.append(calc_way(brainimg[:, mask==lbl]))
    return roisignals | dnnbrain/brain/io.py | import numpy as np
import os
from functools import partial
try:
import nibabel as nib
import cifti
except ModuleNotFoundError:
raise Exception('Please install nibabel and cifti in your work station')
def load_brainimg(imgpath, ismask=False):
    """
    Load brain image identified by its suffix.

    The supporting suffixes are as follows:
        Nifti:      .nii.gz, .nii
        freesurfer: .mgz, .mgh
        gifti:      .func.gii, .shape.gii
        cifti:      .dscalar.nii, .dlabel.nii, .dtseries.nii

    Parameters
    ----------
    imgpath : str
        Brain image data path.
    ismask : bool
        False (default): the data is reshaped/transposed into a 4D
        (n_vol, x, y, z)-style array; True: the data is kept in its
        native (mask) layout with fewer trailing axes.

    Returns
    -------
    brain_img : array
        Data of brain image.
    header : header
        Header of brain image.
    """
    imgname = os.path.basename(imgpath)
    # CIFTI names also end in '.nii', so they must be matched first.
    if imgname.endswith('dscalar.nii') or imgname.endswith('dlabel.nii') or imgname.endswith('dtseries.nii'):
        brain_img, header = cifti.read(imgpath)
        if not ismask:
            brain_img = brain_img[..., None, None]
        else:
            brain_img = brain_img[..., None]
    elif ('nii.gz' in imgname) or (imgname.split('.')[-1] == 'nii'):
        # Load once and reuse for both data and header (the original
        # implementation read the file from disk twice).
        img = nib.load(imgpath)
        brain_img = img.get_data()
        if not ismask:
            # (x, y, z, t) -> (t, x, y, z)
            brain_img = np.transpose(brain_img, (3, 0, 1, 2))
        header = img.header
    elif imgname.endswith('mgz') or imgname.endswith('mgh'):
        img = nib.freesurfer.load(imgpath)
        brain_img = img.get_data()
        if not ismask:
            if brain_img.ndim == 3:
                # Promote a single volume to 4D before the transpose.
                brain_img = brain_img[..., None]
            brain_img = np.transpose(brain_img, (3, 0, 1, 2))
        header = img.header
    elif imgname.endswith('gii'):
        assert not imgname.endswith('surf.gii'), "surf.gii is a geometry file, not an array activation."
        img = nib.load(imgpath)
        brain_img = img.darrays[0].data
        if not ismask:
            brain_img = brain_img[None, :, None, None]
        else:
            brain_img = brain_img[None, :, None]
        header = img.header
    else:
        raise Exception('Not support this format of brain image data, please contact with author to update this function.')
    return brain_img, header
def save_brainimg(imgpath, data, header):
    """
    Save brain image identified by its suffix.

    The supporting suffixes are as follows:
        Nifti:      .nii.gz
        freesurfer: .mgz, .mgh
        cifti:      .dscalar.nii, .dlabel.nii, .dtseries.nii

    Note that due to ways to store gifti image are differ from other images,
    we didn't support to save data as a gifti image.

    Parameters
    ----------
    imgpath : str
        Brain image path to be saved.
    data : ndarray
        Brain image data matrix, volume-first (n_vol, x, y, z) for the
        volumetric formats.
    header : header
        Brain image header.
    """
    imgname = os.path.basename(imgpath)
    # Everything after the first '.' is treated as the format suffix.
    suffix_parts = imgname.split('.')[1:]
    assert len(suffix_parts) < 4, "Please rename your brain image file for too many . in your filename."
    suffix = '.'.join(suffix_parts)
    if suffix == 'nii.gz':
        # (t, x, y, z) -> (x, y, z, t) before writing.
        outimg = nib.Nifti1Image(np.transpose(data, (1, 2, 3, 0)), None, header)
        nib.save(outimg, imgpath)
    elif suffix in ('mgz', 'mgh'):
        outimg = nib.MGHImage(np.transpose(data, (1, 2, 3, 0)), None, header)
        nib.save(outimg, imgpath)
    elif suffix in ('dscalar.nii', 'dlabel.nii', 'dtseries.nii'):
        # Drop the two trailing singleton axes added at load time.
        scalar_data = data[..., 0, 0]
        map_names = [''] * scalar_data.shape[0]
        bm_full = header[1]
        cifti.write(imgpath, scalar_data, (cifti.Scalar.from_names(map_names), bm_full))
    else:
        raise Exception('Not support this format of brain image data, please contact with author to update this function.')
def extract_brain_activation(brainimg, mask, roilabels, method='mean'):
    """
    Extract brain activation from ROI.

    Parameters
    ----------
    brainimg : array
        A 4D brain image array with the first dimension correspond to pictures and the rest 3D correspond to brain images
    mask : array
        A 3D brain image array with the same size as the rest 3D of brainimg.
    roilabels : list, array
        ROI labels
    method : str
        Method to integrate activation from each ROI, by default is 'mean'.
        One of 'mean', 'std', 'max', 'voxel'.

    Returns
    -------
    roisignals : list
        Extracted brain activation.
        Each element in the list is the extracted activation of the roilabels.
        Due to different label may contain different number of activation voxels,
        the output activation could not stored as numpy array list.
    """
    if method == 'mean':
        calc_way = partial(np.mean, axis=1)
    elif method == 'std':
        calc_way = partial(np.std, axis=1)
    elif method == 'max':
        calc_way = partial(np.max, axis=1)
    elif method == 'voxel':
        # 'voxel' keeps the raw per-voxel values instead of aggregating.
        calc_way = np.array
    else:
        # NOTE(review): '' inside a single-quoted literal is string
        # concatenation, not an escaped quote -- this message renders as
        # "We havent support ...". Should be "haven't" (escaped or
        # double-quoted).
        raise Exception('We haven''t support this method, please contact authors to implement.')
    assert brainimg.shape[1:] == mask.shape, "brainimg and mask are mismatched."
    roisignals = []
    for i, lbl in enumerate(roilabels):
        # Boolean-index every voxel carrying this label, then aggregate
        # across the voxel axis. (Loop index i is unused.)
        roisignals.append(calc_way(brainimg[:, mask==lbl]))
    return roisignals | 0.534612 | 0.359955
from parcellearning.conv.pairconv import PAIRConv
from parcellearning.conv.gatconv import GATConv
import numpy as np
import dgl
from dgl import data
from dgl.data import DGLDataset
import dgl.function as fn
from dgl.nn.pytorch import edge_softmax
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear
class PAIRGAT(nn.Module):
    """
    Instantiate a pairwise-similarity graph attention network model.

    Parameters:
    - - - - -
    num_layers: int
        number of layers in network
    in_dim: int
        input feature dimension
    num_hidden: int
        number of nodes per hidden layer
    num_classes: int
        number of output classes
    num_heads: list of length (2)
        number of independent heads per layer (multi-head attention mechanisms)
        num_heads[0] = hidden heads
        num_heads[1] = output heads
    activation:
        activation function applied inside each PAIRConv layer
    feat_drop: float
        layer-wise dropout rate [0,1]
    attn_drop: float
        mechanism-wise dropout rate [0,1]
    negative_slope:
        negative slope of leaky ReLU
    residual:
        use residual connection
    """
    def __init__(self,
                 num_layers,
                 in_dim,
                 num_hidden,
                 num_classes,
                 num_heads,
                 activation,
                 feat_drop,
                 attn_drop,
                 negative_slope=0.2,
                 residual=False,
                 allow_zero_in_degree=True,
                 return_attention=False):
        # NOTE(review): the ``residual`` argument is accepted but never
        # forwarded to any layer -- confirm whether PAIRConv should get it.
        super(PAIRGAT, self).__init__()
        self.num_layers = num_layers
        self.num_hidden = num_hidden
        self.num_heads = num_heads[0]          # heads per hidden layer
        self.num_out_heads = num_heads[-1]     # heads for the output layer
        self.layers = nn.ModuleList()
        self.activation = activation
        self.return_attention = return_attention
        # input layer
        self.layers.append(PAIRConv(in_feats=in_dim,
                                    out_feats=self.num_hidden,
                                    num_heads=self.num_heads,
                                    feat_drop=feat_drop,
                                    attn_drop=attn_drop,
                                    negative_slope=negative_slope,
                                    activation=activation,
                                    allow_zero_in_degree=allow_zero_in_degree,
                                    return_attention=False))
        # hidden layers
        # The (num_hidden+1)*num_heads input width implies each PAIRConv
        # head emits num_hidden+1 features -- presumably the pairwise term
        # appends one feature per head; verify against PAIRConv.
        for l in range(1, num_layers):
            self.layers.append(PAIRConv(in_feats=(num_hidden+1) * self.num_heads,
                                        out_feats=self.num_hidden,
                                        num_heads=self.num_heads,
                                        feat_drop=feat_drop,
                                        attn_drop=attn_drop,
                                        negative_slope=negative_slope,
                                        activation=activation,
                                        allow_zero_in_degree=allow_zero_in_degree,
                                        return_attention=False))
        # output layer: plain linear projection to class logits
        self.layers.append(Linear((num_hidden+1) * self.num_heads, num_classes, bias=True))
        print(self.layers)

    def forward(self, g=None, inputs=None, **kwds):
        """
        Parameters:
        - - - - -
        g: DGL Graph
            the graph
        inputs: tensor
            node features

        Returns:
        - - - - -
        logits: tensor
            output layer
        """
        h = inputs
        # NOTE(review): self.layers holds num_layers PAIRConv layers plus the
        # final Linear, but this loop visits only the first num_layers-1
        # convs -- verify the last conv layer is intentionally unused.
        for l in range(self.num_layers-1):
            # flatten(1) merges the per-head outputs into one feature axis.
            h = self.layers[l](g, h).flatten(1)
        h = h.flatten(1)  # no-op on an already-2D tensor
        # output projection
        logits = self.layers[-1](h)
        return logits

    def save(self, filename):
        """
        Serialize the model parameters (state_dict) to ``filename``.
        """
        torch.save(self.state_dict(), filename) | parcellearning/pairgat/pairgat.py | from parcellearning.conv.pairconv import PAIRConv
from parcellearning.conv.gatconv import GATConv
import numpy as np
import dgl
from dgl import data
from dgl.data import DGLDataset
import dgl.function as fn
from dgl.nn.pytorch import edge_softmax
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear
class PAIRGAT(nn.Module):
    """
    Instantiate a pairwise-similarity graph attention network model.

    Parameters:
    - - - - -
    num_layers: int
        number of layers in network
    in_dim: int
        input feature dimension
    num_hidden: int
        number of nodes per hidden layer
    num_classes: int
        number of output classes
    num_heads: list of length (2)
        number of independent heads per layer (multi-head attention mechanisms)
        num_heads[0] = hidden heads
        num_heads[1] = output heads
    activation:
        activation function applied inside each PAIRConv layer
    feat_drop: float
        layer-wise dropout rate [0,1]
    attn_drop: float
        mechanism-wise dropout rate [0,1]
    negative_slope:
        negative slope of leaky ReLU
    residual:
        use residual connection
    """
    def __init__(self,
                 num_layers,
                 in_dim,
                 num_hidden,
                 num_classes,
                 num_heads,
                 activation,
                 feat_drop,
                 attn_drop,
                 negative_slope=0.2,
                 residual=False,
                 allow_zero_in_degree=True,
                 return_attention=False):
        # NOTE(review): the ``residual`` argument is accepted but never
        # forwarded to any layer -- confirm whether PAIRConv should get it.
        super(PAIRGAT, self).__init__()
        self.num_layers = num_layers
        self.num_hidden = num_hidden
        self.num_heads = num_heads[0]          # heads per hidden layer
        self.num_out_heads = num_heads[-1]     # heads for the output layer
        self.layers = nn.ModuleList()
        self.activation = activation
        self.return_attention = return_attention
        # input layer
        self.layers.append(PAIRConv(in_feats=in_dim,
                                    out_feats=self.num_hidden,
                                    num_heads=self.num_heads,
                                    feat_drop=feat_drop,
                                    attn_drop=attn_drop,
                                    negative_slope=negative_slope,
                                    activation=activation,
                                    allow_zero_in_degree=allow_zero_in_degree,
                                    return_attention=False))
        # hidden layers
        # The (num_hidden+1)*num_heads input width implies each PAIRConv
        # head emits num_hidden+1 features -- presumably the pairwise term
        # appends one feature per head; verify against PAIRConv.
        for l in range(1, num_layers):
            self.layers.append(PAIRConv(in_feats=(num_hidden+1) * self.num_heads,
                                        out_feats=self.num_hidden,
                                        num_heads=self.num_heads,
                                        feat_drop=feat_drop,
                                        attn_drop=attn_drop,
                                        negative_slope=negative_slope,
                                        activation=activation,
                                        allow_zero_in_degree=allow_zero_in_degree,
                                        return_attention=False))
        # output layer: plain linear projection to class logits
        self.layers.append(Linear((num_hidden+1) * self.num_heads, num_classes, bias=True))
        print(self.layers)

    def forward(self, g=None, inputs=None, **kwds):
        """
        Parameters:
        - - - - -
        g: DGL Graph
            the graph
        inputs: tensor
            node features

        Returns:
        - - - - -
        logits: tensor
            output layer
        """
        h = inputs
        # NOTE(review): self.layers holds num_layers PAIRConv layers plus the
        # final Linear, but this loop visits only the first num_layers-1
        # convs -- verify the last conv layer is intentionally unused.
        for l in range(self.num_layers-1):
            # flatten(1) merges the per-head outputs into one feature axis.
            h = self.layers[l](g, h).flatten(1)
        h = h.flatten(1)  # no-op on an already-2D tensor
        # output projection
        logits = self.layers[-1](h)
        return logits

    def save(self, filename):
        """
        Serialize the model parameters (state_dict) to ``filename``.
        """
        torch.save(self.state_dict(), filename) | 0.911967 | 0.499268
import networkx as nx
from collections import OrderedDict
from ontology_processing.graph_creation.ontology_processing_utils import (
get_source_types,
)
class ProcessMyths:
    """
    Climate Myths are shown to the end user. Some myths are attached to a climate
    solution and others are common myths not attached to a solution. Both need to
    be added to the NetworkX object for use by the API.
    """

    def __init__(self, G):
        self.G = G
        self.source_types = get_source_types()
        # Populated by process_myths(); ordered by popularity after sort_myths().
        self.general_myths = None

    def process_myths(self, subgraph_downstream_adaptations, nodes_upstream_greenhouse_effect):
        """
        Structures myth data in NetworkX object to be easier for API use.

        For every node carrying the 'myth' attribute, follow its
        'is_a_myth_about' edges and attach the myth to the neighbor under
        'solution myths' and/or 'impact myths'; myths about nodes upstream
        of the greenhouse effect are collected as general myths.
        """
        general_myths = list()
        all_myths = list(nx.get_node_attributes(self.G, "myth").keys())
        for myth in all_myths:
            node_neighbors = self.G.neighbors(myth)
            for neighbor in node_neighbors:
                if self.G[myth][neighbor]["type"] == "is_a_myth_about":
                    # NOTE(review): dead initializer -- impact_myths is
                    # reassigned below before any use.
                    impact_myths = []
                    if "risk solution" in self.G.nodes[neighbor].keys():
                        # Append this myth to the neighbor's existing list
                        # (or start a new one).
                        if "solution myths" not in self.G.nodes[neighbor].keys():
                            solution_myths = []
                        else:
                            solution_myths = self.G.nodes[neighbor]["solution myths"]
                        solution_myths.append(myth)
                        nx.set_node_attributes(
                            self.G, {neighbor: solution_myths}, "solution myths"
                        )
                    if subgraph_downstream_adaptations.has_node(neighbor):
                        if "impact myths" not in self.G.nodes[neighbor].keys():
                            impact_myths = []
                        else:
                            impact_myths = self.G.nodes[neighbor]["impact myths"]
                        impact_myths.append(myth)
                        nx.set_node_attributes(self.G, {neighbor: impact_myths}, "impact myths")
                    if neighbor in nodes_upstream_greenhouse_effect:
                        general_myths.append(myth)
            # Record the myth's unique source URLs once per myth.
            self.add_myth_sources(myth)
        # get unique general myths
        self.general_myths = list(dict.fromkeys(general_myths))
        self.sort_myths()

    def add_myth_sources(self, myth):
        """
        Process myth sources into nice field called 'myth sources'
        with only unique urls from any source type
        """
        myth_sources = list()
        for source_type in self.source_types:
            if (
                "properties" in self.G.nodes[myth]
                and source_type in self.G.nodes[myth]["properties"]
            ):
                myth_sources.extend(self.G.nodes[myth]["properties"][source_type])
        myth_sources = list(
            OrderedDict.fromkeys(myth_sources)
        )  # removes any duplicates while preserving order
        nx.set_node_attributes(
            self.G,
            {myth: myth_sources},
            "myth sources",
        )

    def sort_myths(self):
        """
        Sort the myths by popularity (skeptical science)
        """
        general_myths_dict = dict()
        for myth in self.general_myths:
            # Higher myth_frequency == more popular; sorted descending below.
            general_myths_dict[myth] = self.G.nodes[myth]["data_properties"]["myth_frequency"]
        general_myths_sorted = sorted(
            general_myths_dict,
            key=general_myths_dict.get,
            reverse=True,
        )
        self.general_myths = general_myths_sorted

    def add_general_myths(self):
        """
        Update the networkx object to have a 'general myths' field and
        include in it all nodes from mitigation_solutions
        """
        nx.set_node_attributes(
            self.G,
            {"increase in greenhouse effect": self.general_myths},
            "general myths",
        )

    def get_graph(self):
        """Return the annotated NetworkX graph."""
        return self.G | ontology_processing/graph_creation/process_myths.py | import networkx as nx
from collections import OrderedDict
from ontology_processing.graph_creation.ontology_processing_utils import (
get_source_types,
)
class ProcessMyths:
    """
    Climate Myths are shown to the end user. Some myths are attached to a climate
    solution and others are common myths not attached to a solution. Both need to
    be added to the NetworkX object for use by the API.
    """

    def __init__(self, G):
        self.G = G
        self.source_types = get_source_types()
        # Populated by process_myths(); ordered by popularity after sort_myths().
        self.general_myths = None

    def process_myths(self, subgraph_downstream_adaptations, nodes_upstream_greenhouse_effect):
        """
        Structures myth data in NetworkX object to be easier for API use.

        For every node carrying the 'myth' attribute, follow its
        'is_a_myth_about' edges and attach the myth to the neighbor under
        'solution myths' and/or 'impact myths'; myths about nodes upstream
        of the greenhouse effect are collected as general myths.
        """
        general_myths = list()
        all_myths = list(nx.get_node_attributes(self.G, "myth").keys())
        for myth in all_myths:
            node_neighbors = self.G.neighbors(myth)
            for neighbor in node_neighbors:
                if self.G[myth][neighbor]["type"] == "is_a_myth_about":
                    # NOTE(review): dead initializer -- impact_myths is
                    # reassigned below before any use.
                    impact_myths = []
                    if "risk solution" in self.G.nodes[neighbor].keys():
                        # Append this myth to the neighbor's existing list
                        # (or start a new one).
                        if "solution myths" not in self.G.nodes[neighbor].keys():
                            solution_myths = []
                        else:
                            solution_myths = self.G.nodes[neighbor]["solution myths"]
                        solution_myths.append(myth)
                        nx.set_node_attributes(
                            self.G, {neighbor: solution_myths}, "solution myths"
                        )
                    if subgraph_downstream_adaptations.has_node(neighbor):
                        if "impact myths" not in self.G.nodes[neighbor].keys():
                            impact_myths = []
                        else:
                            impact_myths = self.G.nodes[neighbor]["impact myths"]
                        impact_myths.append(myth)
                        nx.set_node_attributes(self.G, {neighbor: impact_myths}, "impact myths")
                    if neighbor in nodes_upstream_greenhouse_effect:
                        general_myths.append(myth)
            # Record the myth's unique source URLs once per myth.
            self.add_myth_sources(myth)
        # get unique general myths
        self.general_myths = list(dict.fromkeys(general_myths))
        self.sort_myths()

    def add_myth_sources(self, myth):
        """
        Process myth sources into nice field called 'myth sources'
        with only unique urls from any source type
        """
        myth_sources = list()
        for source_type in self.source_types:
            if (
                "properties" in self.G.nodes[myth]
                and source_type in self.G.nodes[myth]["properties"]
            ):
                myth_sources.extend(self.G.nodes[myth]["properties"][source_type])
        myth_sources = list(
            OrderedDict.fromkeys(myth_sources)
        )  # removes any duplicates while preserving order
        nx.set_node_attributes(
            self.G,
            {myth: myth_sources},
            "myth sources",
        )

    def sort_myths(self):
        """
        Sort the myths by popularity (skeptical science)
        """
        general_myths_dict = dict()
        for myth in self.general_myths:
            # Higher myth_frequency == more popular; sorted descending below.
            general_myths_dict[myth] = self.G.nodes[myth]["data_properties"]["myth_frequency"]
        general_myths_sorted = sorted(
            general_myths_dict,
            key=general_myths_dict.get,
            reverse=True,
        )
        self.general_myths = general_myths_sorted

    def add_general_myths(self):
        """
        Update the networkx object to have a 'general myths' field and
        include in it all nodes from mitigation_solutions
        """
        nx.set_node_attributes(
            self.G,
            {"increase in greenhouse effect": self.general_myths},
            "general myths",
        )

    def get_graph(self):
        """Return the annotated NetworkX graph."""
        return self.G | 0.705988 | 0.394172
import pytest
from dart_fss.fs.extract import find_all_columns
from dart_fss.utils import str_compare
class TestCrp(object):
    """
    Regression-test fixture for one corporation's financial statements.

    NOTE(review): despite the Test* name, pytest will not collect this
    class because it defines __init__ -- it is driven explicitly via
    run_test().
    """

    def __init__(self, corp_code, bgn_de, separate, report_tp, end_de=None):
        # Extraction parameters; corp is resolved later by set_corp_list().
        self.corp = None
        if corp_code:
            self.corp_code = corp_code
        else:
            pytest.fail('The parameter should be initialized: corp_code')
        self.bgn_de = bgn_de
        self.end_de = end_de
        self.separate = separate
        self.report_tp = report_tp
        # Queue of expected-value checks added via add_test_value().
        self.test_set = []

    def set_corp_list(self, corp_list):
        """Resolve self.corp from a dart-fss corp list by corp_code."""
        if self.corp_code:
            self.corp = corp_list.find_by_corp_code(self.corp_code)

    def add_test_value(self, fs_tp, date, column, item, expected):
        """Queue one expected cell: (statement type, date column, row label) -> value."""
        test_set = {
            'fs_tp': fs_tp,
            'date': date,
            'column': column,
            'item': item,
            'expected': expected
        }
        self.test_set.append(test_set)

    def run_test(self):
        """Extract the statements once and compare every queued cell."""
        if self.corp is None:
            pytest.fail('The corp_list should be initialized')
        fs = self.corp.extract_fs(bgn_de=self.bgn_de, end_de=self.end_de,
                                  separate=self.separate, report_tp=self.report_tp)
        for test in self.test_set:
            tp = test['fs_tp']
            date = test['date']
            column = test['column']
            item = test['item']
            expected = test['expected']
            df = fs[tp]
            # First column whose header matches the date / label query.
            date_column = find_all_columns(df=df, query=date)[0]
            label_column = find_all_columns(df=df, query=column)[0]
            actual = None
            for idx in range(len(df)):
                # Whitespace-insensitive label comparison; if several rows
                # match, the last one wins.
                text = df[label_column].iloc[idx].replace(' ', '')
                if str_compare(text, item):
                    actual = df[date_column].iloc[idx]
            if actual != expected:
                pytest.fail("Test failed: corp_code='{}', ".format(self.corp.corp_code) +
                            "corp_name='{}', fs_tp='{}', ".format(self.corp.corp_name, tp) +
                            "start_dt='{}', report_tp='{}', ".format(self.bgn_de, fs.info['report_tp']) +
                            "date='{}', column='{}',".format(date, column) +
                            "item='{}', actual='{}', expected='{}'".format(item, actual, expected)) | dart_fss/tests/test_case/testcrp.py | import pytest
from dart_fss.fs.extract import find_all_columns
from dart_fss.utils import str_compare
class TestCrp(object):
    """
    Regression-test fixture for one corporation's financial statements.

    NOTE(review): despite the Test* name, pytest will not collect this
    class because it defines __init__ -- it is driven explicitly via
    run_test().
    """

    def __init__(self, corp_code, bgn_de, separate, report_tp, end_de=None):
        # Extraction parameters; corp is resolved later by set_corp_list().
        self.corp = None
        if corp_code:
            self.corp_code = corp_code
        else:
            pytest.fail('The parameter should be initialized: corp_code')
        self.bgn_de = bgn_de
        self.end_de = end_de
        self.separate = separate
        self.report_tp = report_tp
        # Queue of expected-value checks added via add_test_value().
        self.test_set = []

    def set_corp_list(self, corp_list):
        """Resolve self.corp from a dart-fss corp list by corp_code."""
        if self.corp_code:
            self.corp = corp_list.find_by_corp_code(self.corp_code)

    def add_test_value(self, fs_tp, date, column, item, expected):
        """Queue one expected cell: (statement type, date column, row label) -> value."""
        test_set = {
            'fs_tp': fs_tp,
            'date': date,
            'column': column,
            'item': item,
            'expected': expected
        }
        self.test_set.append(test_set)

    def run_test(self):
        """Extract the statements once and compare every queued cell."""
        if self.corp is None:
            pytest.fail('The corp_list should be initialized')
        fs = self.corp.extract_fs(bgn_de=self.bgn_de, end_de=self.end_de,
                                  separate=self.separate, report_tp=self.report_tp)
        for test in self.test_set:
            tp = test['fs_tp']
            date = test['date']
            column = test['column']
            item = test['item']
            expected = test['expected']
            df = fs[tp]
            # First column whose header matches the date / label query.
            date_column = find_all_columns(df=df, query=date)[0]
            label_column = find_all_columns(df=df, query=column)[0]
            actual = None
            for idx in range(len(df)):
                # Whitespace-insensitive label comparison; if several rows
                # match, the last one wins.
                text = df[label_column].iloc[idx].replace(' ', '')
                if str_compare(text, item):
                    actual = df[date_column].iloc[idx]
            if actual != expected:
                pytest.fail("Test failed: corp_code='{}', ".format(self.corp.corp_code) +
                            "corp_name='{}', fs_tp='{}', ".format(self.corp.corp_name, tp) +
                            "start_dt='{}', report_tp='{}', ".format(self.bgn_de, fs.info['report_tp']) +
                            "date='{}', column='{}',".format(date, column) +
                            "item='{}', actual='{}', expected='{}'".format(item, actual, expected)) | 0.395018 | 0.294576
import random
import boto3
from base import ProxyRotator
class AWSCommand(object):
    '''Class encapsulating the aws ec2 API'''

    def __init__(self, config=None):
        # Keep a handle to the EC2 resource API plus the rotator config.
        self.ec2 = boto3.resource('ec2')
        self.config = config

    def create_ec2(self, **params):
        """Create exactly one EC2 instance and return it."""
        created = self.ec2.create_instances(MaxCount=1, MinCount=1, **params)
        return created[0]

    def get_proxies(self):
        """Return 'ip,0,instance_id,0,0' records for running proxy instances."""
        # Only running instances spawned from the configured AMI count.
        state_filters = [
            {'Name': 'image-id', 'Values': [self.config.aws_image_id]},
            {'Name': 'instance-state-name', 'Values': ['running']},
        ]
        return [
            ','.join([inst.public_ip_address, '0', inst.id, '0', '0'])
            for inst in self.ec2.instances.filter(Filters=state_filters)
        ]

    def delete_ec2(self, instance_id):
        """Terminate the given instance and block until it is gone."""
        doomed = self.ec2.Instance(instance_id)
        doomed.terminate()
        doomed.wait_until_terminated()
class AwsProxyRotator(ProxyRotator):
    """ AWS implementation of ProxyRotator """

    def __init__(self, cfg='proxy.conf', test_mode=False, rotate=False, region=None):
        super(AwsProxyRotator, self).__init__(cfg, test_mode, rotate, region)
        #AWS resource manager
        self.aws_command = AWSCommand(config=self.config)
        self.vps_command = self.aws_command

    def delete_instance(self, instance_id):
        """ Delete instance by id """
        return self.aws_command.delete_ec2(instance_id)

    def make_new_instance(self, region=None, test=False, verbose=False):
        """Spin up one proxy EC2 instance; returns (ip, instance_id)."""
        # If calling as test, make up an ip
        if test:
            return '.'.join(map(lambda x: str(random.randrange(20, 100)), range(4))), random.randrange(10000, 50000)
        # NOTE(review): DryRun=True makes create_instances validate only and
        # raise DryRunOperation instead of launching -- confirm this flag was
        # not left over from debugging.
        params = dict(ImageId=self.config.aws_image_id,
                      InstanceType=self.config.aws_instance_type,
                      KeyName=self.config.aws_key_name,
                      SecurityGroupIds=self.config.aws_security_groups,
                      SubnetId=self.config.aws_subnet_id ,
                      DryRun=True)
        print 'Making new ec2...'
        ec2_instance = self.aws_command.create_ec2(**params)
        ec2_instance.wait_until_running()
        # NOTE(review): 'time' is never imported in this module -- this line
        # raises NameError; add 'import time' at the top of the file.
        time.sleep(10)
        ip = ec2_instance.public_ip_address
        pid = ec2_instance.id
        # Post process the host
        print 'Post-processing',ip,'...'
        self.post_process(ip)
        return ip, pid

    def drop(self):
        """ Drop all instances in current configuration (except the LB) """
        print 'Dropping all proxies ...'
        proxies = self.aws_command.get_proxies()
        for item in proxies:
            # NOTE(review): get_proxies() joins FIVE comma fields, so this
            # 3-name unpack raises ValueError; needs item.split(',')[:3].
            ip,_,instance_id = item.split(',')
            print '\tDropping ec2',instance_id,'with IP',ip,'...'
            self.aws_command.delete_ec2(instance_id) | aws.py | import random
import boto3
from base import ProxyRotator
class AWSCommand(object):
    '''Class encapsulating the aws ec2 API'''

    def __init__(self, config=None):
        # Keep a handle to the EC2 resource API plus the rotator config.
        self.ec2 = boto3.resource('ec2')
        self.config = config

    def create_ec2(self, **params):
        """Create exactly one EC2 instance and return it."""
        created = self.ec2.create_instances(MaxCount=1, MinCount=1, **params)
        return created[0]

    def get_proxies(self):
        """Return 'ip,0,instance_id,0,0' records for running proxy instances."""
        # Only running instances spawned from the configured AMI count.
        state_filters = [
            {'Name': 'image-id', 'Values': [self.config.aws_image_id]},
            {'Name': 'instance-state-name', 'Values': ['running']},
        ]
        return [
            ','.join([inst.public_ip_address, '0', inst.id, '0', '0'])
            for inst in self.ec2.instances.filter(Filters=state_filters)
        ]

    def delete_ec2(self, instance_id):
        """Terminate the given instance and block until it is gone."""
        doomed = self.ec2.Instance(instance_id)
        doomed.terminate()
        doomed.wait_until_terminated()
class AwsProxyRotator(ProxyRotator):
    """ AWS implementation of ProxyRotator """

    def __init__(self, cfg='proxy.conf', test_mode=False, rotate=False, region=None):
        super(AwsProxyRotator, self).__init__(cfg, test_mode, rotate, region)
        #AWS resource manager
        self.aws_command = AWSCommand(config=self.config)
        self.vps_command = self.aws_command

    def delete_instance(self, instance_id):
        """ Delete instance by id """
        return self.aws_command.delete_ec2(instance_id)

    def make_new_instance(self, region=None, test=False, verbose=False):
        """Spin up one proxy EC2 instance; returns (ip, instance_id)."""
        # If calling as test, make up an ip
        if test:
            return '.'.join(map(lambda x: str(random.randrange(20, 100)), range(4))), random.randrange(10000, 50000)
        # NOTE(review): DryRun=True makes create_instances validate only and
        # raise DryRunOperation instead of launching -- confirm this flag was
        # not left over from debugging.
        params = dict(ImageId=self.config.aws_image_id,
                      InstanceType=self.config.aws_instance_type,
                      KeyName=self.config.aws_key_name,
                      SecurityGroupIds=self.config.aws_security_groups,
                      SubnetId=self.config.aws_subnet_id ,
                      DryRun=True)
        print 'Making new ec2...'
        ec2_instance = self.aws_command.create_ec2(**params)
        ec2_instance.wait_until_running()
        # NOTE(review): 'time' is never imported in this module -- this line
        # raises NameError; add 'import time' at the top of the file.
        time.sleep(10)
        ip = ec2_instance.public_ip_address
        pid = ec2_instance.id
        # Post process the host
        print 'Post-processing',ip,'...'
        self.post_process(ip)
        return ip, pid

    def drop(self):
        """ Drop all instances in current configuration (except the LB) """
        print 'Dropping all proxies ...'
        proxies = self.aws_command.get_proxies()
        for item in proxies:
            # NOTE(review): get_proxies() joins FIVE comma fields, so this
            # 3-name unpack raises ValueError; needs item.split(',')[:3].
            ip,_,instance_id = item.split(',')
            print '\tDropping ec2',instance_id,'with IP',ip,'...'
            self.aws_command.delete_ec2(instance_id) | 0.525125 | 0.095687
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Double
from java.lang import Long
from java.sql import Connection
from java.sql import DriverManager
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import Blackboard
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
import traceback
import general
"""
Analyzes database created by browser that stores GEO location info.
"""
class BrowserLocationAnalyzer(general.AndroidComponentAnalyzer):
    """Extracts GPS fixes from the Android browser's CachedGeoposition DB."""

    def __init__(self):
        # Per-class logger (java.util.logging via Autopsy's wrapper).
        self._logger = Logger.getLogger(self.__class__.__name__)

    def analyze(self, dataSource, fileManager, context):
        """Find CachedGeoposition*.db files in the data source and mine them."""
        try:
            abstractFiles = fileManager.findFiles(dataSource, "CachedGeoposition%.db")
            for abstractFile in abstractFiles:
                if abstractFile.getSize() == 0:
                    continue
                try:
                    # Copy the DB out of the image into the case temp dir so
                    # JDBC can open it as an ordinary file.
                    jFile = File(Case.getCurrentCase().getTempDirectory(), str(abstractFile.getId()) + abstractFile.getName())
                    ContentUtils.writeToFile(abstractFile, jFile, context.dataSourceIngestIsCancelled)
                    self.__findGeoLocationsInDB(jFile.toString(), abstractFile)
                except Exception as ex:
                    self._logger.log(Level.SEVERE, "Error parsing browser location files", ex)
                    self._logger.log(Level.SEVERE, traceback.format_exc())
        except TskCoreException as ex:
            # Error finding browser location files.
            pass

    def __findGeoLocationsInDB(self, databasePath, abstractFile):
        """Read CachedPosition rows and post one TSK_GPS_BOOKMARK per fix."""
        if not databasePath:
            return
        try:
            Class.forName("org.sqlite.JDBC") #load JDBC driver
            connection = DriverManager.getConnection("jdbc:sqlite:" + databasePath)
            statement = connection.createStatement()
        except (ClassNotFoundException) as ex:
            self._logger.log(Level.SEVERE, "Error loading JDBC driver", ex)
            self._logger.log(Level.SEVERE, traceback.format_exc())
            return
        except (SQLException) as ex:
            # Error connecting to SQL databse.
            return
        resultSet = None
        try:
            resultSet = statement.executeQuery("SELECT timestamp, latitude, longitude, accuracy FROM CachedPosition;")
            while resultSet.next():
                # Browser stores milliseconds since epoch; TSK wants seconds.
                timestamp = Long.valueOf(resultSet.getString("timestamp")) / 1000
                latitude = Double.valueOf(resultSet.getString("latitude"))
                longitude = Double.valueOf(resultSet.getString("longitude"))
                attributes = ArrayList()
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LATITUDE, general.MODULE_NAME, latitude))
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LONGITUDE, general.MODULE_NAME, longitude))
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_DATETIME, general.MODULE_NAME, timestamp))
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PROG_NAME, general.MODULE_NAME, "Browser Location History"))
                artifact = abstractFile.newDataArtifact(BlackboardArtifact.Type(BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_BOOKMARK), attributes)
                # artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_VALUE.getTypeID(),moduleName, accuracy))
                # NOTE: originally commented out
                try:
                    blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard()
                    # NOTE(review): 'context' is not defined in this method's
                    # scope (it is a parameter of analyze()) -- this line
                    # raises NameError when reached; context should be passed
                    # through from analyze().
                    blackboard.postArtifact(artifact, general.MODULE_NAME, context.getJobId())
                except Blackboard.BlackboardException as ex:
                    self._logger.log(Level.SEVERE, "Unable to index blackboard artifact " + str(artifact.getArtifactTypeName()), ex)
                    self._logger.log(Level.SEVERE, traceback.format_exc())
                    MessageNotifyUtil.Notify.error("Failed to index GPS trackpoint artifact for keyword search.", artifact.getDisplayName())
        except SQLException as ex:
            # Unable to execute browser location SQL query against database.
            pass
        except Exception as ex:
            self._logger.log(Level.SEVERE, "Error processing browser location history.", ex)
            self._logger.log(Level.SEVERE, traceback.format_exc())
        finally:
            # Best-effort cleanup of JDBC resources.
            try:
                if resultSet is not None:
                    resultSet.close()
                statement.close()
                connection.close()
            except Exception as ex:
                # Error closing database.
                pass | InternalPythonModules/android/browserlocation.py | from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Double
from java.lang import Long
from java.sql import Connection
from java.sql import DriverManager
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import Blackboard
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
import traceback
import general
"""
Analyzes database created by browser that stores GEO location info.
"""
class BrowserLocationAnalyzer(general.AndroidComponentAnalyzer):
    """Autopsy ingest analyzer (Jython) that extracts cached GEO positions
    from an Android browser's CachedGeoposition*.db SQLite database and
    posts them to the case blackboard as TSK_GPS_BOOKMARK artifacts."""
    def __init__(self):
        # Logger named after the concrete analyzer class.
        self._logger = Logger.getLogger(self.__class__.__name__)
    def analyze(self, dataSource, fileManager, context):
        """Locate candidate databases on *dataSource*, copy each one to the
        case temp directory, and parse it for cached positions.

        Per-file errors are logged and processing continues with the next
        file; failure to enumerate files at all is silently ignored.
        """
        try:
            # SQL-style wildcard: matches e.g. CachedGeoposition.db and variants.
            abstractFiles = fileManager.findFiles(dataSource, "CachedGeoposition%.db")
            for abstractFile in abstractFiles:
                if abstractFile.getSize() == 0:
                    continue
                try:
                    # Prefix the temp copy with the object id so same-named
                    # files from different paths cannot collide.
                    jFile = File(Case.getCurrentCase().getTempDirectory(), str(abstractFile.getId()) + abstractFile.getName())
                    ContentUtils.writeToFile(abstractFile, jFile, context.dataSourceIngestIsCancelled)
                    self.__findGeoLocationsInDB(jFile.toString(), abstractFile)
                except Exception as ex:
                    self._logger.log(Level.SEVERE, "Error parsing browser location files", ex)
                    self._logger.log(Level.SEVERE, traceback.format_exc())
        except TskCoreException as ex:
            # Error finding browser location files; nothing to analyze.
            pass
    def __findGeoLocationsInDB(self, databasePath, abstractFile):
        """Open the SQLite db at *databasePath* over JDBC and create one
        TSK_GPS_BOOKMARK artifact per row of the CachedPosition table.

        Artifacts are posted to the blackboard; JDBC resources are closed
        in the finally block regardless of outcome.
        """
        if not databasePath:
            return
        try:
            Class.forName("org.sqlite.JDBC") #load JDBC driver
            connection = DriverManager.getConnection("jdbc:sqlite:" + databasePath)
            statement = connection.createStatement()
        except (ClassNotFoundException) as ex:
            self._logger.log(Level.SEVERE, "Error loading JDBC driver", ex)
            self._logger.log(Level.SEVERE, traceback.format_exc())
            return
        except (SQLException) as ex:
            # Error connecting to SQL database; nothing more we can do.
            return
        resultSet = None
        try:
            resultSet = statement.executeQuery("SELECT timestamp, latitude, longitude, accuracy FROM CachedPosition;")
            while resultSet.next():
                # Timestamp presumably stored in milliseconds, converted to
                # epoch seconds here -- TODO confirm against the schema.
                timestamp = Long.valueOf(resultSet.getString("timestamp")) / 1000
                latitude = Double.valueOf(resultSet.getString("latitude"))
                longitude = Double.valueOf(resultSet.getString("longitude"))
                attributes = ArrayList()
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LATITUDE, general.MODULE_NAME, latitude))
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_GEO_LONGITUDE, general.MODULE_NAME, longitude))
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_DATETIME, general.MODULE_NAME, timestamp))
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PROG_NAME, general.MODULE_NAME, "Browser Location History"))
                artifact = abstractFile.newDataArtifact(BlackboardArtifact.Type(BlackboardArtifact.ARTIFACT_TYPE.TSK_GPS_BOOKMARK), attributes)
                # The selected "accuracy" column is currently unused; the
                # original attribute write is kept below for reference.
                # artifact.addAttribute(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_VALUE.getTypeID(),moduleName, accuracy))
                # NOTE: originally commented out
                try:
                    blackboard = Case.getCurrentCase().getSleuthkitCase().getBlackboard()
                    blackboard.postArtifact(artifact, general.MODULE_NAME, context.getJobId())
                except Blackboard.BlackboardException as ex:
                    self._logger.log(Level.SEVERE, "Unable to index blackboard artifact " + str(artifact.getArtifactTypeName()), ex)
                    self._logger.log(Level.SEVERE, traceback.format_exc())
                    MessageNotifyUtil.Notify.error("Failed to index GPS trackpoint artifact for keyword search.", artifact.getDisplayName())
        except SQLException as ex:
            # Unable to execute browser location SQL query against database.
            pass
        except Exception as ex:
            self._logger.log(Level.SEVERE, "Error processing browser location history.", ex)
            self._logger.log(Level.SEVERE, traceback.format_exc())
        finally:
            try:
                if resultSet is not None:
                    resultSet.close()
                statement.close()
                connection.close()
            except Exception as ex:
                # Error closing database; ignore during cleanup.
                pass
import numpy
import imageio
import glob
import sys
import os
import random
from PIL import Image
height = 0
width = 0
def get_subdir(folder):
    """Return the sorted names of the first level of subdirectories found
    while walking *folder* top-down.

    Fix: the original left ``listDir`` as ``None`` when no subdirectories
    existed anywhere under *folder* and then crashed with AttributeError
    on ``None.sort()``; an empty list is now returned instead.
    """
    for root, dirs, files in os.walk(folder):
        if dirs:
            # First level that has subdirectories wins (matches the
            # original early ``break``).
            return sorted(dirs)
    return []
def get_labels_and_files(folder, number=0):
    """Collect (label, filename) pairs for the non-empty .png files under
    the labelled subdirectories of *folder*.

    When *number* is positive, a random sample of that size is drawn per
    label (via random.sample); otherwise every file is used. Labels are
    the sorted-subdirectory indices.
    """
    # One sorted file list per label directory.
    per_label_files = []
    subdirs = get_subdir(folder)
    for label, sub in enumerate(subdirs):
        dirname = os.path.join(folder, sub)
        collected = []
        for entry in os.listdir(dirname):
            if entry.endswith('.png'):
                fullname = os.path.join(dirname, entry)
                if os.path.getsize(fullname) > 0:
                    collected.append(fullname)
                else:
                    print('file ' + fullname + ' is empty')
        # Sort so each label's list starts in a deterministic order
        # regardless of OS directory-listing order.
        collected.sort()
        per_label_files.append(collected)
    # Draw the per-label sample and flatten into (label, filename) pairs.
    labelsAndFiles = []
    for label, files in enumerate(per_label_files):
        sample_size = number if number > 0 else len(files)
        for filename in random.sample(files, sample_size):
            labelsAndFiles.append((label, filename))
    print(labelsAndFiles[0][0])
    return labelsAndFiles
def make_arrays(labelsAndFiles, ratio):
    """Load every grayscale image in *labelsAndFiles* and split the result
    into train/test numpy arrays.

    *ratio* is 'train' (everything to train), 'test' (everything to test),
    or a percentage string/number (0-100) giving the test share.
    Returns (trainImagedata, trainLabeldata, testImagedata, testLabeldata).
    """
    # height/width are published as module globals for write_imagedata().
    global height, width
    images = []
    labels = []
    # Probe the first file to learn the image geometry.
    imShape = imageio.imread(labelsAndFiles[0][1]).shape
    print(imShape)
    if len(imShape) > 2:
        height, width, channels = imShape
        print(height, width, channels)
    else:
        height, width = imShape
        print(height, width)
        channels = 1
    for i in range(0, len(labelsAndFiles)):
        # display progress, since this can take a while
        if (i % 100 == 0):
            sys.stdout.write("\r%d%% complete" %
                             ((i * 100) / len(labelsAndFiles)))
            sys.stdout.flush()
        filename = labelsAndFiles[i][1]
        try:
            image = imageio.imread(filename)
            # Only 2-D (grayscale) images are kept; color images are skipped.
            if len(image.shape) > 2:
                print ("Log for grayscale images only. image.shape is bigger than 2. Image not added")
            else:
                images.append(image)
                labels.append(labelsAndFiles[i][0])
        except:
            # NOTE(review): bare except hides *all* read errors; if this
            # fires we won't have the requested number of images.
            print("\nCan't read image file " + filename)
    # Translate the mode/ratio argument into a test fraction.
    if ratio == 'train':
        ratio = 0
    elif ratio == 'test':
        ratio = 1
    else:
        ratio = float(ratio) / 100
    count = len(images)
    trainNum = int(count * (1 - ratio))
    testNum = count - trainNum
    # NOTE(review): the channels > 1 branch looks defensive only -- color
    # images are filtered out above, so this branch matters only when the
    # probe image itself was color. TODO confirm intent.
    if channels > 1:
        trainImagedata = numpy.zeros(
            (trainNum, height, width, channels), dtype=numpy.uint8)
        testImagedata = numpy.zeros(
            (testNum, height, width, channels), dtype=numpy.uint8)
    else:
        trainImagedata = numpy.zeros(
            (trainNum, height, width), dtype=numpy.uint8)
        testImagedata = numpy.zeros(
            (testNum, height, width), dtype=numpy.uint8)
    trainLabeldata = numpy.zeros(trainNum, dtype=numpy.uint8)
    testLabeldata = numpy.zeros(testNum, dtype=numpy.uint8)
    # First trainNum images go to train, the remainder to test.
    for i in range(trainNum):
        trainImagedata[i] = images[i]
        trainLabeldata[i] = labels[i]
    for i in range(0, testNum):
        testImagedata[i] = images[trainNum + i]
        testLabeldata[i] = labels[trainNum + i]
    print("\n")
    return trainImagedata, trainLabeldata, testImagedata, testLabeldata
def write_labeldata(labeldata, outputfile):
    """Write *labeldata* to *outputfile* in IDX1 (MNIST label) format:
    a big-endian header (magic 0x0801, count) followed by the raw bytes.
    """
    magic_and_count = numpy.array([0x0801, len(labeldata)], dtype='>i4')
    payload = magic_and_count.tobytes() + labeldata.tobytes()
    with open(outputfile, "wb") as out:
        out.write(payload)
def write_imagedata(imagedata, outputfile):
    """Write *imagedata* to *outputfile* in IDX3 (MNIST image) format:
    a big-endian header (magic 0x0803, count, height, width) followed by
    the raw pixel bytes. Relies on the module-level height/width globals
    populated by make_arrays().
    """
    global height, width
    idx_header = numpy.array([0x0803, len(imagedata), height, width], dtype='>i4')
    blob = idx_header.tobytes() + imagedata.tobytes()
    with open(outputfile, "wb") as out:
        out.write(blob)
def main(folder, mode, dstPath):
    """Convert a folder of per-label png subdirectories into MNIST-format
    idx files under *dstPath*.

    *mode* is 'train', 'test', or a percentage string passed through to
    make_arrays as the test split ratio.
    """
    # NOTE(review): these globals are declared but never assigned here.
    global idxLabelPath, idxImagePath
    labelsAndFiles = get_labels_and_files(folder)
    # Uncomment the line below if you want to seed the random
    # number generator in the same way I did to produce the
    # specific data files in this repo.
    # random.seed(int("notMNIST", 36))
    testLabelPath = dstPath+"/t10k-labels-idx1-ubyte"
    testImagePath = dstPath+"/t10k-images-idx3-ubyte"
    trainLabelPath = dstPath+"/train-labels-idx1-ubyte"
    trainImagePath = dstPath+"/train-images-idx3-ubyte"
    if not os.path.exists(dstPath):
        os.makedirs(dstPath)
    # Ensure a sibling "processed" directory exists next to dstPath.
    if not os.path.exists(os.path.split(dstPath)[0]+"/processed"):
        print("create" + os.path.split(dstPath)[0]+"/processed")
        os.mkdir(os.path.split(dstPath)[0]+"/processed")
    # Shuffle so the train/test split is not ordered by label.
    random.shuffle(labelsAndFiles)
    trainImagedata, trainLabeldata, testImagedata, testLabeldata = make_arrays(
        labelsAndFiles, mode)
    # Write only the requested split(s).
    if mode == 'train':
        write_labeldata(trainLabeldata, trainLabelPath)
        write_imagedata(trainImagedata, trainImagePath)
    elif mode == 'test':
        write_labeldata(testLabeldata, testLabelPath)
        write_imagedata(testImagedata, testImagePath)
    else:
        write_labeldata(trainLabeldata, trainLabelPath)
        write_imagedata(trainImagedata, trainImagePath)
        write_labeldata(testLabeldata, testLabelPath)
        write_imagedata(testImagedata, testImagePath)
if __name__ == '__main__':
    # CLI usage: convert_to_mnist_format.py <image-folder> <train|test|ratio> <dst-path>
    folder = sys.argv[1]
    mode = sys.argv[2]
    dstPath = sys.argv[3]
    main(folder, mode, dstPath)
import numpy
import imageio
import glob
import sys
import os
import random
from PIL import Image
height = 0
width = 0
def get_subdir(folder):
    """Return the sorted names of the first level of subdirectories found
    while walking *folder* top-down.

    Fix: the original left ``listDir`` as ``None`` when no subdirectories
    existed anywhere under *folder* and then crashed with AttributeError
    on ``None.sort()``; an empty list is now returned instead.
    """
    for root, dirs, files in os.walk(folder):
        if dirs:
            # First level that has subdirectories wins (matches the
            # original early ``break``).
            return sorted(dirs)
    return []
def get_labels_and_files(folder, number=0):
    """Collect (label, filename) pairs for the non-empty .png files under
    the labelled subdirectories of *folder*.

    When *number* is positive, a random sample of that size is drawn per
    label (via random.sample); otherwise every file is used. Labels are
    the sorted-subdirectory indices.
    """
    # One sorted file list per label directory.
    per_label_files = []
    subdirs = get_subdir(folder)
    for label, sub in enumerate(subdirs):
        dirname = os.path.join(folder, sub)
        collected = []
        for entry in os.listdir(dirname):
            if entry.endswith('.png'):
                fullname = os.path.join(dirname, entry)
                if os.path.getsize(fullname) > 0:
                    collected.append(fullname)
                else:
                    print('file ' + fullname + ' is empty')
        # Sort so each label's list starts in a deterministic order
        # regardless of OS directory-listing order.
        collected.sort()
        per_label_files.append(collected)
    # Draw the per-label sample and flatten into (label, filename) pairs.
    labelsAndFiles = []
    for label, files in enumerate(per_label_files):
        sample_size = number if number > 0 else len(files)
        for filename in random.sample(files, sample_size):
            labelsAndFiles.append((label, filename))
    print(labelsAndFiles[0][0])
    return labelsAndFiles
def make_arrays(labelsAndFiles, ratio):
    """Load every grayscale image in *labelsAndFiles* and split the result
    into train/test numpy arrays.

    *ratio* is 'train' (everything to train), 'test' (everything to test),
    or a percentage string/number (0-100) giving the test share.
    Returns (trainImagedata, trainLabeldata, testImagedata, testLabeldata).
    """
    # height/width are published as module globals for write_imagedata().
    global height, width
    images = []
    labels = []
    # Probe the first file to learn the image geometry.
    imShape = imageio.imread(labelsAndFiles[0][1]).shape
    print(imShape)
    if len(imShape) > 2:
        height, width, channels = imShape
        print(height, width, channels)
    else:
        height, width = imShape
        print(height, width)
        channels = 1
    for i in range(0, len(labelsAndFiles)):
        # display progress, since this can take a while
        if (i % 100 == 0):
            sys.stdout.write("\r%d%% complete" %
                             ((i * 100) / len(labelsAndFiles)))
            sys.stdout.flush()
        filename = labelsAndFiles[i][1]
        try:
            image = imageio.imread(filename)
            # Only 2-D (grayscale) images are kept; color images are skipped.
            if len(image.shape) > 2:
                print ("Log for grayscale images only. image.shape is bigger than 2. Image not added")
            else:
                images.append(image)
                labels.append(labelsAndFiles[i][0])
        except:
            # NOTE(review): bare except hides *all* read errors; if this
            # fires we won't have the requested number of images.
            print("\nCan't read image file " + filename)
    # Translate the mode/ratio argument into a test fraction.
    if ratio == 'train':
        ratio = 0
    elif ratio == 'test':
        ratio = 1
    else:
        ratio = float(ratio) / 100
    count = len(images)
    trainNum = int(count * (1 - ratio))
    testNum = count - trainNum
    # NOTE(review): the channels > 1 branch looks defensive only -- color
    # images are filtered out above, so this branch matters only when the
    # probe image itself was color. TODO confirm intent.
    if channels > 1:
        trainImagedata = numpy.zeros(
            (trainNum, height, width, channels), dtype=numpy.uint8)
        testImagedata = numpy.zeros(
            (testNum, height, width, channels), dtype=numpy.uint8)
    else:
        trainImagedata = numpy.zeros(
            (trainNum, height, width), dtype=numpy.uint8)
        testImagedata = numpy.zeros(
            (testNum, height, width), dtype=numpy.uint8)
    trainLabeldata = numpy.zeros(trainNum, dtype=numpy.uint8)
    testLabeldata = numpy.zeros(testNum, dtype=numpy.uint8)
    # First trainNum images go to train, the remainder to test.
    for i in range(trainNum):
        trainImagedata[i] = images[i]
        trainLabeldata[i] = labels[i]
    for i in range(0, testNum):
        testImagedata[i] = images[trainNum + i]
        testLabeldata[i] = labels[trainNum + i]
    print("\n")
    return trainImagedata, trainLabeldata, testImagedata, testLabeldata
def write_labeldata(labeldata, outputfile):
    """Write *labeldata* to *outputfile* in IDX1 (MNIST label) format:
    a big-endian header (magic 0x0801, count) followed by the raw bytes.
    """
    magic_and_count = numpy.array([0x0801, len(labeldata)], dtype='>i4')
    payload = magic_and_count.tobytes() + labeldata.tobytes()
    with open(outputfile, "wb") as out:
        out.write(payload)
def write_imagedata(imagedata, outputfile):
    """Write *imagedata* to *outputfile* in IDX3 (MNIST image) format:
    a big-endian header (magic 0x0803, count, height, width) followed by
    the raw pixel bytes. Relies on the module-level height/width globals
    populated by make_arrays().
    """
    global height, width
    idx_header = numpy.array([0x0803, len(imagedata), height, width], dtype='>i4')
    blob = idx_header.tobytes() + imagedata.tobytes()
    with open(outputfile, "wb") as out:
        out.write(blob)
def main(folder, mode, dstPath):
    """Convert a folder of per-label png subdirectories into MNIST-format
    idx files under *dstPath*.

    *mode* is 'train', 'test', or a percentage string passed through to
    make_arrays as the test split ratio.
    """
    # NOTE(review): these globals are declared but never assigned here.
    global idxLabelPath, idxImagePath
    labelsAndFiles = get_labels_and_files(folder)
    # Uncomment the line below if you want to seed the random
    # number generator in the same way I did to produce the
    # specific data files in this repo.
    # random.seed(int("notMNIST", 36))
    testLabelPath = dstPath+"/t10k-labels-idx1-ubyte"
    testImagePath = dstPath+"/t10k-images-idx3-ubyte"
    trainLabelPath = dstPath+"/train-labels-idx1-ubyte"
    trainImagePath = dstPath+"/train-images-idx3-ubyte"
    if not os.path.exists(dstPath):
        os.makedirs(dstPath)
    # Ensure a sibling "processed" directory exists next to dstPath.
    if not os.path.exists(os.path.split(dstPath)[0]+"/processed"):
        print("create" + os.path.split(dstPath)[0]+"/processed")
        os.mkdir(os.path.split(dstPath)[0]+"/processed")
    # Shuffle so the train/test split is not ordered by label.
    random.shuffle(labelsAndFiles)
    trainImagedata, trainLabeldata, testImagedata, testLabeldata = make_arrays(
        labelsAndFiles, mode)
    # Write only the requested split(s).
    if mode == 'train':
        write_labeldata(trainLabeldata, trainLabelPath)
        write_imagedata(trainImagedata, trainImagePath)
    elif mode == 'test':
        write_labeldata(testLabeldata, testLabelPath)
        write_imagedata(testImagedata, testImagePath)
    else:
        write_labeldata(trainLabeldata, trainLabelPath)
        write_imagedata(trainImagedata, trainImagePath)
        write_labeldata(testLabeldata, testLabelPath)
        write_imagedata(testImagedata, testImagePath)
if __name__ == '__main__':
    # CLI usage: convert_to_mnist_format.py <image-folder> <train|test|ratio> <dst-path>
    folder = sys.argv[1]
    mode = sys.argv[2]
    dstPath = sys.argv[3]
    main(folder, mode, dstPath)
from package.definition import logger
class Config():
    """
    Configuration container for the listen-attend-spell training setup.

    Args:
        use_bidirectional (bool): flag indicating whether the listener is bidirectional (default: True)
        use_attention (bool): flag indicating whether to use attention mechanism or not (default: True)
        use_label_smooth (bool): flag indicating whether to use label smoothing or not (default: True)
        input_reverse (bool): flag indicating whether to reverse input feature or not (default: True)
        use_pickle (bool): flag indicating whether to load data from pickle or not (default: False)
        use_augment (bool): flag indicating whether to use spec-augmentation or not (default: True)
        use_pyramidal (bool): flag indicating whether to use pyramidal rnn in listener or not (default: True)
        use_multistep_lr (bool): flag indicating whether to use multistep learning rate or not (default: False)
        augment_ratio (float): ratio of spec-augmentation applied data (default: 1.0)
        listener_layer_size (int): num of listener`s RNN cells (default: 5)
        speller_layer_size (int): num of speller`s RNN cells (default: 3)
        hidden_size (int): size of hidden state of RNN (default: 256)
        dropout (float): dropout probability (default: 0.5)
        batch_size (int): mini-batch size (default: 32)
        worker_num (int): num of cpu cores that will be used (default: 1)
        max_epochs (int): max epoch (default: 40)
        init_lr (float): initial learning rate (default: 1e-4)
        high_plateau_lr (float): maximum learning rate after the ramp-up phase; only stored when use_multistep_lr is True (default: 3e-4)
        low_plateau_lr (float): low learning rate held for a number of steps to avoid extremely slow learning; only stored when use_multistep_lr is True (default: 1e-5)
        teacher_forcing (float): the probability that teacher forcing will be used (default: 0.90)
        seed (int): seed for random (default: 1)
        max_len (int): a maximum allowed length for the sequence to be processed (default: 151)
        use_cuda (bool): if True, use CUDA (default: True)
    """
    def __init__(self,
                 use_bidirectional=True,
                 use_attention=True,
                 use_label_smooth=True,
                 input_reverse=True,
                 use_augment=True,
                 use_pickle=False,
                 use_pyramidal=True,
                 use_cuda=True,
                 augment_ratio=1.0,
                 hidden_size=256,
                 dropout=0.5,
                 listener_layer_size=5,
                 speller_layer_size=3,
                 batch_size=32,
                 worker_num=1,
                 max_epochs=40,
                 use_multistep_lr=False,
                 init_lr=0.0001,
                 high_plateau_lr=0.0003,
                 low_plateau_lr=0.00001,
                 teacher_forcing=0.90,
                 seed=1,
                 max_len=151
                 ):
        self.use_bidirectional = use_bidirectional
        self.use_attention = use_attention
        self.use_label_smooth = use_label_smooth
        self.input_reverse = input_reverse
        self.use_augment = use_augment
        self.use_pickle = use_pickle
        self.use_pyramidal = use_pyramidal
        self.use_cuda = use_cuda
        self.augment_ratio = augment_ratio
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.listener_layer_size = listener_layer_size
        self.speller_layer_size = speller_layer_size
        self.batch_size = batch_size
        self.worker_num = worker_num
        self.max_epochs = max_epochs
        self.use_multistep_lr = use_multistep_lr
        self.init_lr = init_lr
        # NOTE(review): high/low plateau rates exist as attributes only when
        # use_multistep_lr is True; guard external accesses accordingly.
        if use_multistep_lr:
            self.high_plateau_lr = high_plateau_lr
            self.low_plateau_lr = low_plateau_lr
        self.teacher_forcing = teacher_forcing
        self.seed = seed
        self.max_len = max_len
        self.print_log()
    def print_log(self):
        """ print information of configuration """
        logger.info("use_bidirectional : %s" % str(self.use_bidirectional))
        logger.info("use_attention : %s" % str(self.use_attention))
        logger.info("use_pickle : %s" % str(self.use_pickle))
        logger.info("use_augment : %s" % str(self.use_augment))
        logger.info("use_pyramidal : %s" % str(self.use_pyramidal))
        logger.info("augment_ratio : %0.2f" % self.augment_ratio)
        logger.info("input_reverse : %s" % str(self.input_reverse))
        logger.info("hidden_size : %d" % self.hidden_size)
        logger.info("listener_layer_size : %d" % self.listener_layer_size)
        logger.info("speller_layer_size : %d" % self.speller_layer_size)
        logger.info("dropout : %0.2f" % self.dropout)
        logger.info("batch_size : %d" % self.batch_size)
        logger.info("worker_num : %d" % self.worker_num)
        logger.info("max_epochs : %d" % self.max_epochs)
        logger.info("initial learning rate : %0.4f" % self.init_lr)
        if self.use_multistep_lr:
            logger.info("high plateau learning rate : %0.4f" % self.high_plateau_lr)
            logger.info("low plateau learning rate : %0.4f" % self.low_plateau_lr)
        logger.info("teacher_forcing_ratio : %0.2f" % self.teacher_forcing)
        logger.info("seed : %d" % self.seed)
        logger.info("max_len : %d" % self.max_len)
        logger.info("use_cuda : %s" % str(self.use_cuda))
class Config():
    """
    Configuration container for the listen-attend-spell training setup.

    Args:
        use_bidirectional (bool): flag indicating whether the listener is bidirectional (default: True)
        use_attention (bool): flag indicating whether to use attention mechanism or not (default: True)
        use_label_smooth (bool): flag indicating whether to use label smoothing or not (default: True)
        input_reverse (bool): flag indicating whether to reverse input feature or not (default: True)
        use_pickle (bool): flag indicating whether to load data from pickle or not (default: False)
        use_augment (bool): flag indicating whether to use spec-augmentation or not (default: True)
        use_pyramidal (bool): flag indicating whether to use pyramidal rnn in listener or not (default: True)
        use_multistep_lr (bool): flag indicating whether to use multistep learning rate or not (default: False)
        augment_ratio (float): ratio of spec-augmentation applied data (default: 1.0)
        listener_layer_size (int): num of listener`s RNN cells (default: 5)
        speller_layer_size (int): num of speller`s RNN cells (default: 3)
        hidden_size (int): size of hidden state of RNN (default: 256)
        dropout (float): dropout probability (default: 0.5)
        batch_size (int): mini-batch size (default: 32)
        worker_num (int): num of cpu cores that will be used (default: 1)
        max_epochs (int): max epoch (default: 40)
        init_lr (float): initial learning rate (default: 1e-4)
        high_plateau_lr (float): maximum learning rate after the ramp-up phase; only stored when use_multistep_lr is True (default: 3e-4)
        low_plateau_lr (float): low learning rate held for a number of steps to avoid extremely slow learning; only stored when use_multistep_lr is True (default: 1e-5)
        teacher_forcing (float): the probability that teacher forcing will be used (default: 0.90)
        seed (int): seed for random (default: 1)
        max_len (int): a maximum allowed length for the sequence to be processed (default: 151)
        use_cuda (bool): if True, use CUDA (default: True)
    """
    def __init__(self,
                 use_bidirectional=True,
                 use_attention=True,
                 use_label_smooth=True,
                 input_reverse=True,
                 use_augment=True,
                 use_pickle=False,
                 use_pyramidal=True,
                 use_cuda=True,
                 augment_ratio=1.0,
                 hidden_size=256,
                 dropout=0.5,
                 listener_layer_size=5,
                 speller_layer_size=3,
                 batch_size=32,
                 worker_num=1,
                 max_epochs=40,
                 use_multistep_lr=False,
                 init_lr=0.0001,
                 high_plateau_lr=0.0003,
                 low_plateau_lr=0.00001,
                 teacher_forcing=0.90,
                 seed=1,
                 max_len=151
                 ):
        self.use_bidirectional = use_bidirectional
        self.use_attention = use_attention
        self.use_label_smooth = use_label_smooth
        self.input_reverse = input_reverse
        self.use_augment = use_augment
        self.use_pickle = use_pickle
        self.use_pyramidal = use_pyramidal
        self.use_cuda = use_cuda
        self.augment_ratio = augment_ratio
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.listener_layer_size = listener_layer_size
        self.speller_layer_size = speller_layer_size
        self.batch_size = batch_size
        self.worker_num = worker_num
        self.max_epochs = max_epochs
        self.use_multistep_lr = use_multistep_lr
        self.init_lr = init_lr
        # NOTE(review): high/low plateau rates exist as attributes only when
        # use_multistep_lr is True; guard external accesses accordingly.
        if use_multistep_lr:
            self.high_plateau_lr = high_plateau_lr
            self.low_plateau_lr = low_plateau_lr
        self.teacher_forcing = teacher_forcing
        self.seed = seed
        self.max_len = max_len
        self.print_log()
    def print_log(self):
        """ print information of configuration """
        logger.info("use_bidirectional : %s" % str(self.use_bidirectional))
        logger.info("use_attention : %s" % str(self.use_attention))
        logger.info("use_pickle : %s" % str(self.use_pickle))
        logger.info("use_augment : %s" % str(self.use_augment))
        logger.info("use_pyramidal : %s" % str(self.use_pyramidal))
        logger.info("augment_ratio : %0.2f" % self.augment_ratio)
        logger.info("input_reverse : %s" % str(self.input_reverse))
        logger.info("hidden_size : %d" % self.hidden_size)
        logger.info("listener_layer_size : %d" % self.listener_layer_size)
        logger.info("speller_layer_size : %d" % self.speller_layer_size)
        logger.info("dropout : %0.2f" % self.dropout)
        logger.info("batch_size : %d" % self.batch_size)
        logger.info("worker_num : %d" % self.worker_num)
        logger.info("max_epochs : %d" % self.max_epochs)
        logger.info("initial learning rate : %0.4f" % self.init_lr)
        if self.use_multistep_lr:
            logger.info("high plateau learning rate : %0.4f" % self.high_plateau_lr)
            logger.info("low plateau learning rate : %0.4f" % self.low_plateau_lr)
        logger.info("teacher_forcing_ratio : %0.2f" % self.teacher_forcing)
        logger.info("seed : %d" % self.seed)
        logger.info("max_len : %d" % self.max_len)
        logger.info("use_cuda : %s" % str(self.use_cuda))
# In[ ]:
# Author : <NAME>
# github link : https://github.com/amirshnll/COVID-19-Surveillance
# dataset link : http://archive.ics.uci.edu/ml/datasets/COVID-19+Surveillance
# email : <EMAIL>
# ### <p style=color:blue>Logistic Regression for the Divorce Predictors Data Set</p>
#
# #### The Dataset
# The Dataset is from UCIMachinelearning and it provides you all the relevant information needed for the prediction of Divorce. It contains 54 features and on the basis of these features we have to predict that the couple has been divorced or not. Value 1 represent Divorced and value 0 represent not divorced. Features are as follows:
# 1. If one of us apologizes when our discussion deteriorates, the discussion ends.
# 2. I know we can ignore our differences, even if things get hard sometimes.
# 3. When we need it, we can take our discussions with my spouse from the beginning and correct it.
# 4. When I discuss with my spouse, to contact him will eventually work.
# 5. The time I spent with my wife is special for us.
# 6. We don't have time at home as partners.
# 7. We are like two strangers who share the same environment at home rather than family.
# 8. I enjoy our holidays with my wife.
# 9. I enjoy traveling with my wife.
# 10. Most of our goals are common to my spouse.
# 11. I think that one day in the future, when I look back, I see that my spouse and I have been in harmony with each other.
# 12. My spouse and I have similar values in terms of personal freedom.
# 13. My spouse and I have similar sense of entertainment.
# 14. Most of our goals for people (children, friends, etc.) are the same.
# 15. Our dreams with my spouse are similar and harmonious.
# 16. We're compatible with my spouse about what love should be.
# 17. We share the same views about being happy in our life with my spouse
# 18. My spouse and I have similar ideas about how marriage should be
# 19. My spouse and I have similar ideas about how roles should be in marriage
# 20. My spouse and I have similar values in trust.
# 21. I know exactly what my wife likes.
# 22. I know how my spouse wants to be taken care of when she/he sick.
# 23. I know my spouse's favorite food.
# 24. I can tell you what kind of stress my spouse is facing in her/his life.
# 25. I have knowledge of my spouse's inner world.
# 26. I know my spouse's basic anxieties.
# 27. I know what my spouse's current sources of stress are.
# 28. I know my spouse's hopes and wishes.
# 29. I know my spouse very well.
# 30. I know my spouse's friends and their social relationships.
# 31. I feel aggressive when I argue with my spouse.
# 32. When discussing with my spouse, I usually use expressions such as ‘you always’ or ‘you never’ .
# 33. I can use negative statements about my spouse's personality during our discussions.
# 34. I can use offensive expressions during our discussions.
# 35. I can insult my spouse during our discussions.
# 36. I can be humiliating when we discussions.
# 37. My discussion with my spouse is not calm.
# 38. I hate my spouse's way of open a subject.
# 39. Our discussions often occur suddenly.
# 40. We're just starting a discussion before I know what's going on.
# 41. When I talk to my spouse about something, my calm suddenly breaks.
# 42. When I argue with my spouse, ı only go out and I don't say a word.
# 43. I mostly stay silent to calm the environment a little bit.
# 44. Sometimes I think it's good for me to leave home for a while.
# 45. I'd rather stay silent than discuss with my spouse.
# 46. Even if I'm right in the discussion, I stay silent to hurt my spouse.
# 47. When I discuss with my spouse, I stay silent because I am afraid of not being able to control my anger.
# 48. I feel right in our discussions.
# 49. I have nothing to do with what I've been accused of.
# 50. I'm not actually the one who's guilty about what I'm accused of.
# 51. I'm not the one who's wrong about problems at home.
# 52. I wouldn't hesitate to tell my spouse about her/his inadequacy.
# 53. When I discuss, I remind my spouse of her/his inadequacy.
# 54. I'm not afraid to tell my spouse about her/his incompetence.
# Generally, logistic regression in Python has a straightforward and user-friendly implementation. It usually consists of these steps:<br>
# 1. Import packages, functions, and classes<br>
# 2. Get data to work with and, if appropriate, transform it<br>
# 3. Create a classification model and train (or fit) it with existing data<br>
# 4. Evaluate your model to see if its performance is satisfactory<br>
# 5. Apply your model to make predictions<br>
# #### Import packages, functions, and classes
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import metrics
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn import tree
# #### Get data to work with and, if appropriate, transform it
# In[2]:
# Load the divorce dataset; the csv is semicolon-separated.
df = pd.read_csv('divorce.csv',sep=';')
y=df.Class
x_data=df.drop(columns=['Class'])
df.head(10)
# #### Data description
# In[3]:
sns.countplot(x='Class',data=df,palette='hls')
plt.show()
# Class balance: share of divorced (1) vs not divorced (0) rows.
count_no_sub = len(df[df['Class']==0])
count_sub = len(df[df['Class']==1])
pct_of_no_sub = count_no_sub/(count_no_sub+count_sub)
print("percentage of no divorce is", pct_of_no_sub*100)
pct_of_sub = count_sub/(count_no_sub+count_sub)
print("percentage of divorce", pct_of_sub*100)
# #### Normalize data
# In[4]:
# Min-max scale every feature into [0, 1].
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)).values
x.head()
# #### Correlation of all attributes
# In[5]:
plt.figure(figsize=(10,8))
sns.heatmap(df.corr(), cmap='viridis');
# #### Split data set
# In[6]:
# 60/40 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.4,random_state=400)
print("x_train: ",x_train.shape)
print("x_test: ",x_test.shape)
print("y_train: ",y_train.shape)
print("y_test: ",y_test.shape)
# #### Create a classification model and train (or fit) it with existing data
# Step 1. Import the model you want to use<br>
# Step 2. Make an instance of the Model<br>
# Step 3. Training the model on the data, storing the information learned from the data<br>
# Step 4. Predict labels for new data <br>
# In[7]:
clfr = LogisticRegression(solver='lbfgs')# step 2
clfr.fit(x_train, y_train.ravel())# step 3
y_predr = clfr.predict(x_test)# step 4
# #### Report
# In[8]:
print(classification_report(y_test, clfr.predict(x_test)))
print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(clfr.score(x_test, y_test)))
# #### Draw Figure
# In[9]:
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# ROC curve and AUC computed on the held-out test set.
logit_roc_auc = roc_auc_score(y_test, clfr.predict(x_test))
fpr, tpr, thresholds = roc_curve(y_test, clfr.predict_proba(x_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating divorce')
plt.legend(loc="lower right")
plt.show()
# #### Confusion Matrix
# In[11]:
from sklearn.metrics import classification_report, confusion_matrix as cm
def confusionMatrix(y_pred, title, n):
    """Draw two side-by-side confusion-matrix heatmaps for ``y_pred``.

    Left panel (subplot ``n``): matrix normalized to proportions of all test
    samples; right panel (subplot ``n + 1``): raw counts.  Predictions are
    compared against the module-level ``y_test``.

    Parameters
    ----------
    y_pred : array-like
        Predicted class labels.
    title : str
        Title placed above the normalized panel.
    n : int
        1-based subplot index of the first panel.
    """
    # Fix: compute the confusion matrix once instead of three times per call.
    matrix = cm(y_test, y_pred)
    total = matrix.sum()  # total number of test samples
    # Left panel: proportions.
    plt.subplot(1, 2, n)
    ax = sns.heatmap(matrix / total, annot=True,
                     cmap='RdBu_r', vmin=0, vmax=0.52, cbar=False, linewidths=.5)
    plt.title(title)
    plt.ylabel('Actual outputs')
    plt.xlabel('Prediction')
    # Expand y-limits by half a cell; appears to work around the matplotlib
    # 3.1 heatmap row-cropping issue — confirm still needed.
    b, t = ax.get_ylim()
    ax.set_ylim(b + .5, t - .5)
    # Right panel: raw counts.
    plt.subplot(1, 2, n + 1)
    axx = sns.heatmap(matrix, annot=True,
                      cmap='plasma', vmin=0, vmax=40, cbar=False, linewidths=.5)
    b, t = axx.get_ylim()
    axx.set_ylim(b + .5, t - .5)
    return
plt.figure(figsize=(8, 6))
confusionMatrix(y_predr, 'Logestic Regression', 1)
# Bug fix: `plt.show` was a bare attribute reference (no call), so the figure
# was never rendered here.
plt.show()
# #### Result:
# So we have successfully trained a logistic regression model that predicts whether a couple will get divorced, and reported its accuracy and confusion matrix.
# In[ ]:
# Author : <NAME>
# github link : https://github.com/amirshnll/COVID-19-Surveillance
# dataset link : http://archive.ics.uci.edu/ml/datasets/COVID-19+Surveillance
# email : <EMAIL>
# ### <p style=color:blue>Logistic Regression for Divorce Predictors Data Set</p>
#
# #### The Dataset
# The Dataset is from UCIMachinelearning and it provides you all the relevant information needed for the prediction of Divorce. It contains 54 features and on the basis of these features we have to predict that the couple has been divorced or not. Value 1 represent Divorced and value 0 represent not divorced. Features are as follows:
# 1. If one of us apologizes when our discussion deteriorates, the discussion ends.
# 2. I know we can ignore our differences, even if things get hard sometimes.
# 3. When we need it, we can take our discussions with my spouse from the beginning and correct it.
# 4. When I discuss with my spouse, to contact him will eventually work.
# 5. The time I spent with my wife is special for us.
# 6. We don't have time at home as partners.
# 7. We are like two strangers who share the same environment at home rather than family.
# 8. I enjoy our holidays with my wife.
# 9. I enjoy traveling with my wife.
# 10. Most of our goals are common to my spouse.
# 11. I think that one day in the future, when I look back, I see that my spouse and I have been in harmony with each other.
# 12. My spouse and I have similar values in terms of personal freedom.
# 13. My spouse and I have similar sense of entertainment.
# 14. Most of our goals for people (children, friends, etc.) are the same.
# 15. Our dreams with my spouse are similar and harmonious.
# 16. We're compatible with my spouse about what love should be.
# 17. We share the same views about being happy in our life with my spouse
# 18. My spouse and I have similar ideas about how marriage should be
# 19. My spouse and I have similar ideas about how roles should be in marriage
# 20. My spouse and I have similar values in trust.
# 21. I know exactly what my wife likes.
# 22. I know how my spouse wants to be taken care of when she/he sick.
# 23. I know my spouse's favorite food.
# 24. I can tell you what kind of stress my spouse is facing in her/his life.
# 25. I have knowledge of my spouse's inner world.
# 26. I know my spouse's basic anxieties.
# 27. I know what my spouse's current sources of stress are.
# 28. I know my spouse's hopes and wishes.
# 29. I know my spouse very well.
# 30. I know my spouse's friends and their social relationships.
# 31. I feel aggressive when I argue with my spouse.
# 32. When discussing with my spouse, I usually use expressions such as ‘you always’ or ‘you never’ .
# 33. I can use negative statements about my spouse's personality during our discussions.
# 34. I can use offensive expressions during our discussions.
# 35. I can insult my spouse during our discussions.
# 36. I can be humiliating when we discussions.
# 37. My discussion with my spouse is not calm.
# 38. I hate my spouse's way of open a subject.
# 39. Our discussions often occur suddenly.
# 40. We're just starting a discussion before I know what's going on.
# 41. When I talk to my spouse about something, my calm suddenly breaks.
# 42. When I argue with my spouse, ı only go out and I don't say a word.
# 43. I mostly stay silent to calm the environment a little bit.
# 44. Sometimes I think it's good for me to leave home for a while.
# 45. I'd rather stay silent than discuss with my spouse.
# 46. Even if I'm right in the discussion, I stay silent to hurt my spouse.
# 47. When I discuss with my spouse, I stay silent because I am afraid of not being able to control my anger.
# 48. I feel right in our discussions.
# 49. I have nothing to do with what I've been accused of.
# 50. I'm not actually the one who's guilty about what I'm accused of.
# 51. I'm not the one who's wrong about problems at home.
# 52. I wouldn't hesitate to tell my spouse about her/his inadequacy.
# 53. When I discuss, I remind my spouse of her/his inadequacy.
# 54. I'm not afraid to tell my spouse about her/his incompetence.
# Generally, logistic Machine Learning in Python has a straightforward and user-friendly implementation. It usually consists of these steps:<br>
# 1. Import packages, functions, and classes<br>
# 2. Get data to work with and, if appropriate, transform it<br>
# 3. Create a classification model and train (or fit) it with existing data<br>
# 4. Evaluate your model to see if its performance is satisfactory<br>
# 5. Apply your model to make predictions<br>
# #### Import packages, functions, and classes
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import metrics
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn import tree
# #### Get data to work with and, if appropriate, transform it
# In[2]:
# Load the UCI divorce-predictors dataset; semicolon-separated CSV.
df = pd.read_csv('divorce.csv',sep=';')
y=df.Class
x_data=df.drop(columns=['Class'])
df.head(10)
# #### Data description
# In[3]:
sns.countplot(x='Class',data=df,palette='hls')
plt.show()
# Class balance: Class==0 (not divorced) vs Class==1 (divorced).
count_no_sub = len(df[df['Class']==0])
count_sub = len(df[df['Class']==1])
pct_of_no_sub = count_no_sub/(count_no_sub+count_sub)
print("percentage of no divorce is", pct_of_no_sub*100)
pct_of_sub = count_sub/(count_no_sub+count_sub)
print("percentage of divorce", pct_of_sub*100)
# #### Normalize data
# In[4]:
# Min-max scale every feature column into [0, 1].
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)).values
x.head()
# #### correlation of all atribute
# In[5]:
plt.figure(figsize=(10,8))
sns.heatmap(df.corr(), cmap='viridis');
# #### Split data set
# In[6]:
# 60/40 train/test split; fixed random_state for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.4,random_state=400)
print("x_train: ",x_train.shape)
print("x_test: ",x_test.shape)
print("y_train: ",y_train.shape)
print("y_test: ",y_test.shape)
# #### Create a classification model and train (or fit) it with existing data
# Step 1. Import the model you want to use<br>
# Step 2. Make an instance of the Model<br>
# Step 3. Training the model on the data, storing the information learned from the data<br>
# Step 4. Predict labels for new data <br>
# In[7]:
clfr = LogisticRegression(solver='lbfgs')# step 2
clfr.fit(x_train, y_train.ravel())# step 3
y_predr = clfr.predict(x_test)# step 4
# #### Report
# In[8]:
print(classification_report(y_test, clfr.predict(x_test)))
print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(clfr.score(x_test, y_test)))
# #### Draw Figure
# In[9]:
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# NOTE(review): hard predictions (not probabilities) are passed to
# roc_auc_score, which understates AUC; the curve below uses predict_proba.
logit_roc_auc = roc_auc_score(y_test, clfr.predict(x_test))
fpr, tpr, thresholds = roc_curve(y_test, clfr.predict_proba(x_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating divorce')
plt.legend(loc="lower right")
plt.show()
# #### Confusion Matrix
# In[11]:
from sklearn.metrics import classification_report, confusion_matrix as cm
def confusionMatrix(y_pred,title,n):
    """Plot two side-by-side confusion-matrix heatmaps for ``y_pred``.

    Left panel (subplot ``n``): matrix normalized by the total number of test
    samples; right panel (subplot ``n + 1``): raw counts.  Predictions are
    compared against the module-level ``y_test``.
    """
    plt.subplot(1,2,n)
    # Normalized panel: sum(sum(...)) is the total sample count.
    ax=sns.heatmap(cm(y_test, y_pred)/sum(sum(cm(y_test, y_pred))), annot=True
                   ,cmap='RdBu_r', vmin=0, vmax=0.52,cbar=False, linewidths=.5)
    plt.title(title)
    plt.ylabel('Actual outputs')
    plt.xlabel('Prediction')
    # Expand y-limits by half a cell; appears to work around the matplotlib
    # 3.1 heatmap row-cropping issue — confirm still needed.
    b, t=ax.get_ylim()
    ax.set_ylim(b+.5, t-.5)
    # Raw-count panel.
    plt.subplot(1,2,n+1)
    axx=sns.heatmap(cm(y_test, y_pred), annot=True
                    ,cmap='plasma', vmin=0, vmax=40,cbar=False, linewidths=.5)
    b, t=axx.get_ylim()
    axx.set_ylim(b+.5, t-.5)
    return
plt.figure(figsize=(8, 6))
confusionMatrix(y_predr, 'Logestic Regression', 1)
# Bug fix: call plt.show() — the bare attribute reference drew nothing.
plt.show()
# #### Result:
# So we have successfully trained a logistic regression model that predicts whether a couple will get divorced, and reported its accuracy and confusion matrix.
# Analyze SEC 13F filings stored in the local SQLite database ./all13f.db:
# per-fund holdings, per-fund top positions (>2% of portfolio), and an
# overall cross-fund summary.
from os.path import exists
import sqlite3 as sql
import pandas

db_path = './all13f.db'
if exists(db_path):
    conn = sql.connect(db_path)
else:
    print(db_path + ' does not exist. Exiting.')
    exit(1)

# Get funds: map each fund's CIK to its registered name.
funds_df = pandas.read_sql_query('''SELECT * FROM "FUNDS"''', conn, index_col='cik')
funds_dict = funds_df.to_dict(orient='dict')['name']
top_funds = pandas.DataFrame()
cik_df = pandas.read_sql_query('''SELECT DISTINCT cik FROM "SEC13F8"''', conn)
for cik in cik_df['cik'].tolist():
    assert cik, 'Not a valid cik lookup.'
    print('-----------------------')
    print('Analyzing Fund: ', funds_dict[cik])
    # Fix: use parameterized queries instead of string-concatenated SQL
    # (avoids quoting problems and SQL injection via cik values).
    dates = pandas.read_sql_query(
        'SELECT DISTINCT date from "SEC13F8" WHERE cik=? ORDER BY date ASC',
        conn, params=(cik,), parse_dates='date')['date']
    dateMin = min(dates).date()
    dateMax = max(dates).date()
    if dateMin == dateMax:
        print('Reported:', dateMin)
    else:
        print('Reports between:', dateMin, 'and', dateMax)
    # Per-position totals for this fund, largest first.
    fund_df = pandas.read_sql_query(
        'SELECT cusip, issuer, SUM(value) FROM "SEC13F8" WHERE cik=? '
        'GROUP BY cusip ORDER BY SUM(value) DESC',
        conn, params=(cik,))
    fund_sum = fund_df['SUM(value)'].sum()
    print('Holdings: $%0.2fB' % (fund_sum/1e6))
    fund_pct = fund_df['SUM(value)']/fund_sum
    fund_df['pct'] = fund_pct
    # Positions that exceed 2% of this fund's portfolio.
    top_df = fund_df[fund_pct > 0.02].copy()
    # Accumulate across funds, summing value and portfolio weight per cusip.
    top_funds = pandas.concat([top_funds, top_df]).groupby('cusip', as_index=False).agg(
        {'issuer': 'first', 'SUM(value)': 'sum', 'pct': 'sum'})
    print('Top stocks for fund:')
    top_df['SUM(value)'] = top_df['SUM(value)']/1000
    print(top_df.rename(columns={'issuer': 'Stock Issuer', 'SUM(value)': 'Value ($M)', 'pct': 'Sum Percentage'}))

# Cross-fund summary: positions ranked by cumulative portfolio weight.
top_funds.sort_values('pct', ascending=False, inplace=True)
top_funds.rename(columns={'SUM(value)': 'Value ($k)', 'pct': '% Fund Integrated'}, inplace=True)
print('--------------------------\n---------------------------')
print('Overall top funds, with percentage of portfolio integrated:')
print(top_funds.head(20))
all_df = pandas.read_sql_query('''SELECT cusip, issuer, cik, SUM(value), SUM(shares) FROM "SEC13F8" GROUP BY cusip ORDER BY SUM(value) DESC''', conn)
# Fix: renamed from `sum`, which shadowed the builtin.
total_value = all_df['SUM(value)'].sum()
pct = all_df['SUM(value)']/total_value
all_df['pct'] = pct
top = all_df[pct > 0.02]
print('----------------------------')
print(top[['cusip', 'issuer', 'pct']].rename(columns={'issuer': 'Stock Issuer', 'pct': '% Total Value'}))
print('Funds: ', all_df.cik.nunique())
print('Total holdings: $%0.2fB' % (total_value/1e6))
print('Number of investments >2% holding: ',len(top))
from os.path import exists
import sqlite3 as sql
import pandas

# Connect to the local 13F holdings database; bail out if it is missing.
db_path = './all13f.db'
if exists(db_path):
    conn = sql.connect(db_path)
else:
    print(db_path + ' does not exist. Exiting.')
    exit(1)
# Get funds
# Map each fund's CIK to its registered name.
funds_df = pandas.read_sql_query('''SELECT * FROM "FUNDS"''', conn, index_col='cik')
funds_dict = funds_df.to_dict(orient='dict')['name']
top_funds = pandas.DataFrame()
cik_df = pandas.read_sql_query('''SELECT DISTINCT cik FROM "SEC13F8"''', conn)
for cik in cik_df['cik'].tolist():
    # NOTE(review): SQL is built by string concatenation with cik below —
    # assumes cik values are trusted strings; parameterized queries would be
    # safer.
    assert(cik), 'Not a valid cik lookup.'
    print('-----------------------')
    print('Analyzing Fund: ', funds_dict[cik])
    # Filing date range for this fund.
    dates = pandas.read_sql_query('SELECT DISTINCT date from "SEC13F8" WHERE cik="' + cik + '" ORDER BY date ASC', conn, parse_dates='date')['date']
    dateMin = min(dates).date()
    dateMax = max(dates).date()
    if dateMin == dateMax:
        print('Reported:', dateMin)
    else:
        print('Reports between:', dateMin, 'and', dateMax)
    # Per-position totals for this fund, largest first.
    fund_df = pandas.read_sql_query('SELECT cusip, issuer, SUM(value) FROM "SEC13F8" WHERE cik="' + cik + '" GROUP BY cusip ORDER BY SUM(value) DESC', conn)
    fund_sum = fund_df['SUM(value)'].sum()
    print('Holdings: $%0.2fB' % (fund_sum/1e6))
    fund_pct = fund_df['SUM(value)']/fund_sum
    fund_df['pct'] = fund_pct
    # Positions that exceed 2% of this fund's portfolio.
    top_df = fund_df[fund_pct > 0.02].copy()
    # Accumulate across funds, summing value and portfolio weight per cusip.
    top_funds = pandas.concat([top_funds, top_df]).groupby('cusip', as_index=False).agg({'issuer': 'first', 'SUM(value)': 'sum','pct': 'sum'})
    print('Top stocks for fund:')
    top_df['SUM(value)'] = top_df['SUM(value)']/1000
    print(top_df.rename(columns={'issuer': 'Stock Issuer', 'SUM(value)': 'Value ($M)', 'pct': 'Sum Percentage'}))
# Cross-fund summary: positions ranked by cumulative portfolio weight.
top_funds.sort_values('pct', ascending=False, inplace=True)
top_funds.rename(columns={'SUM(value)': 'Value ($k)', 'pct': '% Fund Integrated'}, inplace=True)
print('--------------------------\n---------------------------')
print('Overall top funds, with percentage of portfolio integrated:')
print(top_funds.head(20))
all_df = pandas.read_sql_query('''SELECT cusip, issuer, cik, SUM(value), SUM(shares) FROM "SEC13F8" GROUP BY cusip ORDER BY SUM(value) DESC''', conn)
# NOTE(review): `sum` shadows the builtin from here on.
sum = all_df['SUM(value)'].sum()
pct = all_df['SUM(value)']/sum
all_df['pct'] = pct
top = all_df[pct > 0.02]
print('----------------------------')
print(top[['cusip', 'issuer', 'pct']].rename(columns={'issuer': 'Stock Issuer', 'pct': '% Total Value'}))
print('Funds: ', all_df.cik.nunique())
print('Total holdings: $%0.2fB' % (sum/1e6))
print('Number of investments >2% holding: ',len(top))
from avocado_qemu import Test
from avocado_qemu import BUILD_DIR
from avocado_qemu import wait_for_console_pattern
from avocado_qemu import exec_command_and_wait_for_pattern
from avocado_qemu import is_readable_executable_file
from qemu.accel import kvm_available
import os
import socket
import subprocess
# Message used to cancel a test when a required accelerator is missing.
ACCEL_NOT_AVAILABLE_FMT = "%s accelerator does not seem to be available"
KVM_NOT_AVAILABLE = ACCEL_NOT_AVAILABLE_FMT % "KVM"
def pick_default_vug_bin():
    """Locate the vhost-user-gpu helper binary.

    Tries the working directory first, then the QEMU build tree
    (``BUILD_DIR``).  Returns the first readable, executable candidate,
    or None when neither location has one.
    """
    relative_path = "./contrib/vhost-user-gpu/vhost-user-gpu"
    candidates = (relative_path, os.path.join(BUILD_DIR, relative_path))
    for candidate in candidates:
        if is_readable_executable_file(candidate):
            return candidate
class VirtioGPUx86(Test):
    """
    :avocado: tags=virtio-gpu
    """
    # Kernel arguments shared by every test; each test appends console/init.
    KERNEL_COMMON_COMMAND_LINE = "printk.time=0 "
    # Fedora 33 x86_64 PXE boot kernel and initrd used as the guest image.
    KERNEL_URL = (
        "https://archives.fedoraproject.org/pub/fedora"
        "/linux/releases/33/Everything/x86_64/os/images"
        "/pxeboot/vmlinuz"
    )
    INITRD_URL = (
        "https://archives.fedoraproject.org/pub/fedora"
        "/linux/releases/33/Everything/x86_64/os/images"
        "/pxeboot/initrd.img"
    )
    def wait_for_console_pattern(self, success_message, vm=None):
        # Thin wrapper that treats a kernel panic as the failure pattern.
        wait_for_console_pattern(
            self,
            success_message,
            failure_message="Kernel panic - not syncing",
            vm=vm,
        )
    def test_virtio_vga_virgl(self):
        """
        :avocado: tags=arch:x86_64
        :avocado: tags=device:virtio-vga
        """
        kernel_command_line = (
            self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
        )
        # FIXME: should check presence of virtio, virgl etc
        if not kvm_available(self.arch, self.qemu_bin):
            self.cancel(KVM_NOT_AVAILABLE)
        kernel_path = self.fetch_asset(self.KERNEL_URL)
        initrd_path = self.fetch_asset(self.INITRD_URL)
        # Boot a KVM guest with a virgl-enabled virtio-vga and no visible
        # display (egl-headless still exercises the GL path).
        self.vm.set_console()
        self.vm.add_args("-cpu", "host")
        self.vm.add_args("-m", "2G")
        self.vm.add_args("-machine", "pc,accel=kvm")
        self.vm.add_args("-device", "virtio-vga,virgl=on")
        self.vm.add_args("-display", "egl-headless")
        self.vm.add_args(
            "-kernel",
            kernel_path,
            "-initrd",
            initrd_path,
            "-append",
            kernel_command_line,
        )
        try:
            self.vm.launch()
        except:
            # TODO: probably fails because we are missing the VirGL features
            self.cancel("VirGL not enabled?")
        self.wait_for_console_pattern("as init process")
        # Load the guest driver and confirm virgl and EDID were negotiated.
        exec_command_and_wait_for_pattern(
            self, "/usr/sbin/modprobe virtio_gpu", ""
        )
        self.wait_for_console_pattern("features: +virgl +edid")
    def test_vhost_user_vga_virgl(self):
        """
        :avocado: tags=arch:x86_64
        :avocado: tags=device:vhost-user-vga
        """
        kernel_command_line = (
            self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
        )
        # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc
        if not kvm_available(self.arch, self.qemu_bin):
            self.cancel(KVM_NOT_AVAILABLE)
        vug = pick_default_vug_bin()
        if not vug:
            self.cancel("Could not find vhost-user-gpu")
        kernel_path = self.fetch_asset(self.KERNEL_URL)
        initrd_path = self.fetch_asset(self.INITRD_URL)
        # Create socketpair to connect proxy and remote processes
        qemu_sock, vug_sock = socket.socketpair(
            socket.AF_UNIX, socket.SOCK_STREAM
        )
        # Both socket ends must survive exec() of the child processes.
        os.set_inheritable(qemu_sock.fileno(), True)
        os.set_inheritable(vug_sock.fileno(), True)
        self._vug_log_path = os.path.join(
            self.logdir, "vhost-user-gpu.log"
        )
        self._vug_log_file = open(self._vug_log_path, "wb")
        self.log.info('Complete vhost-user-gpu.log file can be '
                      'found at %s', self._vug_log_path)
        # Start the external vhost-user-gpu backend on its socket end,
        # logging its output to the file above.
        vugp = subprocess.Popen(
            [vug, "--virgl", "--fd=%d" % vug_sock.fileno()],
            stdin=subprocess.DEVNULL,
            stdout=self._vug_log_file,
            stderr=subprocess.STDOUT,
            shell=False,
            close_fds=False,
        )
        self.vm.set_console()
        self.vm.add_args("-cpu", "host")
        self.vm.add_args("-m", "2G")
        # vhost-user needs shareable guest RAM, hence the memfd backend.
        self.vm.add_args("-object", "memory-backend-memfd,id=mem,size=2G")
        self.vm.add_args("-machine", "pc,memory-backend=mem,accel=kvm")
        self.vm.add_args("-chardev", "socket,id=vug,fd=%d" % qemu_sock.fileno())
        self.vm.add_args("-device", "vhost-user-vga,chardev=vug")
        self.vm.add_args("-display", "egl-headless")
        self.vm.add_args(
            "-kernel",
            kernel_path,
            "-initrd",
            initrd_path,
            "-append",
            kernel_command_line,
        )
        self.vm.launch()
        self.wait_for_console_pattern("as init process")
        # Load the guest driver; the external backend negotiates virgl but
        # (per the expected pattern below) not EDID.
        exec_command_and_wait_for_pattern(
            self, "/usr/sbin/modprobe virtio_gpu", ""
        )
        self.wait_for_console_pattern("features: +virgl -edid")
        # Tear down: stop the guest, then the external GPU backend.
        self.vm.shutdown()
        qemu_sock.close()
        vugp.terminate()
vugp.wait()
from avocado_qemu import Test
from avocado_qemu import BUILD_DIR
from avocado_qemu import wait_for_console_pattern
from avocado_qemu import exec_command_and_wait_for_pattern
from avocado_qemu import is_readable_executable_file
from qemu.accel import kvm_available
import os
import socket
import subprocess
# Cancellation message for a missing accelerator on the host.
ACCEL_NOT_AVAILABLE_FMT = "%s accelerator does not seem to be available"
KVM_NOT_AVAILABLE = ACCEL_NOT_AVAILABLE_FMT % "KVM"
def pick_default_vug_bin():
    """Return a usable vhost-user-gpu binary path, or None if not found.

    Checks the working directory first, then the QEMU build tree
    (``BUILD_DIR``).
    """
    relative_path = "./contrib/vhost-user-gpu/vhost-user-gpu"
    if is_readable_executable_file(relative_path):
        return relative_path
    bld_dir_path = os.path.join(BUILD_DIR, relative_path)
    if is_readable_executable_file(bld_dir_path):
        return bld_dir_path
    # Implicitly returns None when no candidate is readable and executable.
class VirtioGPUx86(Test):
    """
    :avocado: tags=virtio-gpu
    """
    # Base kernel command line; tests append their console/init arguments.
    KERNEL_COMMON_COMMAND_LINE = "printk.time=0 "
    # Fedora 33 x86_64 PXE kernel/initrd fetched as the guest image.
    KERNEL_URL = (
        "https://archives.fedoraproject.org/pub/fedora"
        "/linux/releases/33/Everything/x86_64/os/images"
        "/pxeboot/vmlinuz"
    )
    INITRD_URL = (
        "https://archives.fedoraproject.org/pub/fedora"
        "/linux/releases/33/Everything/x86_64/os/images"
        "/pxeboot/initrd.img"
    )
    def wait_for_console_pattern(self, success_message, vm=None):
        # Wrapper that fails fast on a guest kernel panic.
        wait_for_console_pattern(
            self,
            success_message,
            failure_message="Kernel panic - not syncing",
            vm=vm,
        )
    def test_virtio_vga_virgl(self):
        """
        :avocado: tags=arch:x86_64
        :avocado: tags=device:virtio-vga
        """
        kernel_command_line = (
            self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
        )
        # FIXME: should check presence of virtio, virgl etc
        if not kvm_available(self.arch, self.qemu_bin):
            self.cancel(KVM_NOT_AVAILABLE)
        kernel_path = self.fetch_asset(self.KERNEL_URL)
        initrd_path = self.fetch_asset(self.INITRD_URL)
        # Boot a KVM guest with virgl-enabled virtio-vga, headless EGL display.
        self.vm.set_console()
        self.vm.add_args("-cpu", "host")
        self.vm.add_args("-m", "2G")
        self.vm.add_args("-machine", "pc,accel=kvm")
        self.vm.add_args("-device", "virtio-vga,virgl=on")
        self.vm.add_args("-display", "egl-headless")
        self.vm.add_args(
            "-kernel",
            kernel_path,
            "-initrd",
            initrd_path,
            "-append",
            kernel_command_line,
        )
        try:
            self.vm.launch()
        except:
            # TODO: probably fails because we are missing the VirGL features
            self.cancel("VirGL not enabled?")
        self.wait_for_console_pattern("as init process")
        # Probe the guest driver and verify virgl + EDID are reported.
        exec_command_and_wait_for_pattern(
            self, "/usr/sbin/modprobe virtio_gpu", ""
        )
        self.wait_for_console_pattern("features: +virgl +edid")
    def test_vhost_user_vga_virgl(self):
        """
        :avocado: tags=arch:x86_64
        :avocado: tags=device:vhost-user-vga
        """
        kernel_command_line = (
            self.KERNEL_COMMON_COMMAND_LINE + "console=ttyS0 rdinit=/bin/bash"
        )
        # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc
        if not kvm_available(self.arch, self.qemu_bin):
            self.cancel(KVM_NOT_AVAILABLE)
        vug = pick_default_vug_bin()
        if not vug:
            self.cancel("Could not find vhost-user-gpu")
        kernel_path = self.fetch_asset(self.KERNEL_URL)
        initrd_path = self.fetch_asset(self.INITRD_URL)
        # Create socketpair to connect proxy and remote processes
        qemu_sock, vug_sock = socket.socketpair(
            socket.AF_UNIX, socket.SOCK_STREAM
        )
        # Keep both ends open across exec() of the children.
        os.set_inheritable(qemu_sock.fileno(), True)
        os.set_inheritable(vug_sock.fileno(), True)
        self._vug_log_path = os.path.join(
            self.logdir, "vhost-user-gpu.log"
        )
        self._vug_log_file = open(self._vug_log_path, "wb")
        self.log.info('Complete vhost-user-gpu.log file can be '
                      'found at %s', self._vug_log_path)
        # Launch the external vhost-user-gpu backend on its socket end.
        vugp = subprocess.Popen(
            [vug, "--virgl", "--fd=%d" % vug_sock.fileno()],
            stdin=subprocess.DEVNULL,
            stdout=self._vug_log_file,
            stderr=subprocess.STDOUT,
            shell=False,
            close_fds=False,
        )
        self.vm.set_console()
        self.vm.add_args("-cpu", "host")
        self.vm.add_args("-m", "2G")
        # Shareable memfd-backed guest RAM is required for vhost-user.
        self.vm.add_args("-object", "memory-backend-memfd,id=mem,size=2G")
        self.vm.add_args("-machine", "pc,memory-backend=mem,accel=kvm")
        self.vm.add_args("-chardev", "socket,id=vug,fd=%d" % qemu_sock.fileno())
        self.vm.add_args("-device", "vhost-user-vga,chardev=vug")
        self.vm.add_args("-display", "egl-headless")
        self.vm.add_args(
            "-kernel",
            kernel_path,
            "-initrd",
            initrd_path,
            "-append",
            kernel_command_line,
        )
        self.vm.launch()
        self.wait_for_console_pattern("as init process")
        # Per the expected pattern, the external backend reports virgl but
        # not EDID.
        exec_command_and_wait_for_pattern(
            self, "/usr/sbin/modprobe virtio_gpu", ""
        )
        self.wait_for_console_pattern("features: +virgl -edid")
        # Tear down guest first, then the backend process.
        self.vm.shutdown()
        qemu_sock.close()
        vugp.terminate()
        vugp.wait()
import mxnet as mx
from mxnet.operator import CustomOp, CustomOpProp
from .util import parse_string_to_tuple
class BroadcastToWithSamplesOp(CustomOp):
    """MXNet custom operator that broadcasts its input to ``shape``.

    When ``isSamples`` is True the input's first axis is treated as the
    samples axis and stays the leading axis of the result, with missing
    middle axes inserted as size-1; otherwise the input is right-aligned
    against ``shape`` in the usual broadcasting manner.
    """
    def __init__(self, isSamples, shape, **kwargs):
        self.isSamples = isSamples  # True => axis 0 is the samples axis
        self.shape = shape          # target output shape (tuple of int)
        super(BroadcastToWithSamplesOp, self).__init__(**kwargs)
    def forward(self, is_train, req, in_data, out_data, aux):
        a = in_data[0]
        n_dim = len(self.shape)
        if self.isSamples:
            # Keep axis 0 (samples); pad missing middle axes with 1s.
            t_shape = (a.shape[0],) + (1,) * (n_dim - len(a.shape)) + a.shape[1:]
        else:
            # Left-pad with 1s so the input right-aligns with the target.
            t_shape = (1,) * (n_dim - len(a.shape)) + a.shape
        a_reshape = mx.nd.reshape(a, shape=t_shape)
        out = mx.nd.broadcast_to(a_reshape, shape=self.shape)
        self.assign(out_data[0], req[0], out)
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # Gradient of a broadcast is the sum over the broadcast axes,
        # collapsed here into a single axis before summation.
        a_shape = in_data[0].shape
        if self.isSamples:
            grad = mx.nd.reshape(out_grad[0], shape=(a_shape[0], -1,) + a_shape[1:])
            a_grad = mx.nd.sum(grad, axis=1)
        else:
            grad = mx.nd.reshape(out_grad[0], shape=(-1,) + a_shape)
            a_grad = mx.nd.sum(grad, axis=0)
        self.assign(in_grad[0], req[0], a_grad)
@mx.operator.register("broadcast_to_w_samples")
class BroadcastToWithSamplesOpProp(CustomOpProp):
def __init__(self, **kwargs):
self.isSamples = kwargs['isSamples'].lower() == 'true'
self.shape = parse_string_to_tuple(kwargs['shape'])
super(BroadcastToWithSamplesOpProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['out']
def infer_shape(self, in_shapes):
return in_shapes, (self.shape,), ()
def create_operator(self, ctx, in_shapes, in_dtypes, **kwargs):
return BroadcastToWithSamplesOp(isSamples=self.isSamples,
shape=self.shape, **kwargs)
def broadcast_to_w_samples(F, data, shape, isSamples=True):
    """Broadcast ``data`` to ``shape`` under either MXNet API.

    Parameters
    ----------
    F : module or class
        ``mx.nd`` for imperative mode (shapes known, handled inline);
        otherwise the symbolic API, delegated to the registered custom op.
    data : NDArray or Symbol
        Array to broadcast.
    shape : tuple of int
        Target shape.
    isSamples : bool, optional
        If True, axis 0 of ``data`` is the samples axis and is preserved as
        the leading axis of the result.

    Returns
    -------
    NDArray or Symbol broadcast to ``shape``.
    """
    if F is mx.nd:
        n_dim = len(shape)
        if isSamples:
            # Keep the larger sample count between data and the target.
            num_samples = max(data.shape[0], shape[0])
            t_shape = (data.shape[0],) + (1,) * (n_dim - len(data.shape)) + data.shape[1:]
            shape = (num_samples,) + shape[1:]
        else:
            # Left-pad with singleton axes for right-aligned broadcasting.
            t_shape = (1,) * (n_dim - len(data.shape)) + data.shape
        data_reshape = F.reshape(data, shape=t_shape)
        return F.broadcast_to(data_reshape, shape=shape)
    else:
        # Symbolic mode: defer to the registered custom operator.
        # Fix: the final line had a dataset-dump artifact fused onto it
        # (a syntax error); restored to the clean call.  The duplicate
        # "import mxnet as mx" that artifact carried is redundant — mx is
        # already imported at the top of this file.
        return F.Custom(data, op_type="broadcast_to_w_samples",
                        isSamples=isSamples, shape=shape)
from mxnet.operator import CustomOp, CustomOpProp
from .util import parse_string_to_tuple
class BroadcastToWithSamplesOp(CustomOp):
    """Custom MXNet operator broadcasting the input array to ``shape``,
    optionally preserving a leading samples axis (``isSamples``)."""
    def __init__(self, isSamples, shape, **kwargs):
        self.isSamples = isSamples  # True => axis 0 is the samples axis
        self.shape = shape          # target output shape (tuple of int)
        super(BroadcastToWithSamplesOp, self).__init__(**kwargs)
    def forward(self, is_train, req, in_data, out_data, aux):
        a = in_data[0]
        n_dim = len(self.shape)
        if self.isSamples:
            # Preserve axis 0; insert missing middle axes as size 1.
            t_shape = (a.shape[0],) + (1,) * (n_dim - len(a.shape)) + a.shape[1:]
        else:
            # Standard right-aligned broadcast: left-pad with 1s.
            t_shape = (1,) * (n_dim - len(a.shape)) + a.shape
        a_reshape = mx.nd.reshape(a, shape=t_shape)
        out = mx.nd.broadcast_to(a_reshape, shape=self.shape)
        self.assign(out_data[0], req[0], out)
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # Broadcast gradient = sum over the broadcast axes.
        a_shape = in_data[0].shape
        if self.isSamples:
            grad = mx.nd.reshape(out_grad[0], shape=(a_shape[0], -1,) + a_shape[1:])
            a_grad = mx.nd.sum(grad, axis=1)
        else:
            grad = mx.nd.reshape(out_grad[0], shape=(-1,) + a_shape)
            a_grad = mx.nd.sum(grad, axis=0)
        self.assign(in_grad[0], req[0], a_grad)
@mx.operator.register("broadcast_to_w_samples")
class BroadcastToWithSamplesOpProp(CustomOpProp):
def __init__(self, **kwargs):
self.isSamples = kwargs['isSamples'].lower() == 'true'
self.shape = parse_string_to_tuple(kwargs['shape'])
super(BroadcastToWithSamplesOpProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['out']
def infer_shape(self, in_shapes):
return in_shapes, (self.shape,), ()
def create_operator(self, ctx, in_shapes, in_dtypes, **kwargs):
return BroadcastToWithSamplesOp(isSamples=self.isSamples,
shape=self.shape, **kwargs)
def broadcast_to_w_samples(F, data, shape, isSamples=True):
    """Broadcast ``data`` to ``shape``; inline for ``mx.nd``, via the
    registered custom op for the symbolic API.

    If ``isSamples`` is True, axis 0 of ``data`` is the samples axis and is
    preserved as the leading axis of the result.
    """
    if F is mx.nd:
        n_dim = len(shape)
        if isSamples:
            # Keep the larger sample count between data and target.
            num_samples = max(data.shape[0], shape[0])
            t_shape = (data.shape[0],) + (1,) * (n_dim - len(data.shape)) + data.shape[1:]
            shape = (num_samples,) + shape[1:]
        else:
            # Left-pad with singleton axes for right-aligned broadcasting.
            t_shape = (1,) * (n_dim - len(data.shape)) + data.shape
        data_reshape = F.reshape(data, shape=t_shape)
        return F.broadcast_to(data_reshape, shape=shape)
    else:
        # Symbolic mode: shapes unknown here, so delegate to the custom op.
        return F.Custom(data, op_type="broadcast_to_w_samples",
                        isSamples=isSamples, shape=shape)
from concurrent.futures import Future, TimeoutError
from functools import partial
from deprecated import deprecated
from enum import Enum
from ... import RouterClient
from ...NotificationHandler import NotificationHandler
from ... import BitMaskTools
from ..messages import ControlConfig_pb2 as ControlConfigPb # NOQA
class ControlConfigFunctionUid(Enum):
    """RPC function UIDs for the ControlConfig service.

    Each value is the message id placed in the router frame header to select
    the remote function.  Note 0x10000b/0x10000c are unassigned here —
    presumably reserved or removed functions; confirm against the service
    definition.
    """
    uidSetGravityVector = 0x100001
    uidGetGravityVector = 0x100002
    uidSetPayloadInformation = 0x100003
    uidGetPayloadInformation = 0x100004
    uidSetToolConfiguration = 0x100005
    uidGetToolConfiguration = 0x100006
    uidOnNotificationControlConfigurationTopic = 0x100007
    uidUnsubscribe = 0x100008
    uidSetCartesianReferenceFrame = 0x100009
    uidGetCartesianReferenceFrame = 0x10000a
    uidGetControlMode = 0x10000d
    uidSetJointSpeedSoftLimits = 0x10000e
    uidSetTwistLinearSoftLimit = 0x10000f
    uidSetTwistAngularSoftLimit = 0x100010
    uidSetJointAccelerationSoftLimits = 0x100011
    uidGetKinematicHardLimits = 0x100012
    uidGetKinematicSoftLimits = 0x100013
    uidGetAllKinematicSoftLimits = 0x100014
    uidSetDesiredLinearTwist = 0x100015
    uidSetDesiredAngularTwist = 0x100016
    uidSetDesiredJointSpeeds = 0x100017
    uidGetDesiredSpeeds = 0x100018
    uidResetGravityVector = 0x100019
    uidResetPayloadInformation = 0x10001a
    uidResetToolConfiguration = 0x10001b
    uidResetJointSpeedSoftLimits = 0x10001c
    uidResetTwistLinearSoftLimit = 0x10001d
    uidResetTwistAngularSoftLimit = 0x10001e
    uidResetJointAccelerationSoftLimits = 0x10001f
class ControlConfigClient():
serviceVersion = 1
serviceId = 16
router = RouterClient.RouterClient
def __init__(self, router: RouterClient.RouterClient):
self.router = router
self.notificationHandler = NotificationHandler()
callback = partial(self.ExecuteRouterNotification)
self.router.registerNotificationCallback(self.serviceId, callback)
def ExecuteRouterNotification(self, message):
self.notificationHandler.call(BitMaskTools.extractFrameId(message.header.message_info), message.payload)
def SetGravityVector(self, gravityvector, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = gravityvector.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetGravityVector, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetGravityVector(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetGravityVector, deviceId, options)
ansPayload = ControlConfigPb.GravityVector()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def SetPayloadInformation(self, payloadinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = payloadinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetPayloadInformation, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetPayloadInformation(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetPayloadInformation, deviceId, options)
ansPayload = ControlConfigPb.PayloadInformation()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def SetToolConfiguration(self, toolconfiguration, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = toolconfiguration.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetToolConfiguration, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetToolConfiguration(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetToolConfiguration, deviceId, options)
ansPayload = ControlConfigPb.ToolConfiguration()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def OnNotificationControlConfigurationTopic(self, callback, notificationoptions, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = notificationoptions.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidOnNotificationControlConfigurationTopic, deviceId, options)
ansPayload = ControlConfigPb.NotificationHandle()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
def parseNotifDataFromString(payload):
obj = ControlConfigPb.ControlConfigurationNotification()
obj.ParseFromString(payload)
return obj
self.notificationHandler.addCallback(ansPayload.identifier, parseNotifDataFromString, callback)
return ansPayload
def Unsubscribe(self, notificationhandle, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = notificationhandle.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidUnsubscribe, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetCartesianReferenceFrame(self, cartesianreferenceframeinfo, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = cartesianreferenceframeinfo.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetCartesianReferenceFrame, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetCartesianReferenceFrame(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetCartesianReferenceFrame, deviceId, options)
ansPayload = ControlConfigPb.CartesianReferenceFrameInfo()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def GetControlMode(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetControlMode, deviceId, options)
ansPayload = ControlConfigPb.ControlModeInformation()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def SetJointSpeedSoftLimits(self, jointspeedsoftlimits, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = jointspeedsoftlimits.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetJointSpeedSoftLimits, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetTwistLinearSoftLimit(self, twistlinearsoftlimit, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = twistlinearsoftlimit.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetTwistLinearSoftLimit, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetTwistAngularSoftLimit(self, twistangularsoftlimit, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = twistangularsoftlimit.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetTwistAngularSoftLimit, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetJointAccelerationSoftLimits(self, jointaccelerationsoftlimits, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = jointaccelerationsoftlimits.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetJointAccelerationSoftLimits, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetKinematicHardLimits(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetKinematicHardLimits, deviceId, options)
ansPayload = ControlConfigPb.KinematicLimits()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def GetKinematicSoftLimits(self, controlmodeinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = controlmodeinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidGetKinematicSoftLimits, deviceId, options)
ansPayload = ControlConfigPb.KinematicLimits()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def GetAllKinematicSoftLimits(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetAllKinematicSoftLimits, deviceId, options)
ansPayload = ControlConfigPb.KinematicLimitsList()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def SetDesiredLinearTwist(self, lineartwist, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = lineartwist.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetDesiredLinearTwist, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetDesiredAngularTwist(self, angulartwist, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = angulartwist.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetDesiredAngularTwist, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetDesiredJointSpeeds(self, jointspeeds, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = jointspeeds.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetDesiredJointSpeeds, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetDesiredSpeeds(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetDesiredSpeeds, deviceId, options)
ansPayload = ControlConfigPb.DesiredSpeeds()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetGravityVector(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidResetGravityVector, deviceId, options)
ansPayload = ControlConfigPb.GravityVector()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetPayloadInformation(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidResetPayloadInformation, deviceId, options)
ansPayload = ControlConfigPb.PayloadInformation()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetToolConfiguration(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidResetToolConfiguration, deviceId, options)
ansPayload = ControlConfigPb.ToolConfiguration()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetJointSpeedSoftLimits(self, controlmodeinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = controlmodeinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidResetJointSpeedSoftLimits, deviceId, options)
ansPayload = ControlConfigPb.JointSpeedSoftLimits()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetTwistLinearSoftLimit(self, controlmodeinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = controlmodeinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidResetTwistLinearSoftLimit, deviceId, options)
ansPayload = ControlConfigPb.TwistLinearSoftLimit()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetTwistAngularSoftLimit(self, controlmodeinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = controlmodeinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidResetTwistAngularSoftLimit, deviceId, options)
ansPayload = ControlConfigPb.TwistAngularSoftLimit()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetJointAccelerationSoftLimits(self, controlmodeinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = controlmodeinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidResetJointAccelerationSoftLimits, deviceId, options)
ansPayload = ControlConfigPb.JointAccelerationSoftLimits()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload | kortex_api/autogen/client_stubs/ControlConfigClientRpc.py | from concurrent.futures import Future, TimeoutError
from functools import partial
from deprecated import deprecated
from enum import Enum
from ... import RouterClient
from ...NotificationHandler import NotificationHandler
from ... import BitMaskTools
from ..messages import ControlConfig_pb2 as ControlConfigPb # NOQA
class ControlConfigFunctionUid(Enum):
uidSetGravityVector = 0x100001
uidGetGravityVector = 0x100002
uidSetPayloadInformation = 0x100003
uidGetPayloadInformation = 0x100004
uidSetToolConfiguration = 0x100005
uidGetToolConfiguration = 0x100006
uidOnNotificationControlConfigurationTopic = 0x100007
uidUnsubscribe = 0x100008
uidSetCartesianReferenceFrame = 0x100009
uidGetCartesianReferenceFrame = 0x10000a
uidGetControlMode = 0x10000d
uidSetJointSpeedSoftLimits = 0x10000e
uidSetTwistLinearSoftLimit = 0x10000f
uidSetTwistAngularSoftLimit = 0x100010
uidSetJointAccelerationSoftLimits = 0x100011
uidGetKinematicHardLimits = 0x100012
uidGetKinematicSoftLimits = 0x100013
uidGetAllKinematicSoftLimits = 0x100014
uidSetDesiredLinearTwist = 0x100015
uidSetDesiredAngularTwist = 0x100016
uidSetDesiredJointSpeeds = 0x100017
uidGetDesiredSpeeds = 0x100018
uidResetGravityVector = 0x100019
uidResetPayloadInformation = 0x10001a
uidResetToolConfiguration = 0x10001b
uidResetJointSpeedSoftLimits = 0x10001c
uidResetTwistLinearSoftLimit = 0x10001d
uidResetTwistAngularSoftLimit = 0x10001e
uidResetJointAccelerationSoftLimits = 0x10001f
class ControlConfigClient():
serviceVersion = 1
serviceId = 16
router = RouterClient.RouterClient
def __init__(self, router: RouterClient.RouterClient):
self.router = router
self.notificationHandler = NotificationHandler()
callback = partial(self.ExecuteRouterNotification)
self.router.registerNotificationCallback(self.serviceId, callback)
def ExecuteRouterNotification(self, message):
self.notificationHandler.call(BitMaskTools.extractFrameId(message.header.message_info), message.payload)
def SetGravityVector(self, gravityvector, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = gravityvector.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetGravityVector, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetGravityVector(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetGravityVector, deviceId, options)
ansPayload = ControlConfigPb.GravityVector()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def SetPayloadInformation(self, payloadinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = payloadinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetPayloadInformation, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetPayloadInformation(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetPayloadInformation, deviceId, options)
ansPayload = ControlConfigPb.PayloadInformation()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def SetToolConfiguration(self, toolconfiguration, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = toolconfiguration.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetToolConfiguration, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetToolConfiguration(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetToolConfiguration, deviceId, options)
ansPayload = ControlConfigPb.ToolConfiguration()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def OnNotificationControlConfigurationTopic(self, callback, notificationoptions, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = notificationoptions.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidOnNotificationControlConfigurationTopic, deviceId, options)
ansPayload = ControlConfigPb.NotificationHandle()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
def parseNotifDataFromString(payload):
obj = ControlConfigPb.ControlConfigurationNotification()
obj.ParseFromString(payload)
return obj
self.notificationHandler.addCallback(ansPayload.identifier, parseNotifDataFromString, callback)
return ansPayload
def Unsubscribe(self, notificationhandle, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = notificationhandle.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidUnsubscribe, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetCartesianReferenceFrame(self, cartesianreferenceframeinfo, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = cartesianreferenceframeinfo.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetCartesianReferenceFrame, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetCartesianReferenceFrame(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetCartesianReferenceFrame, deviceId, options)
ansPayload = ControlConfigPb.CartesianReferenceFrameInfo()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def GetControlMode(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetControlMode, deviceId, options)
ansPayload = ControlConfigPb.ControlModeInformation()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def SetJointSpeedSoftLimits(self, jointspeedsoftlimits, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = jointspeedsoftlimits.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetJointSpeedSoftLimits, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetTwistLinearSoftLimit(self, twistlinearsoftlimit, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = twistlinearsoftlimit.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetTwistLinearSoftLimit, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetTwistAngularSoftLimit(self, twistangularsoftlimit, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = twistangularsoftlimit.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetTwistAngularSoftLimit, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetJointAccelerationSoftLimits(self, jointaccelerationsoftlimits, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = jointaccelerationsoftlimits.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetJointAccelerationSoftLimits, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetKinematicHardLimits(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetKinematicHardLimits, deviceId, options)
ansPayload = ControlConfigPb.KinematicLimits()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def GetKinematicSoftLimits(self, controlmodeinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = controlmodeinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidGetKinematicSoftLimits, deviceId, options)
ansPayload = ControlConfigPb.KinematicLimits()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def GetAllKinematicSoftLimits(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetAllKinematicSoftLimits, deviceId, options)
ansPayload = ControlConfigPb.KinematicLimitsList()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def SetDesiredLinearTwist(self, lineartwist, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = lineartwist.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetDesiredLinearTwist, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetDesiredAngularTwist(self, angulartwist, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = angulartwist.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetDesiredAngularTwist, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def SetDesiredJointSpeeds(self, jointspeeds, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = jointspeeds.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidSetDesiredJointSpeeds, deviceId, options)
result = future.result(options.getTimeoutInSecond())
def GetDesiredSpeeds(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidGetDesiredSpeeds, deviceId, options)
ansPayload = ControlConfigPb.DesiredSpeeds()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetGravityVector(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidResetGravityVector, deviceId, options)
ansPayload = ControlConfigPb.GravityVector()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetPayloadInformation(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidResetPayloadInformation, deviceId, options)
ansPayload = ControlConfigPb.PayloadInformation()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetToolConfiguration(self, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
future = self.router.send(None, 1, ControlConfigFunctionUid.uidResetToolConfiguration, deviceId, options)
ansPayload = ControlConfigPb.ToolConfiguration()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetJointSpeedSoftLimits(self, controlmodeinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = controlmodeinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidResetJointSpeedSoftLimits, deviceId, options)
ansPayload = ControlConfigPb.JointSpeedSoftLimits()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetTwistLinearSoftLimit(self, controlmodeinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = controlmodeinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidResetTwistLinearSoftLimit, deviceId, options)
ansPayload = ControlConfigPb.TwistLinearSoftLimit()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetTwistAngularSoftLimit(self, controlmodeinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = controlmodeinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidResetTwistAngularSoftLimit, deviceId, options)
ansPayload = ControlConfigPb.TwistAngularSoftLimit()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload
def ResetJointAccelerationSoftLimits(self, controlmodeinformation, deviceId = 0, options = RouterClient.RouterClientSendOptions()):
reqPayload = controlmodeinformation.SerializeToString()
future = self.router.send(reqPayload, 1, ControlConfigFunctionUid.uidResetJointAccelerationSoftLimits, deviceId, options)
ansPayload = ControlConfigPb.JointAccelerationSoftLimits()
result = future.result(options.getTimeoutInSecond())
ansPayload.ParseFromString(result.payload)
return ansPayload | 0.569733 | 0.132767 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
import pandas as pd
POI_FILENAME = "data/poi-paris.pkl"
parismap = mpimg.imread('data/paris-48.806-2.23--48.916-2.48.jpg')
## coordonnees GPS de la carte
xmin, xmax = 2.23, 2.48 # coord_x min et max
ymin, ymax = 48.806, 48.916 # coord_y min et max
coords = [xmin, xmax, ymin, ymax]
class Density(object):
def fit(self,data):
pass
def predict(self,data):
pass
def score(self,data):
#A compléter : retourne la log-vraisemblance
pass
class Histogramme(Density):
def __init__(self,steps=10):
Density.__init__(self)
self.steps = steps
def fit(self,x):
#A compléter : apprend l'histogramme de la densité sur x
pass
def predict(self,x):
#A compléter : retourne la densité associée à chaque point de x
pass
class KernelDensity(Density):
def __init__(self,kernel=None,sigma=0.1):
Density.__init__(self)
self.kernel = kernel
self.sigma = sigma
def fit(self,x):
self.x = x
def predict(self,data):
#A compléter : retourne la densité associée à chaque point de data
pass
def get_density2D(f,data,steps=100):
""" Calcule la densité en chaque case d'une grille steps x steps dont les bornes sont calculées à partir du min/max de data. Renvoie la grille estimée et la discrétisation sur chaque axe.
"""
xmin, xmax = data[:,0].min(), data[:,0].max()
ymin, ymax = data[:,1].min(), data[:,1].max()
xlin,ylin = np.linspace(xmin,xmax,steps),np.linspace(ymin,ymax,steps)
xx, yy = np.meshgrid(xlin,ylin)
grid = np.c_[xx.ravel(), yy.ravel()]
res = f.predict(grid).reshape(steps, steps)
return res, xlin, ylin
def show_density(f, data, steps=100, log=False):
""" Dessine la densité f et ses courbes de niveau sur une grille 2D calculée à partir de data, avec un pas de discrétisation de steps. Le paramètre log permet d'afficher la log densité plutôt que la densité brute
"""
res, xlin, ylin = get_density2D(f, data, steps)
xx, yy = np.meshgrid(xlin, ylin)
plt.figure()
show_img()
if log:
res = np.log(res+1e-10)
plt.scatter(data[:, 0], data[:, 1], alpha=0.8, s=3)
show_img(res)
plt.colorbar()
plt.contour(xx, yy, res, 20)
def show_img(img=parismap):
""" Affiche une matrice ou une image selon les coordonnées de la carte de Paris.
"""
origin = "lower" if len(img.shape) == 2 else "upper"
alpha = 0.3 if len(img.shape) == 2 else 1.
plt.imshow(img, extent=coords, aspect=1.5, origin=origin, alpha=alpha)
## extent pour controler l'echelle du plan
def load_poi(typepoi,fn=POI_FILENAME):
""" Dictionaire POI, clé : type de POI, valeur : dictionnaire des POIs de ce type : (id_POI, [coordonnées, note, nom, type, prix])
Liste des POIs : furniture_store, laundry, bakery, cafe, home_goods_store,
clothing_store, atm, lodging, night_club, convenience_store, restaurant, bar
"""
poidata = pickle.load(open(fn, "rb"))
data = np.array([[v[1][0][1],v[1][0][0]] for v in sorted(poidata[typepoi].items())])
note = np.array([v[1][1] for v in sorted(poidata[typepoi].items())])
return data,note
plt.ion()
# Liste des POIs : furniture_store, laundry, bakery, cafe, home_goods_store, clothing_store, atm, lodging, night_club, convenience_store, restaurant, bar
# La fonction charge la localisation des POIs dans geo_mat et leur note.
geo_mat, notes = load_poi("bar")
# Affiche la carte de Paris
show_img()
# Affiche les POIs
plt.scatter(geo_mat[:,0],geo_mat[:,1],alpha=0.8,s=3) | S2/ML/TME/TME_dam/tme2/tme2.py |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
import pandas as pd
POI_FILENAME = "data/poi-paris.pkl"
parismap = mpimg.imread('data/paris-48.806-2.23--48.916-2.48.jpg')
## coordonnees GPS de la carte
xmin, xmax = 2.23, 2.48 # coord_x min et max
ymin, ymax = 48.806, 48.916 # coord_y min et max
coords = [xmin, xmax, ymin, ymax]
class Density(object):
def fit(self,data):
pass
def predict(self,data):
pass
def score(self,data):
#A compléter : retourne la log-vraisemblance
pass
class Histogramme(Density):
def __init__(self,steps=10):
Density.__init__(self)
self.steps = steps
def fit(self,x):
#A compléter : apprend l'histogramme de la densité sur x
pass
def predict(self,x):
#A compléter : retourne la densité associée à chaque point de x
pass
class KernelDensity(Density):
def __init__(self,kernel=None,sigma=0.1):
Density.__init__(self)
self.kernel = kernel
self.sigma = sigma
def fit(self,x):
self.x = x
def predict(self,data):
#A compléter : retourne la densité associée à chaque point de data
pass
def get_density2D(f,data,steps=100):
""" Calcule la densité en chaque case d'une grille steps x steps dont les bornes sont calculées à partir du min/max de data. Renvoie la grille estimée et la discrétisation sur chaque axe.
"""
xmin, xmax = data[:,0].min(), data[:,0].max()
ymin, ymax = data[:,1].min(), data[:,1].max()
xlin,ylin = np.linspace(xmin,xmax,steps),np.linspace(ymin,ymax,steps)
xx, yy = np.meshgrid(xlin,ylin)
grid = np.c_[xx.ravel(), yy.ravel()]
res = f.predict(grid).reshape(steps, steps)
return res, xlin, ylin
def show_density(f, data, steps=100, log=False):
""" Dessine la densité f et ses courbes de niveau sur une grille 2D calculée à partir de data, avec un pas de discrétisation de steps. Le paramètre log permet d'afficher la log densité plutôt que la densité brute
"""
res, xlin, ylin = get_density2D(f, data, steps)
xx, yy = np.meshgrid(xlin, ylin)
plt.figure()
show_img()
if log:
res = np.log(res+1e-10)
plt.scatter(data[:, 0], data[:, 1], alpha=0.8, s=3)
show_img(res)
plt.colorbar()
plt.contour(xx, yy, res, 20)
def show_img(img=parismap):
""" Affiche une matrice ou une image selon les coordonnées de la carte de Paris.
"""
origin = "lower" if len(img.shape) == 2 else "upper"
alpha = 0.3 if len(img.shape) == 2 else 1.
plt.imshow(img, extent=coords, aspect=1.5, origin=origin, alpha=alpha)
## extent pour controler l'echelle du plan
def load_poi(typepoi,fn=POI_FILENAME):
""" Dictionaire POI, clé : type de POI, valeur : dictionnaire des POIs de ce type : (id_POI, [coordonnées, note, nom, type, prix])
Liste des POIs : furniture_store, laundry, bakery, cafe, home_goods_store,
clothing_store, atm, lodging, night_club, convenience_store, restaurant, bar
"""
poidata = pickle.load(open(fn, "rb"))
data = np.array([[v[1][0][1],v[1][0][0]] for v in sorted(poidata[typepoi].items())])
note = np.array([v[1][1] for v in sorted(poidata[typepoi].items())])
return data,note
plt.ion()
# Liste des POIs : furniture_store, laundry, bakery, cafe, home_goods_store, clothing_store, atm, lodging, night_club, convenience_store, restaurant, bar
# La fonction charge la localisation des POIs dans geo_mat et leur note.
geo_mat, notes = load_poi("bar")
# Affiche la carte de Paris
show_img()
# Affiche les POIs
plt.scatter(geo_mat[:,0],geo_mat[:,1],alpha=0.8,s=3) | 0.403097 | 0.596521 |
import six
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.python.failure import Failure
from twisted.protocols.amp import IBoxReceiver, IBoxSender
from twisted.trial.unittest import TestCase
from epsilon.amprouter import _ROUTE, RouteNotConnected, Router
@implementer(IBoxReceiver)
class SomeReceiver:
    """
    Test double implementing L{IBoxReceiver}: records its start/stop
    transitions and collects every box delivered while it is running.

    @ivar sender: The L{IBoxSender} handed to C{startReceivingBoxes},
        or C{None} if that method has not been called yet.
    @ivar reason: The L{Failure} handed to C{stopReceivingBoxes}, or
        C{None} if that method has not been called yet.
    @ivar started: Whether C{startReceivingBoxes} has been invoked.
    @ivar stopped: Whether C{stopReceivingBoxes} has been invoked.
    @ivar boxes: All AMP boxes received while started and not stopped.
    """
    sender = None
    reason = None
    started = False
    stopped = False

    def __init__(self):
        self.boxes = []

    def startReceivingBoxes(self, sender):
        """
        Record the sender and mark this receiver as started.
        """
        self.sender = sender
        self.started = True

    def ampBoxReceived(self, box):
        """
        Save C{box}, but only while the receiver is running.
        """
        if not self.started or self.stopped:
            return
        self.boxes.append(box)

    def stopReceivingBoxes(self, reason):
        """
        Record the stop reason and mark this receiver as stopped.
        """
        self.reason = reason
        self.stopped = True
@implementer(IBoxSender)
class CollectingSender:
    """
    Test double implementing L{IBoxSender}: accumulates every box and
    error message passed to it for later inspection.
    """
    def __init__(self):
        self.boxes = []
        self.errors = []

    def sendBox(self, box):
        """
        Append C{box} to C{self.boxes}, first rejecting any box whose
        keys or values are not all strings (text or bytes).
        """
        stringy = (six.text_type, six.binary_type)
        allStrings = all(
            isinstance(key, stringy) and isinstance(value, stringy)
            for key, value in six.viewitems(box))
        if not allStrings:
            raise TypeError("Cannot send boxes containing non-strings")
        self.boxes.append(box)

    def unhandledError(self, failure):
        """
        Record the error message extracted from C{failure}.
        """
        self.errors.append(failure.getErrorMessage())
class RouteTests(TestCase):
    """
    Tests for L{Route}, the L{IBoxSender} responsible for stamping
    routing information onto outgoing boxes.
    """
    def setUp(self):
        """
        Build a router wired to a collecting sender, then bind a route
        for a stub receiver under a known local name.
        """
        self.localName = u"foo"
        self.remoteName = u"bar"
        self.receiver = SomeReceiver()
        self.sender = CollectingSender()
        self.router = Router()
        self.router.startReceivingBoxes(self.sender)
        self.route = self.router.bindRoute(self.receiver, self.localName)

    def test_interfaces(self):
        """
        L{Route} instances provide L{IBoxSender}.
        """
        providesSender = verifyObject(IBoxSender, self.route)
        self.assertTrue(providesSender)

    def test_start(self):
        """
        L{Route.start} starts the bound L{IBoxReceiver} and hands it the
        route itself as its sender.
        """
        self.assertFalse(self.receiver.started)
        self.route.start()
        self.assertTrue(self.receiver.started)
        self.assertIdentical(self.receiver.sender, self.route)

    def test_stop(self):
        """
        L{Route.stop} stops the bound L{IBoxReceiver}, passing along the
        reason for stopping.
        """
        self.route.start()
        self.assertFalse(self.receiver.stopped)
        failure = Failure(RuntimeError("foo"))
        self.route.stop(failure)
        self.assertTrue(self.receiver.stopped)
        self.receiver.reason.trap(RuntimeError)

    def test_sendBox(self):
        """
        A box sent through a connected L{Route} reaches the underlying
        sender with the remote route name stamped onto it.
        """
        self.route.connectTo(self.remoteName)
        self.route.sendBox({"foo": "bar"})
        expected = {_ROUTE: self.remoteName, "foo": "bar"}
        self.assertEqual(self.sender.boxes, [expected])

    def test_sendUnroutedBox(self):
        """
        Connecting a L{Route} to C{None} makes boxes pass through with
        no route name added.
        """
        self.route.connectTo(None)
        self.route.sendBox({"foo": "bar"})
        self.assertEqual(self.sender.boxes, [{"foo": "bar"}])

    def test_sendBoxWithoutConnection(self):
        """
        Sending a box over a route that has not yet been connected to a
        remote route name raises L{RouteNotConnected}.
        """
        self.assertRaises(
            RouteNotConnected, self.route.sendBox, {'foo': 'bar'})

    def test_unbind(self):
        """
        After L{Route.unbind}, the router no longer recognizes the
        route's local name.
        """
        self.route.unbind()
        box = {_ROUTE: self.localName}
        self.assertRaises(KeyError, self.router.ampBoxReceived, box)
class RouterTests(TestCase):
"""
Tests for L{Router}, the L{IBoxReceiver} which directs routed AMP boxes to
the right object.
"""
def setUp(self):
"""
Create sender, router, receiver, and route objects.
"""
self.sender = CollectingSender()
self.router = Router()
self.router.startReceivingBoxes(self.sender)
self.receiver = SomeReceiver()
self.route = self.router.bindRoute(self.receiver)
self.route.connectTo(u"foo")
def test_interfaces(self):
"""
L{Router} instances provide L{IBoxReceiver}.
"""
self.assertTrue(verifyObject(IBoxReceiver, self.router))
def test_uniqueRoutes(self):
"""
L{Router.createRouteIdentifier} returns a new, different route
identifier on each call.
"""
identifiers = [self.router.createRouteIdentifier() for x in range(10)]
self.assertEqual(len(set(identifiers)), len(identifiers))
def test_bind(self):
"""
L{Router.bind} returns a new L{Route} instance which will send boxes to
the L{Route}'s L{IBoxSender} after adding a C{_ROUTE} key to them.
"""
self.route.sendBox({'foo': 'bar'})
self.assertEqual(
self.sender.boxes,
[{_ROUTE: self.route.remoteRouteName, 'foo': 'bar'}])
self.route.unhandledError(Failure(Exception("some test exception")))
self.assertEqual(
self.sender.errors, ["some test exception"])
def test_bindBeforeStart(self):
"""
If a L{Route} is created with L{Router.bind} before the L{Router} is
started with L{Router.startReceivingBoxes}, the L{Route} is created
unstarted and only started when the L{Router} is started.
"""
router = Router()
receiver = SomeReceiver()
route = router.bindRoute(receiver)
route.connectTo(u'quux')
self.assertFalse(receiver.started)
sender = CollectingSender()
router.startReceivingBoxes(sender)
self.assertTrue(receiver.started)
route.sendBox({'foo': 'bar'})
self.assertEqual(
sender.boxes, [{_ROUTE: route.remoteRouteName, 'foo': 'bar'}])
router.ampBoxReceived({_ROUTE: route.localRouteName, 'baz': 'quux'})
self.assertEqual(receiver.boxes, [{'baz': 'quux'}])
def test_bindBeforeStartFinishAfterStart(self):
"""
If a L{Route} is created with L{Router.connect} before the L{Router} is
started with L{Router.startReceivingBoxes} but the Deferred returned by
the connect thunk does not fire until after the router is started, the
L{IBoxReceiver} associated with the route is not started until that
Deferred fires and the route is associated with a remote route name.
"""
router = Router()
receiver = SomeReceiver()
route = router.bindRoute(receiver)
sender = CollectingSender()
router.startReceivingBoxes(sender)
self.assertFalse(receiver.started)
route.connectTo(u"remoteName")
self.assertTrue(receiver.started)
receiver.sender.sendBox({'foo': 'bar'})
self.assertEqual(sender.boxes, [{_ROUTE: 'remoteName', 'foo': 'bar'}])
def test_ampBoxReceived(self):
"""
L{Router.ampBoxReceived} passes on AMP boxes to the L{IBoxReceiver}
identified by the route key in the box.
"""
firstReceiver = SomeReceiver()
firstRoute = self.router.bindRoute(firstReceiver)
firstRoute.start()
secondReceiver = SomeReceiver()
secondRoute = self.router.bindRoute(secondReceiver)
secondRoute.start()
self.router.ampBoxReceived(
{_ROUTE: firstRoute.localRouteName, 'foo': 'bar'})
self.router.ampBoxReceived(
{_ROUTE: secondRoute.localRouteName, 'baz': 'quux'})
self.assertEqual(firstReceiver.boxes, [{'foo': 'bar'}])
self.assertEqual(secondReceiver.boxes, [{'baz': 'quux'}])
def test_ampBoxReceivedDefaultRoute(self):
"""
L{Router.ampBoxReceived} delivers boxes with no route to the default
box receiver.
"""
sender = CollectingSender()
receiver = SomeReceiver()
router = Router()
router.startReceivingBoxes(sender)
router.bindRoute(receiver, None).start()
router.ampBoxReceived({'foo': 'bar'})
self.assertEqual(receiver.boxes, [{'foo': 'bar'}])
def test_stopReceivingBoxes(self):
"""
L{Router.stopReceivingBoxes} calls the C{stop} method of each connected
route.
"""
sender = CollectingSender()
router = Router()
router.startReceivingBoxes(sender)
receiver = SomeReceiver()
router.bindRoute(receiver)
class DummyException(Exception):
pass
self.assertFalse(receiver.stopped)
router.stopReceivingBoxes(Failure(DummyException()))
self.assertTrue(receiver.stopped)
receiver.reason.trap(DummyException) | epsilon/test/test_amprouter.py | import six
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.python.failure import Failure
from twisted.protocols.amp import IBoxReceiver, IBoxSender
from twisted.trial.unittest import TestCase
from epsilon.amprouter import _ROUTE, RouteNotConnected, Router
@implementer(IBoxReceiver)
class SomeReceiver:
"""
A stub AMP box receiver which just keeps track of whether it has been
started or stopped and what boxes have been delivered to it.
@ivar sender: C{None} until C{startReceivingBoxes} is called, then a
reference to the L{IBoxSender} passed to that method.
@ivar reason: C{None} until {stopReceivingBoxes} is called, then a
reference to the L{Failure} passed to that method.
@ivar started: C{False} until C{startReceivingBoxes} is called, then
C{True}.
@ivar stopped: C{False} until C{stopReceivingBoxes} is called, then
C{True}.
"""
sender = None
reason = None
started = False
stopped = False
def __init__(self):
self.boxes = []
def startReceivingBoxes(self, sender):
self.started = True
self.sender = sender
def ampBoxReceived(self, box):
if self.started and not self.stopped:
self.boxes.append(box)
def stopReceivingBoxes(self, reason):
self.stopped = True
self.reason = reason
@implementer(IBoxSender)
class CollectingSender:
"""
An L{IBoxSender} which collects and saves boxes and errors sent to it.
"""
def __init__(self):
self.boxes = []
self.errors = []
def sendBox(self, box):
"""
Reject boxes with non-string keys or values; save all the rest in
C{self.boxes}.
"""
serial_types = (six.text_type, six.binary_type)
for k, v in six.viewitems(box):
if not (isinstance(k, serial_types) and isinstance(v, serial_types)):
raise TypeError("Cannot send boxes containing non-strings")
self.boxes.append(box)
def unhandledError(self, failure):
self.errors.append(failure.getErrorMessage())
class RouteTests(TestCase):
"""
Tests for L{Route}, the L{IBoxSender} which handles adding routing
information to outgoing boxes.
"""
def setUp(self):
"""
Create a route attached to a stub sender.
"""
self.receiver = SomeReceiver()
self.sender = CollectingSender()
self.localName = u"foo"
self.remoteName = u"bar"
self.router = Router()
self.router.startReceivingBoxes(self.sender)
self.route = self.router.bindRoute(self.receiver, self.localName)
def test_interfaces(self):
"""
L{Route} instances provide L{IBoxSender}.
"""
self.assertTrue(verifyObject(IBoxSender, self.route))
def test_start(self):
"""
L{Route.start} starts its L{IBoxReceiver}.
"""
self.assertFalse(self.receiver.started)
self.route.start()
self.assertTrue(self.receiver.started)
self.assertIdentical(self.receiver.sender, self.route)
def test_stop(self):
"""
L{Route.stop} stops its L{IBoxReceiver}.
"""
self.route.start()
self.assertFalse(self.receiver.stopped)
self.route.stop(Failure(RuntimeError("foo")))
self.assertTrue(self.receiver.stopped)
self.receiver.reason.trap(RuntimeError)
def test_sendBox(self):
"""
L{Route.sendBox} adds the route name to the box before passing it on to
the underlying sender.
"""
self.route.connectTo(self.remoteName)
self.route.sendBox({"foo": "bar"})
self.assertEqual(
self.sender.boxes, [{_ROUTE: self.remoteName, "foo": "bar"}])
def test_sendUnroutedBox(self):
"""
If C{Route.connectTo} is called with C{None}, no route name is added to
the outgoing box.
"""
self.route.connectTo(None)
self.route.sendBox({"foo": "bar"})
self.assertEqual(
self.sender.boxes, [{"foo": "bar"}])
def test_sendBoxWithoutConnection(self):
"""
L{Route.sendBox} raises L{RouteNotConnected} if called before the
L{Route} is connected to a remote route name.
"""
self.assertRaises(
RouteNotConnected, self.route.sendBox, {'foo': 'bar'})
def test_unbind(self):
"""
L{Route.unbind} removes the route from its router.
"""
self.route.unbind()
self.assertRaises(
KeyError, self.router.ampBoxReceived, {_ROUTE: self.localName})
class RouterTests(TestCase):
"""
Tests for L{Router}, the L{IBoxReceiver} which directs routed AMP boxes to
the right object.
"""
def setUp(self):
"""
Create sender, router, receiver, and route objects.
"""
self.sender = CollectingSender()
self.router = Router()
self.router.startReceivingBoxes(self.sender)
self.receiver = SomeReceiver()
self.route = self.router.bindRoute(self.receiver)
self.route.connectTo(u"foo")
def test_interfaces(self):
"""
L{Router} instances provide L{IBoxReceiver}.
"""
self.assertTrue(verifyObject(IBoxReceiver, self.router))
def test_uniqueRoutes(self):
"""
L{Router.createRouteIdentifier} returns a new, different route
identifier on each call.
"""
identifiers = [self.router.createRouteIdentifier() for x in range(10)]
self.assertEqual(len(set(identifiers)), len(identifiers))
def test_bind(self):
"""
L{Router.bind} returns a new L{Route} instance which will send boxes to
the L{Route}'s L{IBoxSender} after adding a C{_ROUTE} key to them.
"""
self.route.sendBox({'foo': 'bar'})
self.assertEqual(
self.sender.boxes,
[{_ROUTE: self.route.remoteRouteName, 'foo': 'bar'}])
self.route.unhandledError(Failure(Exception("some test exception")))
self.assertEqual(
self.sender.errors, ["some test exception"])
def test_bindBeforeStart(self):
"""
If a L{Route} is created with L{Router.bind} before the L{Router} is
started with L{Router.startReceivingBoxes}, the L{Route} is created
unstarted and only started when the L{Router} is started.
"""
router = Router()
receiver = SomeReceiver()
route = router.bindRoute(receiver)
route.connectTo(u'quux')
self.assertFalse(receiver.started)
sender = CollectingSender()
router.startReceivingBoxes(sender)
self.assertTrue(receiver.started)
route.sendBox({'foo': 'bar'})
self.assertEqual(
sender.boxes, [{_ROUTE: route.remoteRouteName, 'foo': 'bar'}])
router.ampBoxReceived({_ROUTE: route.localRouteName, 'baz': 'quux'})
self.assertEqual(receiver.boxes, [{'baz': 'quux'}])
def test_bindBeforeStartFinishAfterStart(self):
"""
If a L{Route} is created with L{Router.connect} before the L{Router} is
started with L{Router.startReceivingBoxes} but the Deferred returned by
the connect thunk does not fire until after the router is started, the
L{IBoxReceiver} associated with the route is not started until that
Deferred fires and the route is associated with a remote route name.
"""
router = Router()
receiver = SomeReceiver()
route = router.bindRoute(receiver)
sender = CollectingSender()
router.startReceivingBoxes(sender)
self.assertFalse(receiver.started)
route.connectTo(u"remoteName")
self.assertTrue(receiver.started)
receiver.sender.sendBox({'foo': 'bar'})
self.assertEqual(sender.boxes, [{_ROUTE: 'remoteName', 'foo': 'bar'}])
def test_ampBoxReceived(self):
"""
L{Router.ampBoxReceived} passes on AMP boxes to the L{IBoxReceiver}
identified by the route key in the box.
"""
firstReceiver = SomeReceiver()
firstRoute = self.router.bindRoute(firstReceiver)
firstRoute.start()
secondReceiver = SomeReceiver()
secondRoute = self.router.bindRoute(secondReceiver)
secondRoute.start()
self.router.ampBoxReceived(
{_ROUTE: firstRoute.localRouteName, 'foo': 'bar'})
self.router.ampBoxReceived(
{_ROUTE: secondRoute.localRouteName, 'baz': 'quux'})
self.assertEqual(firstReceiver.boxes, [{'foo': 'bar'}])
self.assertEqual(secondReceiver.boxes, [{'baz': 'quux'}])
def test_ampBoxReceivedDefaultRoute(self):
"""
L{Router.ampBoxReceived} delivers boxes with no route to the default
box receiver.
"""
sender = CollectingSender()
receiver = SomeReceiver()
router = Router()
router.startReceivingBoxes(sender)
router.bindRoute(receiver, None).start()
router.ampBoxReceived({'foo': 'bar'})
self.assertEqual(receiver.boxes, [{'foo': 'bar'}])
def test_stopReceivingBoxes(self):
"""
L{Router.stopReceivingBoxes} calls the C{stop} method of each connected
route.
"""
sender = CollectingSender()
router = Router()
router.startReceivingBoxes(sender)
receiver = SomeReceiver()
router.bindRoute(receiver)
class DummyException(Exception):
pass
self.assertFalse(receiver.stopped)
router.stopReceivingBoxes(Failure(DummyException()))
self.assertTrue(receiver.stopped)
receiver.reason.trap(DummyException) | 0.60054 | 0.260142 |
import re
import unittest
from perfkitbenchmarker import flags_validators
from perfkitbenchmarker import sample
from perfkitbenchmarker import timing_util
class ValidateMeasurementsFlagTestCase(unittest.TestCase):
"""Tests exercising ValidateMeasurementsFlag."""
def testInvalidValue(self):
"""Passing an unrecognized value is not allowed."""
exp_str = 'test: Invalid value for --timing_measurements'
exp_regex = r'^%s$' % re.escape(exp_str)
with self.assertRaisesRegexp(flags_validators.Error, exp_regex):
timing_util.ValidateMeasurementsFlag(['test'])
def testNoneWithAnother(self):
"""Passing none with another value is not allowed."""
exp_str = 'none: Cannot combine with other --timing_measurements options'
exp_regex = r'^%s$' % re.escape(exp_str)
with self.assertRaisesRegexp(flags_validators.Error, exp_regex):
timing_util.ValidateMeasurementsFlag(['none', 'runtimes'])
def testValid(self):
"""Test various valid combinations."""
validate = timing_util.ValidateMeasurementsFlag
self.assertIs(validate([]), True)
self.assertIs(validate(['none']), True)
self.assertIs(validate(['end_to_end_runtime']), True)
self.assertIs(validate(['runtimes']), True)
self.assertIs(validate(['timestamps']), True)
self.assertIs(validate(['end_to_end_runtime', 'runtimes']), True)
self.assertIs(validate(['end_to_end_runtime', 'timestamps']), True)
self.assertIs(validate(['runtimes', 'timestamps']), True)
self.assertIs(
validate(['end_to_end_runtime', 'runtimes', 'timestamps']), True)
class IntervalTimerTestCase(unittest.TestCase):
"""Tests exercising IntervalTimer."""
def testMeasureSequential(self):
"""Verify correct interval tuple generation in sequential measurements."""
timer = timing_util.IntervalTimer()
self.assertEqual(timer.intervals, [])
with timer.Measure('First Interval'):
pass
with timer.Measure('Second Interval'):
pass
self.assertEqual(len(timer.intervals), 2)
first_interval = timer.intervals[0]
self.assertEqual(len(first_interval), 3)
first_name = first_interval[0]
first_start = first_interval[1]
first_stop = first_interval[2]
self.assertEqual(first_name, 'First Interval')
second_interval = timer.intervals[1]
self.assertEqual(len(second_interval), 3)
second_name = second_interval[0]
second_start = second_interval[1]
second_stop = second_interval[2]
self.assertEqual(second_name, 'Second Interval')
self.assertLessEqual(first_start, first_stop)
self.assertLessEqual(first_stop, second_start)
self.assertLessEqual(second_start, second_stop)
def testMeasureNested(self):
"""Verify correct interval tuple generation in nested measurements."""
timer = timing_util.IntervalTimer()
self.assertEqual(timer.intervals, [])
with timer.Measure('Outer Interval'):
with timer.Measure('Inner Interval'):
pass
self.assertEqual(len(timer.intervals), 2)
inner_interval = timer.intervals[0]
self.assertEqual(len(inner_interval), 3)
inner_name = inner_interval[0]
inner_start = inner_interval[1]
inner_stop = inner_interval[2]
self.assertEqual(inner_name, 'Inner Interval')
outer_interval = timer.intervals[1]
self.assertEqual(len(outer_interval), 3)
outer_name = outer_interval[0]
outer_start = outer_interval[1]
outer_stop = outer_interval[2]
self.assertEqual(outer_name, 'Outer Interval')
self.assertLessEqual(outer_start, inner_start)
self.assertLessEqual(inner_start, inner_stop)
self.assertLessEqual(inner_stop, outer_stop)
def testGenerateSamplesMeasureNotCalled(self):
"""GenerateSamples should return an empty list if Measure was not called."""
timer = timing_util.IntervalTimer()
self.assertEqual(timer.intervals, [])
samples = timer.GenerateSamples(
include_runtime=True, include_timestamps=True)
self.assertEqual(timer.intervals, [])
self.assertEqual(samples, [])
def testGenerateSamplesNoRuntimeNoTimestamps(self):
"""No samples when include_runtime and include_timestamps are False."""
timer = timing_util.IntervalTimer()
with timer.Measure('First Interval'):
pass
with timer.Measure('Second Interval'):
pass
samples = timer.GenerateSamples(
include_runtime=False, include_timestamps=False)
self.assertEqual(samples, [])
def testGenerateSamplesRuntimeNoTimestamps(self):
"""Test generating runtime sample but no timestamp samples."""
timer = timing_util.IntervalTimer()
with timer.Measure('First'):
pass
with timer.Measure('Second'):
pass
start0 = timer.intervals[0][1]
stop0 = timer.intervals[0][2]
start1 = timer.intervals[1][1]
stop1 = timer.intervals[1][2]
samples = timer.GenerateSamples(
include_runtime=True, include_timestamps=False)
exp_samples = [
sample.Sample('First Runtime', stop0 - start0, 'seconds'),
sample.Sample('Second Runtime', stop1 - start1, 'seconds')]
self.assertEqual(samples, exp_samples)
def testGenerateSamplesTimestampsNoRuntime(self):
"""Test generating timestamp samples but no runtime sample."""
timer = timing_util.IntervalTimer()
with timer.Measure('First'):
pass
with timer.Measure('Second'):
pass
start0 = timer.intervals[0][1]
stop0 = timer.intervals[0][2]
start1 = timer.intervals[1][1]
stop1 = timer.intervals[1][2]
samples = timer.GenerateSamples(
include_runtime=False, include_timestamps=True)
exp_samples = [
sample.Sample('First Start Timestamp', start0, 'seconds'),
sample.Sample('First Stop Timestamp', stop0, 'seconds'),
sample.Sample('Second Start Timestamp', start1, 'seconds'),
sample.Sample('Second Stop Timestamp', stop1, 'seconds')]
self.assertEqual(samples, exp_samples)
def testGenerateSamplesRuntimeAndTimestamps(self):
"""Test generating both runtime and timestamp samples."""
timer = timing_util.IntervalTimer()
with timer.Measure('First'):
pass
with timer.Measure('Second'):
pass
start0 = timer.intervals[0][1]
stop0 = timer.intervals[0][2]
start1 = timer.intervals[1][1]
stop1 = timer.intervals[1][2]
samples = timer.GenerateSamples(
include_runtime=True, include_timestamps=True)
exp_samples = [
sample.Sample('First Runtime', stop0 - start0, 'seconds'),
sample.Sample('First Start Timestamp', start0, 'seconds'),
sample.Sample('First Stop Timestamp', stop0, 'seconds'),
sample.Sample('Second Runtime', stop1 - start1, 'seconds'),
sample.Sample('Second Start Timestamp', start1, 'seconds'),
sample.Sample('Second Stop Timestamp', stop1, 'seconds')]
self.assertEqual(samples, exp_samples)
if __name__ == '__main__':
unittest.main() | tests/timing_util_test.py |
import re
import unittest
from perfkitbenchmarker import flags_validators
from perfkitbenchmarker import sample
from perfkitbenchmarker import timing_util
class ValidateMeasurementsFlagTestCase(unittest.TestCase):
"""Tests exercising ValidateMeasurementsFlag."""
def testInvalidValue(self):
"""Passing an unrecognized value is not allowed."""
exp_str = 'test: Invalid value for --timing_measurements'
exp_regex = r'^%s$' % re.escape(exp_str)
with self.assertRaisesRegexp(flags_validators.Error, exp_regex):
timing_util.ValidateMeasurementsFlag(['test'])
def testNoneWithAnother(self):
"""Passing none with another value is not allowed."""
exp_str = 'none: Cannot combine with other --timing_measurements options'
exp_regex = r'^%s$' % re.escape(exp_str)
with self.assertRaisesRegexp(flags_validators.Error, exp_regex):
timing_util.ValidateMeasurementsFlag(['none', 'runtimes'])
def testValid(self):
"""Test various valid combinations."""
validate = timing_util.ValidateMeasurementsFlag
self.assertIs(validate([]), True)
self.assertIs(validate(['none']), True)
self.assertIs(validate(['end_to_end_runtime']), True)
self.assertIs(validate(['runtimes']), True)
self.assertIs(validate(['timestamps']), True)
self.assertIs(validate(['end_to_end_runtime', 'runtimes']), True)
self.assertIs(validate(['end_to_end_runtime', 'timestamps']), True)
self.assertIs(validate(['runtimes', 'timestamps']), True)
self.assertIs(
validate(['end_to_end_runtime', 'runtimes', 'timestamps']), True)
class IntervalTimerTestCase(unittest.TestCase):
"""Tests exercising IntervalTimer."""
def testMeasureSequential(self):
"""Verify correct interval tuple generation in sequential measurements."""
timer = timing_util.IntervalTimer()
self.assertEqual(timer.intervals, [])
with timer.Measure('First Interval'):
pass
with timer.Measure('Second Interval'):
pass
self.assertEqual(len(timer.intervals), 2)
first_interval = timer.intervals[0]
self.assertEqual(len(first_interval), 3)
first_name = first_interval[0]
first_start = first_interval[1]
first_stop = first_interval[2]
self.assertEqual(first_name, 'First Interval')
second_interval = timer.intervals[1]
self.assertEqual(len(second_interval), 3)
second_name = second_interval[0]
second_start = second_interval[1]
second_stop = second_interval[2]
self.assertEqual(second_name, 'Second Interval')
self.assertLessEqual(first_start, first_stop)
self.assertLessEqual(first_stop, second_start)
self.assertLessEqual(second_start, second_stop)
def testMeasureNested(self):
"""Verify correct interval tuple generation in nested measurements."""
timer = timing_util.IntervalTimer()
self.assertEqual(timer.intervals, [])
with timer.Measure('Outer Interval'):
with timer.Measure('Inner Interval'):
pass
self.assertEqual(len(timer.intervals), 2)
inner_interval = timer.intervals[0]
self.assertEqual(len(inner_interval), 3)
inner_name = inner_interval[0]
inner_start = inner_interval[1]
inner_stop = inner_interval[2]
self.assertEqual(inner_name, 'Inner Interval')
outer_interval = timer.intervals[1]
self.assertEqual(len(outer_interval), 3)
outer_name = outer_interval[0]
outer_start = outer_interval[1]
outer_stop = outer_interval[2]
self.assertEqual(outer_name, 'Outer Interval')
self.assertLessEqual(outer_start, inner_start)
self.assertLessEqual(inner_start, inner_stop)
self.assertLessEqual(inner_stop, outer_stop)
def testGenerateSamplesMeasureNotCalled(self):
"""GenerateSamples should return an empty list if Measure was not called."""
timer = timing_util.IntervalTimer()
self.assertEqual(timer.intervals, [])
samples = timer.GenerateSamples(
include_runtime=True, include_timestamps=True)
self.assertEqual(timer.intervals, [])
self.assertEqual(samples, [])
def testGenerateSamplesNoRuntimeNoTimestamps(self):
"""No samples when include_runtime and include_timestamps are False."""
timer = timing_util.IntervalTimer()
with timer.Measure('First Interval'):
pass
with timer.Measure('Second Interval'):
pass
samples = timer.GenerateSamples(
include_runtime=False, include_timestamps=False)
self.assertEqual(samples, [])
def testGenerateSamplesRuntimeNoTimestamps(self):
"""Test generating runtime sample but no timestamp samples."""
timer = timing_util.IntervalTimer()
with timer.Measure('First'):
pass
with timer.Measure('Second'):
pass
start0 = timer.intervals[0][1]
stop0 = timer.intervals[0][2]
start1 = timer.intervals[1][1]
stop1 = timer.intervals[1][2]
samples = timer.GenerateSamples(
include_runtime=True, include_timestamps=False)
exp_samples = [
sample.Sample('First Runtime', stop0 - start0, 'seconds'),
sample.Sample('Second Runtime', stop1 - start1, 'seconds')]
self.assertEqual(samples, exp_samples)
def testGenerateSamplesTimestampsNoRuntime(self):
"""Test generating timestamp samples but no runtime sample."""
timer = timing_util.IntervalTimer()
with timer.Measure('First'):
pass
with timer.Measure('Second'):
pass
start0 = timer.intervals[0][1]
stop0 = timer.intervals[0][2]
start1 = timer.intervals[1][1]
stop1 = timer.intervals[1][2]
samples = timer.GenerateSamples(
include_runtime=False, include_timestamps=True)
exp_samples = [
sample.Sample('First Start Timestamp', start0, 'seconds'),
sample.Sample('First Stop Timestamp', stop0, 'seconds'),
sample.Sample('Second Start Timestamp', start1, 'seconds'),
sample.Sample('Second Stop Timestamp', stop1, 'seconds')]
self.assertEqual(samples, exp_samples)
def testGenerateSamplesRuntimeAndTimestamps(self):
"""Test generating both runtime and timestamp samples."""
timer = timing_util.IntervalTimer()
with timer.Measure('First'):
pass
with timer.Measure('Second'):
pass
start0 = timer.intervals[0][1]
stop0 = timer.intervals[0][2]
start1 = timer.intervals[1][1]
stop1 = timer.intervals[1][2]
samples = timer.GenerateSamples(
include_runtime=True, include_timestamps=True)
exp_samples = [
sample.Sample('First Runtime', stop0 - start0, 'seconds'),
sample.Sample('First Start Timestamp', start0, 'seconds'),
sample.Sample('First Stop Timestamp', stop0, 'seconds'),
sample.Sample('Second Runtime', stop1 - start1, 'seconds'),
sample.Sample('Second Start Timestamp', start1, 'seconds'),
sample.Sample('Second Stop Timestamp', stop1, 'seconds')]
self.assertEqual(samples, exp_samples)
if __name__ == '__main__':
unittest.main() | 0.787073 | 0.691406 |
import os
import shutil
import unittest
import luigi
import logging
import yaml
from itertools import izip
import ratatosk.lib.align.bwa as BWA
import ratatosk.lib.tools.samtools as SAM
import ratatosk.lib.files.fastq as FASTQ
import ratatosk.lib.tools.picard as PICARD
import ratatosk.lib.tools.gatk as GATK
import ratatosk.lib.utils.cutadapt as CUTADAPT
import ratatosk.lib.tools.fastqc as FASTQC
import ratatosk.lib.files.external
from ratatosk.config import get_config
from ratatosk.utils import make_fastq_links, rreplace, determine_read_type
logging.basicConfig(level=logging.DEBUG)
sample = "P001_101_index3_TGACCA_L001"
bam = os.path.join(sample + ".bam")
localconf = "mock.yaml"
ratatosk_conf = os.path.join(os.path.dirname(__file__), os.pardir, "config", "ratatosk.yaml")
def setUpModule():
global cnf
cnf = get_config()
with open(localconf, "w") as fp:
fp.write(yaml.safe_dump({
'picard' : {
'InsertMetrics' :
{'parent_task' : 'ratatosk.lib.tools.picard.DuplicationMetrics'},
},
'gatk' :
{
'IndelRealigner' :
{'parent_task': ['ratatosk.lib.tools.picard.MergeSamFiles',
'ratatosk.lib.tools.gatk.RealignerTargetCreator',
'ratatosk.lib.tools.gatk.UnifiedGenotyper'],
'source_label': [None, None, 'BOTH.raw'],
'source_suffix' : ['.bam', '.intervals', '.vcf'],
},
'RealignerTargetCreator' :
{'parent_task' : 'ratatosk.lib.align.bwa.BwaAln'},
}
},
default_flow_style=False))
# Need to add ratatosk first, then override with localconf
cnf.add_config_path(ratatosk_conf)
cnf.add_config_path(localconf)
def tearDownModule():
if os.path.exists(localconf):
os.unlink(localconf)
cnf.clear()
class TestGeneralFunctions(unittest.TestCase):
def test_make_source_file_name_from_string(self):
"""Test generating source file names from strings only. Obsolete."""
def _make_source_file_name(target, label, src_suffix, tgt_suffix, src_label=None):
# If tgt_suffix is list, target suffix should always
# correspond to tgt_suffix[0]
source = target
if isinstance(tgt_suffix, tuple) or isinstance(tgt_suffix, list):
tgt_suffix = tgt_suffix[0]
if tgt_suffix and not src_suffix is None:
if src_label:
# Trick: remove src_label first if present since
# the source label addition here corresponds to a
# "diff" compared to target name
source = rreplace(rreplace(source, tgt_suffix, "", 1), src_label, "", 1) + src_label + src_suffix
else:
source = rreplace(source, tgt_suffix, src_suffix, 1)
if label:
if source.count(label) > 1:
print "label '{}' found multiple times in target '{}'; this could be intentional".format(label, source)
elif source.count(label) == 0:
print "label '{}' not found in target '{}'; are you sure your target is correctly formatted?".format(label, source)
source = rreplace(source, label, "", 1)
return source
# Test IndelRealigner source name generation. IndelRealigner
# takes as input at least a bam file and realign intervals,
# and optionally vcf sources (and more...)
target = ".merge.realign.bam"
source_suffix = (".bam", ".intervals", ".vcf")
source_label = (None, None, ".BOTH.raw")
label = ".realign"
out_fn = []
for src_sfx, src_lab in izip(source_suffix, source_label):
out_fn.append(_make_source_file_name(target, label, src_sfx, ".bam", src_lab))
self.assertEqual(out_fn, [".merge.bam", ".merge.intervals", ".merge.BOTH.raw.vcf"])
source_label = (".merge", ".merge", ".BOTH.raw")
out_fn = []
for src_sfx, src_lab in izip(source_suffix, source_label):
out_fn.append(_make_source_file_name(target, label, src_sfx, ".bam", src_lab))
self.assertEqual(out_fn, [".merge.bam", ".merge.intervals", ".merge.BOTH.raw.vcf"])
# Test ReadBackedPhasing where the variant suffix can differ
# much from the original bam file
target = ".merge-variants-combined-phased.vcf"
source_suffix = (".bam", ".vcf")
source_label = (None, None)
label = "-phased"
out_fn = []
for src_sfx, src_lab in izip(source_suffix, source_label):
out_fn.append(_make_source_file_name(target, label, src_sfx, ".bam", src_lab))
out_fn = []
source_label = ("-variants-combined", None)
for src_sfx, src_lab in izip(source_suffix, source_label):
out_fn.append(_make_source_file_name(target, label, src_sfx, ".bam", src_lab))
def test_make_source_file_name_from_class(self):
"""Test generating source file names from classes, utilizing
the fact that the classes themselves contain the information
we request (label and source_suffix). Problem is they are not
instantiated.
"""
def _make_source_file_name(target_cls, source_cls, diff_label=None):
src_label = source_cls().label
tgt_suffix = target_cls.suffix
src_suffix = source_cls().suffix
if isinstance(tgt_suffix, tuple) or isinstance(tgt_suffix, list):
if len(tgt_suffix) > 0:
tgt_suffix = tgt_suffix[0]
if isinstance(src_suffix, tuple) or isinstance(src_suffix, list):
if len(src_suffix) > 0:
src_suffix = src_suffix[0]
# Start by stripping tgt_suffix
if tgt_suffix:
source = rreplace(target_cls.target, tgt_suffix, "", 1)
else:
source = target_cls.target
# Then remove the target label and diff_label
source = rreplace(source, target_cls.label, "", 1)
if diff_label:
source = rreplace(source, str(diff_label), "", 1)
if src_label:
# Trick: remove src_label first if present since
# the source label addition here corresponds to a
# "diff" compared to target name
source = rreplace(source, str(src_label), "", 1) + str(src_label) + str(src_suffix)
else:
source = source + str(src_suffix)
if src_label:
if source.count(str(src_label)) > 1:
print "label '{}' found multiple times in target '{}'; this could be intentional".format(src_label, source)
elif source.count(src_label) == 0:
print "label '{}' not found in target '{}'; are you sure your target is correctly formatted?".format(src_label, source)
return source
# Test IndelRealigner source name generation. IndelRealigner
# takes as input at least a bam file and realign intervals,
# and optionally vcf sources (and more...)
target = ".merge.realign.bam"
s = ratatosk.lib.tools.gatk.IndelRealigner(target=target,
parent_task=['ratatosk.lib.tools.picard.MergeSamFiles',
'ratatosk.lib.tools.gatk.RealignerTargetCreator',
'ratatosk.lib.tools.gatk.UnifiedGenotyper',])
out_fn = []
for p in s.parent():
out_fn.append(_make_source_file_name(s, p))
self.assertEqual(out_fn, [".merge.bam", ".merge.intervals", ".merge.vcf"])
# Test ReadBackedPhasing where the variant suffix can differ
# much from the original bam file
target = ".merge-variants-combined-phased.vcf"
out_fn = []
s = ratatosk.lib.tools.gatk.ReadBackedPhasing(target=target)
for p, dl in izip(s.parent(), s.diff_label):
out_fn.append(_make_source_file_name(s, p, dl))
self.assertEqual(out_fn, ['.merge.bam', '.merge-variants-combined.vcf'])
# Test picard metrics with two output files
target = ".merge.dup.insert_metrics"
s = ratatosk.lib.tools.picard.InsertMetrics(target=target)
self.assertEqual(_make_source_file_name(s, s.parent().pop()), ".merge.dup.bam")
def test_jobtask_source(self):
task = ratatosk.lib.tools.picard.InsertMetrics(target="data/sample.merge.dup.insert_metrics")
self.assertEqual(task.source(), ["data/sample.merge.dup.bam"])
task = ratatosk.lib.tools.gatk.IndelRealigner(target="data/sample.merge.dup.realign.bam",
parent_task=['ratatosk.lib.tools.picard.MergeSamFiles',
'ratatosk.lib.tools.gatk.RealignerTargetCreator',
'ratatosk.lib.tools.gatk.UnifiedGenotyper',])
self.assertEqual(task.source(), ['data/sample.dup.merge.bam', 'data/sample.merge.dup.intervals', 'data/sample.merge.dup.vcf'])
class TestUtilsFunctions(unittest.TestCase):
def test_determine_read_type(self):
fn = "sample_index1_1.fastq.gz"
rtype = determine_read_type(fn, "_1", "_2")
self.assertEqual(rtype, 1)
fn = "sample_index1_2.fastq.gz"
rtype = determine_read_type(fn, "_1", "_2")
self.assertEqual(rtype, 2)
fn = "4_120924_AC003CCCXX_P001_101_index1_1.fastq.gz"
rtype = determine_read_type(fn, "_1", "_2")
self.assertEqual(rtype, 1)
fn = "4_120924_AC003CCCXX_P001_101_index1_2.fastq.gz"
rtype = determine_read_type(fn, "_1", "_2")
self.assertEqual(rtype, 2)
fn = "P001_101_index3_TGACCA_L001_R1_001.fastq.gz"
rtype = determine_read_type(fn, "_R1_001", "_R2_001")
self.assertEqual(rtype, 1)
fn = "P001_101_index3_TGACCA_L001_R2_001.fastq.gz"
rtype = determine_read_type(fn, "_R1_001", "_R2_001")
self.assertEqual(rtype, 2) | test/test_functions.py | import os
import shutil
import unittest
import luigi
import logging
import yaml
from itertools import izip
import ratatosk.lib.align.bwa as BWA
import ratatosk.lib.tools.samtools as SAM
import ratatosk.lib.files.fastq as FASTQ
import ratatosk.lib.tools.picard as PICARD
import ratatosk.lib.tools.gatk as GATK
import ratatosk.lib.utils.cutadapt as CUTADAPT
import ratatosk.lib.tools.fastqc as FASTQC
import ratatosk.lib.files.external
from ratatosk.config import get_config
from ratatosk.utils import make_fastq_links, rreplace, determine_read_type
# Verbose logging so ratatosk/luigi internals are visible while tests run.
logging.basicConfig(level=logging.DEBUG)

# Canonical sample name used throughout the tests below.
sample = "P001_101_index3_TGACCA_L001"
# Expected merged alignment file name for that sample.
# (Originally wrapped in a one-argument os.path.join(), which is a no-op.)
bam = sample + ".bam"
# Scratch config file written by setUpModule() and removed by tearDownModule().
localconf = "mock.yaml"
# The shipped default ratatosk configuration, relative to this test directory.
ratatosk_conf = os.path.join(os.path.dirname(__file__), os.pardir, "config", "ratatosk.yaml")
def setUpModule():
    """Write a minimal local config file and register both config paths.

    The local file overrides the shipped defaults: it rewires the
    InsertMetrics parent and gives IndelRealigner three explicit parents
    with matching source labels/suffixes.
    """
    global cnf
    cnf = get_config()
    overrides = {
        'picard': {
            'InsertMetrics':
                {'parent_task': 'ratatosk.lib.tools.picard.DuplicationMetrics'},
        },
        'gatk': {
            'IndelRealigner':
                {'parent_task': ['ratatosk.lib.tools.picard.MergeSamFiles',
                                 'ratatosk.lib.tools.gatk.RealignerTargetCreator',
                                 'ratatosk.lib.tools.gatk.UnifiedGenotyper'],
                 'source_label': [None, None, 'BOTH.raw'],
                 'source_suffix': ['.bam', '.intervals', '.vcf'],
                 },
            'RealignerTargetCreator':
                {'parent_task': 'ratatosk.lib.align.bwa.BwaAln'},
        },
    }
    with open(localconf, "w") as fp:
        fp.write(yaml.safe_dump(overrides, default_flow_style=False))
    # Need to add ratatosk first, then override with localconf
    cnf.add_config_path(ratatosk_conf)
    cnf.add_config_path(localconf)
def tearDownModule():
    """Remove the generated local config file and reset the config object."""
    try:
        os.unlink(localconf)
    except OSError:
        # File was never written (setUpModule failed) -- nothing to clean up.
        pass
    cnf.clear()
class TestGeneralFunctions(unittest.TestCase):
    """Exercise source-file-name generation for ratatosk job tasks.

    NOTE(review): python-2-only constructs (``print`` statements,
    ``itertools.izip``) were replaced with forms that behave identically
    on python 2 and python 3 (``print(...)`` with a single argument,
    builtin ``zip``). All test inputs and assertions are unchanged.
    """

    def test_make_source_file_name_from_string(self):
        """Test generating source file names from strings only. Obsolete."""
        def _make_source_file_name(target, label, src_suffix, tgt_suffix, src_label=None):
            """Derive a source file name from a target name plus labels/suffixes.

            If tgt_suffix is a list, the target suffix should always
            correspond to tgt_suffix[0].
            """
            source = target
            if isinstance(tgt_suffix, (tuple, list)):
                tgt_suffix = tgt_suffix[0]
            if tgt_suffix and src_suffix is not None:
                if src_label:
                    # Trick: remove src_label first if present since
                    # the source label addition here corresponds to a
                    # "diff" compared to target name
                    source = rreplace(rreplace(source, tgt_suffix, "", 1), src_label, "", 1) + src_label + src_suffix
                else:
                    source = rreplace(source, tgt_suffix, src_suffix, 1)
            if label:
                # The two prints are diagnostics only; the label is stripped
                # exactly once regardless.
                if source.count(label) > 1:
                    print("label '{}' found multiple times in target '{}'; this could be intentional".format(label, source))
                elif source.count(label) == 0:
                    print("label '{}' not found in target '{}'; are you sure your target is correctly formatted?".format(label, source))
                source = rreplace(source, label, "", 1)
            return source
        # Test IndelRealigner source name generation. IndelRealigner
        # takes as input at least a bam file and realign intervals,
        # and optionally vcf sources (and more...)
        target = ".merge.realign.bam"
        source_suffix = (".bam", ".intervals", ".vcf")
        source_label = (None, None, ".BOTH.raw")
        label = ".realign"
        out_fn = []
        for src_sfx, src_lab in zip(source_suffix, source_label):
            out_fn.append(_make_source_file_name(target, label, src_sfx, ".bam", src_lab))
        self.assertEqual(out_fn, [".merge.bam", ".merge.intervals", ".merge.BOTH.raw.vcf"])
        source_label = (".merge", ".merge", ".BOTH.raw")
        out_fn = []
        for src_sfx, src_lab in zip(source_suffix, source_label):
            out_fn.append(_make_source_file_name(target, label, src_sfx, ".bam", src_lab))
        self.assertEqual(out_fn, [".merge.bam", ".merge.intervals", ".merge.BOTH.raw.vcf"])
        # Test ReadBackedPhasing where the variant suffix can differ
        # much from the original bam file
        target = ".merge-variants-combined-phased.vcf"
        source_suffix = (".bam", ".vcf")
        source_label = (None, None)
        label = "-phased"
        out_fn = []
        for src_sfx, src_lab in zip(source_suffix, source_label):
            out_fn.append(_make_source_file_name(target, label, src_sfx, ".bam", src_lab))
        # NOTE(review): the loops below build out_fn but assert nothing;
        # kept as-is since this test is explicitly marked obsolete.
        out_fn = []
        source_label = ("-variants-combined", None)
        for src_sfx, src_lab in zip(source_suffix, source_label):
            out_fn.append(_make_source_file_name(target, label, src_sfx, ".bam", src_lab))

    def test_make_source_file_name_from_class(self):
        """Test generating source file names from classes, utilizing
        the fact that the classes themselves contain the information
        we request (label and source_suffix). Problem is they are not
        instantiated.
        """
        def _make_source_file_name(target_cls, source_cls, diff_label=None):
            """Derive a source file name from target/source task objects."""
            src_label = source_cls().label
            tgt_suffix = target_cls.suffix
            src_suffix = source_cls().suffix
            # Tasks with multiple suffixes: the primary one is element 0.
            if isinstance(tgt_suffix, (tuple, list)):
                if len(tgt_suffix) > 0:
                    tgt_suffix = tgt_suffix[0]
            if isinstance(src_suffix, (tuple, list)):
                if len(src_suffix) > 0:
                    src_suffix = src_suffix[0]
            # Start by stripping tgt_suffix
            if tgt_suffix:
                source = rreplace(target_cls.target, tgt_suffix, "", 1)
            else:
                source = target_cls.target
            # Then remove the target label and diff_label
            source = rreplace(source, target_cls.label, "", 1)
            if diff_label:
                source = rreplace(source, str(diff_label), "", 1)
            if src_label:
                # Trick: remove src_label first if present since
                # the source label addition here corresponds to a
                # "diff" compared to target name
                source = rreplace(source, str(src_label), "", 1) + str(src_label) + str(src_suffix)
            else:
                source = source + str(src_suffix)
            if src_label:
                # Diagnostics only -- no further rewriting happens here.
                if source.count(str(src_label)) > 1:
                    print("label '{}' found multiple times in target '{}'; this could be intentional".format(src_label, source))
                elif source.count(src_label) == 0:
                    print("label '{}' not found in target '{}'; are you sure your target is correctly formatted?".format(src_label, source))
            return source
        # Test IndelRealigner source name generation. IndelRealigner
        # takes as input at least a bam file and realign intervals,
        # and optionally vcf sources (and more...)
        target = ".merge.realign.bam"
        s = ratatosk.lib.tools.gatk.IndelRealigner(target=target,
                                                   parent_task=['ratatosk.lib.tools.picard.MergeSamFiles',
                                                                'ratatosk.lib.tools.gatk.RealignerTargetCreator',
                                                                'ratatosk.lib.tools.gatk.UnifiedGenotyper',])
        out_fn = []
        for p in s.parent():
            out_fn.append(_make_source_file_name(s, p))
        self.assertEqual(out_fn, [".merge.bam", ".merge.intervals", ".merge.vcf"])
        # Test ReadBackedPhasing where the variant suffix can differ
        # much from the original bam file
        target = ".merge-variants-combined-phased.vcf"
        out_fn = []
        s = ratatosk.lib.tools.gatk.ReadBackedPhasing(target=target)
        for p, dl in zip(s.parent(), s.diff_label):
            out_fn.append(_make_source_file_name(s, p, dl))
        self.assertEqual(out_fn, ['.merge.bam', '.merge-variants-combined.vcf'])
        # Test picard metrics with two output files
        target = ".merge.dup.insert_metrics"
        s = ratatosk.lib.tools.picard.InsertMetrics(target=target)
        self.assertEqual(_make_source_file_name(s, s.parent().pop()), ".merge.dup.bam")

    def test_jobtask_source(self):
        """source() on a configured job task should resolve its parent file names."""
        task = ratatosk.lib.tools.picard.InsertMetrics(target="data/sample.merge.dup.insert_metrics")
        self.assertEqual(task.source(), ["data/sample.merge.dup.bam"])
        task = ratatosk.lib.tools.gatk.IndelRealigner(target="data/sample.merge.dup.realign.bam",
                                                      parent_task=['ratatosk.lib.tools.picard.MergeSamFiles',
                                                                   'ratatosk.lib.tools.gatk.RealignerTargetCreator',
                                                                   'ratatosk.lib.tools.gatk.UnifiedGenotyper',])
        self.assertEqual(task.source(), ['data/sample.dup.merge.bam', 'data/sample.merge.dup.intervals', 'data/sample.merge.dup.vcf'])
class TestUtilsFunctions(unittest.TestCase):
    """Tests for filename helpers in ratatosk.utils."""

    def test_determine_read_type(self):
        """determine_read_type should map a read-1/read-2 suffix to 1 or 2."""
        cases = [
            ("sample_index1_1.fastq.gz", "_1", "_2", 1),
            ("sample_index1_2.fastq.gz", "_1", "_2", 2),
            ("4_120924_AC003CCCXX_P001_101_index1_1.fastq.gz", "_1", "_2", 1),
            ("4_120924_AC003CCCXX_P001_101_index1_2.fastq.gz", "_1", "_2", 2),
            ("P001_101_index3_TGACCA_L001_R1_001.fastq.gz", "_R1_001", "_R2_001", 1),
            ("P001_101_index3_TGACCA_L001_R2_001.fastq.gz", "_R1_001", "_R2_001", 2),
        ]
        for fn, read1_suffix, read2_suffix, expected in cases:
            self.assertEqual(determine_read_type(fn, read1_suffix, read2_suffix), expected)