input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
that has shared network with no subdomain access
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = self.acldata["vmD111A"]["name"] + "-shared-scope-domain-nosubdomainaccess-domain-admin"
self.vmdata["displayname"] = self.acldata["vmD111A"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-domain-admin"
try:
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_d11.id,
accountid=self.account_d111a.name,
domainid=self.account_d111a.domainid
)
self.fail("Domain admin is able to deploy a VM for sub domain user in a shared network with scope=Domain and no subdomain access")
except Exception as e:
self.debug("When a user from a subdomain deploys a VM in a shared network with scope=domain with no subdomain access %s" % e)
if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
self.fail(
"Error message validation failed when Domain admin tries to deploy a VM for sub domain user in a shared network with scope=Domain and no subdomain access")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_nosubdomainaccess_subdomainadminuser(self):
"""
Valiate that Domain admin is NOT able to deploy a VM for sub domain admin user in a shared network with scope=Domain and no subdomain access
"""
# Deploy VM as an admin user in a subdomain under a domain that has shared network with no subdomain access
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = self.acldata["vmD111"]["name"] + "-shared-scope-domain-nosubdomainaccess-domain-admin"
self.vmdata["displayname"] = self.acldata["vmD111"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-domain-admin"
try:
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_d11.id,
accountid=self.account_d111.name,
domainid=self.account_d111.domainid
)
self.fail("Domain admin is able to deploy a VM for sub domain admin user in a shared network with scope=Domain and no subdomain access")
except Exception as e:
self.debug("When a admin user from a subdomain deploys a VM in a shared network with scope=domain with no subdomain access %s" % e)
if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
self.fail(
"Error message validation failed when Domain admin tries to deploy a VM for sub domain admin user in a shared network with scope=Domain and no subdomain access ")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_nosubdomainaccess_parentdomainuser(self):
"""
Valiate that Domain admin is NOT able to deploy a VM for parent domain user in a shared network with scope=Domain and no subdomain access
"""
# Deploy VM as user in parentdomain of a domain that has shared network with no subdomain access
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = self.acldata["vmD1A"]["name"] + "-shared-scope-domain-nosubdomainaccess-domain-admin"
self.vmdata["displayname"] = self.acldata["vmD1A"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-domain-admin"
try:
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_d11.id,
accountid=self.account_d1a.name,
domainid=self.account_d1a.domainid
)
self.fail("Domain admin is able to deploy a VM for parent domain user in a shared network with scope=Domain and no subdomain access")
except Exception as e:
self.debug("When a user from parent domain deploys a VM in a shared network with scope=domain with no subdomain access %s" % e)
if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
self.fail(
"Error message validation failed when Domain admin tries to deploy a VM for parent domain user in a shared network with scope=Domain and no subdomain access ")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_nosubdomainaccess_parentdomainadminuser(self):
"""
Valiate that Domain admin is NOT able to deploy a VM for parent domain admin user in a shared network with scope=Domain and no subdomain access
"""
# Deploy VM as an admin user in parentdomain of a domain that has shared network with no subdomain access
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = self.acldata["vmD1"]["name"] + "-shared-scope-domain-nosubdomainaccess-domain-admin"
self.vmdata["displayname"] = self.acldata["vmD1"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-domain-admin"
try:
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_d11.id,
accountid=self.account_d1.name,
domainid=self.account_d1.domainid
)
self.fail("Domain admin is able to deploy a VM for parent domain admin user in a shared network with scope=Domain and no subdomain access")
except Exception as e:
self.debug("When an admin user from parent domain deploys a VM in a shared network with scope=domain with no subdomain access %s" % e)
if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NOT_AVAILABLE_IN_DOMAIN):
self.fail(
"Error message validation failed when Domain admin tries to deploy a VM for parent domain admin user in a shared network with scope=Domain and no subdomain access ")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_nosubdomainaccess_ROOTuser(self):
"""
Valiate that Domain admin is NOT able to deploy a VM for user in ROOT domain in a shared network with scope=Domain and no subdomain access
"""
# Deploy VM as user in ROOT domain
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = self.acldata["vmROOTA"]["name"] + "-shared-scope-domain-nosubdomainaccess-domain-admin"
self.vmdata["displayname"] = self.acldata["vmROOTA"]["displayname"] + "-shared-scope-domain-nosubdomainaccess-domain-admin"
try:
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_d11.id,
accountid=self.account_roota.name,
domainid=self.account_roota.domainid
)
self.fail("Domain admin is able to deploy a VM for user in ROOT domain in a shared network with scope=Domain and no subdomain access")
except Exception as e:
self.debug("When a regular user from ROOT domain deploys a VM in a shared network with scope=domain with no subdomain access %s" % e)
if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
self.fail(
"Error message validation failed when Domain admin tries to deploy a VM for user in ROOT domain in a shared network with scope=Domain and no subdomain access")
## Test cases relating to deploying Virtual Machine as Domain admin for other users in shared network with scope=Domain and with subdomain access
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_withsubdomainaccess_domainuser(self):
"""
Valiate that Domain admin is able to deploy a VM for regular user in domain in a shared network with scope=Domain and subdomain access
"""
# Deploy VM as user in a domain that has shared network with subdomain access
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = self.acldata["vmD11A"]["name"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
self.vmdata["displayname"] = self.acldata["vmD11A"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_with_subdomain_d11.id,
accountid=self.account_d11a.name,
domainid=self.account_d11a.domainid
)
self.assertEqual(vm.state == "Running" and vm.account == self.account_d11a.name and vm.domainid == self.account_d11a.domainid,
True,
"Domain admin is not able to deploy a VM for regular user in domain in a shared network with scope=Domain and subdomain access")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_withsubdomainaccess_domainadminuser(self):
"""
Valiate that Domain admin is able to deploy a VM for admin user in domain in a shared network with scope=Domain and subdomain access
"""
# Deploy VM as an admin user in a domain that has shared network with subdomain access
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = self.acldata["vmD11"]["name"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
self.vmdata["displayname"] = self.acldata["vmD11"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_with_subdomain_d11.id,
accountid=self.account_d11.name,
domainid=self.account_d11.domainid
)
self.assertEqual(vm.state == "Running" and vm.account == self.account_d11.name and vm.domainid == self.account_d11.domainid,
True,
"Domain admin is not able to deploy a VM for admin user in domain in a shared network with scope=Domain and subdomain access")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_withsubdomainaccess_subdomainuser(self):
"""
Valiate that Domain admin is able to deploy a VM for regular user in subdomain in a shared network with scope=Domain and subdomain access
"""
# Deploy VM as user in a subdomain under a domain that has shared network with subdomain access
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = self.acldata["vmD111A"]["name"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
self.vmdata["displayname"] = self.acldata["vmD111A"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_with_subdomain_d11.id,
accountid=self.account_d111a.name,
domainid=self.account_d111a.domainid
)
self.assertEqual(vm.state == "Running" and vm.account == self.account_d111a.name and vm.domainid == self.account_d111a.domainid,
True,
"Domain admin is not able to deploy a VM for regular user in subdomain in a shared network with scope=Domain and subdomain access")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_withsubdomainaccess_subdomainadminuser(self):
"""
Valiate that Domain admin is able to deploy a VM for admin user in subdomain in a shared network with scope=Domain and subdomain access
"""
# Deploy VM as an admin user in a subdomain under a domain that has shared network with subdomain access
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.vmdata["name"] = self.acldata["vmD111"]["name"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
self.vmdata["displayname"] = self.acldata["vmD111"]["displayname"] + "-shared-scope-domain-withsubdomainaccess-domain-admin"
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_domain_with_subdomain_d11.id,
accountid=self.account_d111.name,
domainid=self.account_d111.domainid
)
self.assertEqual(vm.state == "Running" and vm.account == self.account_d111.name and vm.domainid == self.account_d111.domainid,
True,
"Domain admin is not able to deploy a VM for admin user in subdomain in a shared network with scope=Domain and subdomain access")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_domainadmin_scope_domain_withsubdomainaccess_parentdomainuser(self):
"""
Valiate that Domain admin is NOT able to deploy a VM for regular user in parent domain in a shared network with scope=Domain and subdomain access
"""
# Deploy VM as user in parentdomain of a domain that has shared network with subdomain access
self.apiclient.connection.apiKey = self.user_d1_apikey
| |
<reponame>sloebrich/sage
# -*- coding: utf-8 -*-
r"""
Interfaces to R
This is the reference to the Sagemath R interface, usable from any
Sage program.
The %r interface creating an R cell in the sage
notebook is described in the Notebook manual.
The %R and %%R interface creating an R line or an R cell in the
Jupyter notebook are briefly described at the end of this page. This
documentation will be expanded and placed in the Jupyter notebook
manual when this manual exists.
The following examples try to follow "An Introduction to R" which can
be found at http://cran.r-project.org/doc/manuals/R-intro.html .
EXAMPLES:
Simple manipulations; numbers and vectors
The simplest data structure in R is the numeric vector which
consists of an ordered collection of numbers. To create a
vector named $x$ using the R interface in Sage, you pass the
R interpreter object a list or tuple of numbers::
sage: x = r([10.4,5.6,3.1,6.4,21.7]); x
[1] 10.4 5.6 3.1 6.4 21.7
You can invert elements of a vector x in R by using the
invert operator or by doing 1/x::
sage: ~x
[1] 0.09615385 0.17857143 0.32258065 0.15625000 0.04608295
sage: 1/x
[1] 0.09615385 0.17857143 0.32258065 0.15625000 0.04608295
The following assignment creates a vector $y$ with 11 entries which
consists of two copies of $x$ with a 0 in between::
sage: y = r([x,0,x]); y
[1] 10.4 5.6 3.1 6.4 21.7 0.0 10.4 5.6 3.1 6.4 21.7
Vector Arithmetic
The following command generates a new vector $v$ of length 11 constructed
by adding together (element by element) $2x$ repeated 2.2 times, $y$
repeated just once, and 1 repeated 11 times::
sage: v = 2*x+y+1; v
[1] 32.2 17.8 10.3 20.2 66.1 21.8 22.6 12.8 16.9 50.8 43.5
One can compute the sum of the elements of an R vector in the following
two ways::
sage: sum(x)
[1] 47.2
sage: x.sum()
[1] 47.2
One can calculate the sample variance of a list of numbers::
sage: ((x-x.mean())^2/(x.length()-1)).sum()
[1] 53.853
sage: x.var()
[1] 53.853
sage: x.sort()
[1] 3.1 5.6 6.4 10.4 21.7
sage: x.min()
[1] 3.1
sage: x.max()
[1] 21.7
sage: x
[1] 10.4 5.6 3.1 6.4 21.7
sage: r(-17).sqrt()
[1] NaN
sage: r('-17+0i').sqrt()
[1] 0+4.123106i
Generating an arithmetic sequence::
sage: r('1:10')
[1] 1 2 3 4 5 6 7 8 9 10
Because ``from`` is a keyword in Python, it can't be used
as a keyword argument. Instead, ``from_`` can be passed, and
R will recognize it as the correct thing::
sage: r.seq(length=10, from_=-1, by=.2)
[1] -1.0 -0.8 -0.6 -0.4 -0.2 0.0 0.2 0.4 0.6 0.8
sage: x = r([10.4,5.6,3.1,6.4,21.7])
sage: x.rep(2)
[1] 10.4 5.6 3.1 6.4 21.7 10.4 5.6 3.1 6.4 21.7
sage: x.rep(times=2)
[1] 10.4 5.6 3.1 6.4 21.7 10.4 5.6 3.1 6.4 21.7
sage: x.rep(each=2)
[1] 10.4 10.4 5.6 5.6 3.1 3.1 6.4 6.4 21.7 21.7
Missing Values::
sage: na = r('NA')
sage: z = r([1,2,3,na])
sage: z
[1] 1 2 3 NA
sage: ind = r.is_na(z)
sage: ind
[1] FALSE FALSE FALSE TRUE
sage: zero = r(0)
sage: zero / zero
[1] NaN
sage: inf = r('Inf')
sage: inf-inf
[1] NaN
sage: r.is_na(inf)
[1] FALSE
sage: r.is_na(inf-inf)
[1] TRUE
sage: r.is_na(zero/zero)
[1] TRUE
sage: r.is_na(na)
[1] TRUE
sage: r.is_nan(inf-inf)
[1] TRUE
sage: r.is_nan(zero/zero)
[1] TRUE
sage: r.is_nan(na)
[1] FALSE
Character Vectors::
sage: labs = r.paste('c("X","Y")', '1:10', sep='""'); labs
[1] "X1" "Y2" "X3" "Y4" "X5" "Y6" "X7" "Y8" "X9" "Y10"
Index vectors; selecting and modifying subsets of a data set::
sage: na = r('NA')
sage: x = r([10.4,5.6,3.1,6.4,21.7,na]); x
[1] 10.4 5.6 3.1 6.4 21.7 NA
sage: x['!is.na(self)']
[1] 10.4 5.6 3.1 6.4 21.7
sage: x = r([10.4,5.6,3.1,6.4,21.7,na]); x
[1] 10.4 5.6 3.1 6.4 21.7 NA
sage: (x+1)['(!is.na(self)) & self>0']
[1] 11.4 6.6 4.1 7.4 22.7
sage: x = r([10.4,-2,3.1,-0.5,21.7,na]); x
[1] 10.4 -2.0 3.1 -0.5 21.7 NA
sage: (x+1)['(!is.na(self)) & self>0']
[1] 11.4 4.1 0.5 22.7
Distributions::
sage: r.options(width="60")
$width
[1] 80
sage: rr = r.dnorm(r.seq(-3,3,0.1))
sage: rr
[1] 0.004431848 0.005952532 0.007915452 0.010420935
[5] 0.013582969 0.017528300 0.022394530 0.028327038
[9] 0.035474593 0.043983596 0.053990967 0.065615815
[13] 0.078950158 0.094049077 0.110920835 0.129517596
[17] 0.149727466 0.171368592 0.194186055 0.217852177
[21] 0.241970725 0.266085250 0.289691553 0.312253933
[25] 0.333224603 0.352065327 0.368270140 0.381387815
[29] 0.391042694 0.396952547 0.398942280 0.396952547
[33] 0.391042694 0.381387815 0.368270140 0.352065327
[37] 0.333224603 0.312253933 0.289691553 0.266085250
[41] 0.241970725 0.217852177 0.194186055 0.171368592
[45] 0.149727466 0.129517596 0.110920835 0.094049077
[49] 0.078950158 0.065615815 0.053990967 0.043983596
[53] 0.035474593 0.028327038 0.022394530 0.017528300
[57] 0.013582969 0.010420935 0.007915452 0.005952532
[61] 0.004431848
Convert R Data Structures to Python/Sage::
sage: rr = r.dnorm(r.seq(-3,3,0.1))
sage: sum(rr._sage_())
9.9772125168981...
Or you get a dictionary to be able to access all the information::
sage: rs = r.summary(r.c(1,4,3,4,3,2,5,1))
sage: rs
Min. 1st Qu. Median Mean 3rd Qu. Max.
1.000 1.750 3.000 2.875 4.000 5.000
sage: d = rs._sage_()
sage: d['DATA']
[1, 1.75, 3, 2.875, 4, 5]
sage: d['_Names']
['Min.', '1st Qu.', 'Median', 'Mean', '3rd Qu.', 'Max.']
sage: d['_r_class']
['summaryDefault', 'table']
It is also possible to access the plotting capabilities of R
through Sage. For more information see the documentation of
r.plot() or r.png().
THE JUPYTER NOTEBOOK INTERFACE (work in progress).
The %r interface described in the Sage notebook manual is not useful
in the Jupyter notebook: it creates an inferior R interpreter which
cannot be escaped.
The RPy2 library allows the creation of an R cell in the Jupyter
notebook analogous to the %r escape in command line or %r cell in a
Sage notebook.
The interface is loaded by a cell containing the sole code:
"%load_ext rpy2.ipython"
After execution of this code, the %R and %%R magics are available:
- %R allows the execution of a single line of R code. Data exchange is
possible via the -i and -o options. Do "%R?" in a standalone cell
to get the documentation.
- %%R allows the execution in R of the whole text of a cell, with
similar options (do "%%R?" in a standalone cell for
documentation).
A few important points must be noted:
- The R interpreter launched by this interface IS (currently)
DIFFERENT from the R interpreter used by other r... functions.
- Data exchanged via the -i and -o options have a format DIFFERENT
from the format used by the r... functions (RPy2 mostly uses arrays,
and bugs the user to use the pandas Python package).
- R graphics are (beautifully) displayed in output cells, but are not
directly importable. You have to save them as .png, .pdf or .svg
files and import them in Sage for further use.
In its current incarnation, this interface is mostly useful to
statisticians needing Sage for a few symbolic computations but mostly
using R for applied work.
AUTHORS:
- <NAME> (2007-11-01)
- <NAME> (2008-04-19)
- <NAME> (2008-03-20)
- <NAME> (2008-04-19)
- <NAME> (2015-12-12, RPy2 interface)
"""
##########################################################################
#
# Copyright (C) 2007 <NAME> <<EMAIL>>
# 2007 <NAME> <<EMAIL>>
# 2008 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#
##########################################################################
from __future__ import print_function, absolute_import
from six.moves import range
import six
from .interface import Interface, InterfaceElement, InterfaceFunction, InterfaceFunctionElement
from sage.env import DOT_SAGE
import re
from sage.structure.element import parent
from sage.interfaces.tab_completion import ExtraTabCompletion
from sage.docs.instancedoc import instancedoc
# see the _lazy_init for some reasoning behind the lazy imports
from sage.misc.lazy_import import lazy_import
lazy_import("rpy2", "robjects")
lazy_import("rpy2.robjects", "packages", "rpy2_packages")
lazy_import("rpy2.robjects.conversion", "localconverter")
# for help page fetching
lazy_import("rpy2.robjects.help", "Package")
lazy_import("rpy2", "rinterface")
COMMANDS_CACHE = '%s/r_commandlist.sobj'%DOT_SAGE
#there is a mirror network, but lets take #1 for now
RRepositoryURL = "http://cran.r-project.org/"
RFilteredPackages = ['.GlobalEnv']
# crosscheck with https://svn.r-project.org/R/trunk/src/main/names.c
# but package:base should cover this. i think.
RBaseCommands = ['c', "NULL", "NA", "True", "False", "Inf", "NaN"]
def _setup_r_to_sage_converter():
"""
Set up the converter used to convert from rpy2's
representation of R objects to the one sage expects.
EXAMPLES::
Test
Simple numeric values are represented as vectors in R. So `1` would actually
be an array of length 1. We convert all vectors of length 1 to simple values,
whether or not they "originally" were simple values or not:
sage: r([42]).sage()
42
sage: r(42).sage()
42
sage: r('c("foo")').sage()
'foo'
Arrays of length greater than one are treated normally:
sage: r([42, 43]).sage()
[42, 43]
We also convert all numeric values to integers if that is possible without
loss of precision:
sage: type(r([1.0]).sage()) == int
True
sage: r([1.0, 42.5]).sage()
[1, 42.5]
Matrices are converted to sage matrices:
sage: r('matrix(c(2,4,3,1,5,7), nrow=2, ncol=3)').sage()
[2 3 5]
[4 1 7]
More complex r structures are represented by dictionaries:
sage: r.summary(1).sage()
{'DATA': | |
loc = para_local[0], scale = para_local[1]) == 0:
plume_draw[i] = numpy.random.normal(loc = input_plume[i], scale = draw_scale[i], size = 1)
print "try resample plume"
if prior_type_plume[i] == "Uniform":
plume_draw[i] = numpy.random.normal(loc = input_plume[i], scale = draw_scale[i], size = 1)
while plume_draw[i] < para_local[0] or plume_draw[i] > para_local[1]:
plume_draw[i] = numpy.random.normal(loc = input_plume[i], scale = draw_scale[i], size = 1)
print "try resample plume"
if prior_type_plume[i] == "Fixed":
plume_draw[i] = input_plume[i]
return plume_draw
# End of function "draw_input_Gaus"
#------------------------------------------------------------------------------------------------------------------------------
def draw_wind_Gaus(input_wind, prior_type_wind, draw_scale_wind, prior_para_wind):
    """Draw proposed wind-condition samples from a Gaussian proposal distribution.

    Each parameter i is drawn independently from a Gaussian centered at
    input_wind[i] with standard deviation draw_scale_wind[i], according to
    the prior type declared in prior_type_wind[i]:
      - "Gaussian": a single Gaussian draw; parameter i == 1 (wind speed —
        TODO confirm against caller) is redrawn until non-negative.
      - "Uniform":  redrawn until the value lies within the bounds given by
        prior_para_wind[i] = [lower, upper]; parameter i == 1 is additionally
        redrawn until non-negative.
      - "Fixed":    the input value is kept unchanged (no draw).

    Parameters
    ----------
    input_wind : sequence of float
        Wind conditions at the current iteration.
    prior_type_wind : sequence of str
        Prior type per parameter: "Gaussian", "Uniform", or "Fixed".
    draw_scale_wind : sequence of float
        Proposal standard deviation per parameter.
    prior_para_wind : sequence of [lower, upper]
        Prior parameters; only the bounds are used (for the "Uniform" case).

    Returns
    -------
    numpy.ndarray
        Proposed wind-condition vector, same length as input_wind.

    Notes
    -----
    Only Gaussian and Uniform priors are supported. Uses parenthesized
    print calls so the module runs under both Python 2 and Python 3
    (the original bare print statements are a syntax error on Python 3).
    """
    wind_draw = numpy.zeros(len(input_wind))
    for i in range(len(input_wind)):
        kind = prior_type_wind[i]
        if kind == "Gaussian":
            wind_draw[i] = numpy.random.normal(loc=input_wind[i], scale=draw_scale_wind[i])
            if i == 1:
                # Wind speed must be non-negative; resample until it is.
                while wind_draw[i] < 0:
                    wind_draw[i] = numpy.random.normal(loc=input_wind[i], scale=draw_scale_wind[i])
                    print("drawn wind speed below zero")
        elif kind == "Uniform":
            # Bounds of the uniform prior; proposals outside them are rejected.
            para_local = numpy.array(prior_para_wind[i])
            wind_draw[i] = numpy.random.normal(loc=input_wind[i], scale=draw_scale_wind[i])
            while wind_draw[i] < para_local[0] or wind_draw[i] > para_local[1]:
                wind_draw[i] = numpy.random.normal(loc=input_wind[i], scale=draw_scale_wind[i])
                print("try resample wind")
            if i == 1:
                # Wind speed must be non-negative; resample until it is.
                while wind_draw[i] < 0:
                    wind_draw[i] = numpy.random.normal(loc=input_wind[i], scale=draw_scale_wind[i])
                    print("drawn wind speed below zero")
        elif kind == "Fixed":
            # This parameter is known; keep it at its input value.
            wind_draw[i] = input_wind[i]
    return wind_draw
# End of function "draw_wind_Gaus"
#------------------------------------------------------------------------------------------------------------------------------
def likelihood_function(prediction, observation, likelihood_ratio):
    """Compute the log10 likelihood for each observation/prediction pair.

    The likelihood of a pair is a Gaussian density (mean 0, standard
    deviation likelihood_ratio) evaluated at log10(observation/prediction),
    so it scales with the magnitude of the observation. Observations are
    assumed independent, so the result is a per-pair vector.

    Parameters
    ----------
    prediction : array_like of float
        Simulated values from tephra2. NOT modified: values below 0.001
        are clipped to 0.001 on a local copy to avoid division by zero
        and log(0). (The original implementation clobbered the caller's
        array in place.)
    observation : array_like of float
        Field observations; assumed strictly positive.
    likelihood_ratio : float
        Scale (standard deviation) of the likelihood Gaussian.

    Returns
    -------
    numpy.ndarray
        Log10-transformed likelihood value for each pair.
    """
    # Clip on a copy so the caller's prediction array is left untouched.
    pred = numpy.maximum(numpy.asarray(prediction, dtype=float), 0.001)
    obs = numpy.asarray(observation, dtype=float)
    # norm.logpdf gives the natural-log density; divide by ln(10) to get
    # the base-10 logarithm the rest of the sampler works with. This is a
    # single vectorized pass instead of the original per-element loop.
    likelihood_array = norm.logpdf(numpy.log10(obs / pred), loc=0, scale=likelihood_ratio) / math.log(10)
    return likelihood_array
# End of function "likelihood_function"
#------------------------------------------------------------------------------------------------------------------------------
def prior_function_plume(prior_type_plume, input_plume, prior_para):
    """Compute the log10 prior density for each plume initial condition.

    Parameters
    ----------
    prior_type_plume : sequence of str
        Prior type per parameter: "Uniform", "Gaussian", or "Fixed".
        "Fixed" parameters contribute 0 (they are not estimated).
    input_plume : sequence of float
        Plume initial conditions at the current iteration.
    prior_para : sequence of [a, b]
        Prior parameters: [lower, upper] bounds for "Uniform",
        [mean, std] for "Gaussian"; ignored for "Fixed".

    Returns
    -------
    numpy.ndarray
        Log10-transformed prior density per parameter.

    Notes
    -----
    Uses logpdf / ln(10) rather than math.log(pdf, 10): for a value outside
    the uniform bounds the density is 0 and math.log raised a ValueError
    (math domain error), crashing the sampler; logpdf correctly yields -inf.
    """
    n = len(input_plume)
    priors = numpy.zeros(n)
    for i in range(n):
        kind = prior_type_plume[i]
        if kind == 'Uniform':
            para_local = numpy.array(prior_para[i])
            # scipy's uniform is parameterized by loc and scale = upper - lower.
            priors[i] = uniform.logpdf(input_plume[i], loc=para_local[0],
                                       scale=para_local[1] - para_local[0]) / math.log(10)
        elif kind == 'Gaussian':
            para_local = prior_para[i]
            priors[i] = norm.logpdf(input_plume[i], loc=para_local[0],
                                    scale=para_local[1]) / math.log(10)
        elif kind == 'Fixed':
            # Known parameter: contributes nothing to the log prior.
            priors[i] = 0
    return (priors)
# End of function "prior_function_plume"
#------------------------------------------------------------------------------------------------------------------------------
def prior_function_wind(prior_type_wind, input_wind, prior_para_wind):
    """Compute the log10 prior density for each wind condition.

    Parameters
    ----------
    prior_type_wind : sequence of str
        Prior type per parameter: "Uniform", "Gaussian", or "Fixed".
        "Fixed" parameters contribute 0 (they are not estimated).
    input_wind : sequence of float
        Wind conditions at the current iteration.
    prior_para_wind : sequence of [a, b]
        Prior parameters: [lower, upper] bounds for "Uniform",
        [mean, std] for "Gaussian"; ignored for "Fixed".

    Returns
    -------
    numpy.ndarray
        Log10-transformed prior density per parameter.

    Notes
    -----
    Uses logpdf / ln(10) rather than math.log(pdf, 10): for a value outside
    the uniform bounds the density is 0 and math.log raised a ValueError
    (math domain error), crashing the sampler; logpdf correctly yields -inf.
    Mirrors the structure of prior_function_plume.
    """
    n = len(input_wind)
    priors_wind = numpy.zeros(n)
    for i in range(n):
        kind = prior_type_wind[i]
        if kind == 'Uniform':
            para_local = numpy.array(prior_para_wind[i])
            # scipy's uniform is parameterized by loc and scale = upper - lower.
            priors_wind[i] = uniform.logpdf(input_wind[i], loc=para_local[0],
                                            scale=para_local[1] - para_local[0]) / math.log(10)
        elif kind == 'Gaussian':
            para_local = prior_para_wind[i]
            priors_wind[i] = norm.logpdf(input_wind[i], loc=para_local[0],
                                         scale=para_local[1]) / math.log(10)
        elif kind == 'Fixed':
            # Known parameter: contributes nothing to the log prior.
            priors_wind[i] = 0
    return (priors_wind)
# End of function "prior_function_wind"
#------------------------------------------------------------------------------------------------------------------------------
def proposal_function(input_plume, input_wind,
prior_type_plume, prior_type_wind,
draw_scale,draw_scale_wind,
prior_para,prior_para_wind,
elevation,runs,likelihood_scale,observation,check_snapshot):
# This function makes use of all above functions, and implments the Metropolis-Hastings algorithm;
# Input:
# All input variables except for "runs" have been introduced in previous functions;
# See "run_mh.py" for examples of these variables.
# runs: The number of sample draws for the Metropolis-Hastings algorithm.
# Output:
# chain: The chain that contains all samples drawn for the Metropolis-Hastings algorithm;
# The initial conditions for the plume and wind conditions that are specified to be "Fixed"
# are kept in the chain (elments in the corresponding column have the same value).
# post_chain: Value of likelihood function*prior for each accepted/kept sample.
# acceptance_count: The number of samples that are accepted during the implmentation of the Metropolis-Hastings algorithm.
# Acceptance rate = acceptance_count/runs.
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>, <EMAIL>
# !python3
"""
Make nice spectrum objects to pass around SED class
"""
import copy
from functools import wraps, partial
from itertools import groupby
from operator import itemgetter
from multiprocessing import Pool
import os
from pkg_resources import resource_filename
import astropy.constants as ac
import astropy.units as q
import astropy.io.votable as vo
from astropy.io import fits
# from astropy.modeling import fitting, models
from bokeh.plotting import figure, show
from bokeh.models import ColumnDataSource, HoverTool
import numpy as np
from pandas import DataFrame
from svo_filters import Filter
from . import mcmc as mc
from . import utilities as u
def copy_raw(func):
    """Decorator that carries the source's raw data over to the returned Spectrum.

    The raw data is taken from the first positional argument (the source
    object): its ``raw`` attribute if truthy, otherwise its ``spectrum``.
    """
    @wraps(func)
    def _copy_raw(*args, **kwargs):
        """Call *func*, then stamp the source's raw data onto the result."""
        source = args[0]
        original = source.raw if source.raw else source.spectrum
        result = func(*args, **kwargs)
        result.raw = original
        return result
    return _copy_raw
class Spectrum:
"""
A class to store, calibrate, fit, and plot a single spectrum
"""
def __init__(self, wave, flux, unc=None, snr=None, trim=None, const=1, phot=False,
name=None, ref=None, header=None, verbose=False, **kwargs):
"""Initialize the Spectrum object
Parameters
----------
wave: astropy.units.quantity.Quantity
The wavelength array
flux: astropy.units.quantity.Quantity
The flux density array
unc: np.ndarray
The flux density uncertainty array
snr: float (optional)
A value to override spectrum SNR
trim: dict
A dictionary of the `trim` method arguments to run
const: int, float
A multiplicative factor for the flux
phot: bool
Photometric spectrum
name: str
A name for the spectrum
ref: str
A reference for the data
header: str
The header for the spectrum file
verbose: bool
Print helpful stuff
"""
# Meta
self.verbose = verbose
self.name = name or 'New Spectrum'
self.ref = ref
self.header = header
self.const = const
self.phot = phot
# Make sure the arrays are the same shape
if not wave.shape == flux.shape and ((unc is None) or not (unc.shape == flux.shape)):
raise TypeError("Wavelength, flux and uncertainty arrays must be the same shape.")
# Check wave units are length
if not u.equivalent(wave, q.um):
raise TypeError("Wavelength array must be in astropy.units.quantity.Quantity length units, e.g. 'um'")
# Check flux units are flux density
if not u.equivalent(flux, u.FLAM):
raise TypeError("Flux array must be in astropy.units.quantity.Quantity flux density units, e.g. 'erg/s/cm2/A'")
# Generate uncertainty array
if unc is None and isinstance(snr, (int, float)):
unc = flux / snr
# Make sure the uncertainty array is in the correct units
if unc is not None:
if not u.equivalent(unc, u.FLAM):
raise TypeError("Uncertainty array must be in astropy.units.quantity.Quantity flux density units, e.g. 'erg/s/cm2/A'")
# Replace negatives, zeros, and infs with nans
spectrum = [wave, flux]
spectrum += [unc] if unc is not None else []
spectrum = u.scrub(spectrum, fill_value=np.nan)
# Store history info and raw data
self.history = {}
self.raw = None
# Strip and store units
self._wave_units = wave.unit
self._flux_units = flux.unit
self.units = [self._wave_units, self._flux_units]
self.units += [self._flux_units] if unc is not None else []
spectrum = [i.value for i in spectrum]
# Add the data
self.wave = spectrum[0]
self._flux = spectrum[1]
self._unc = None if unc is None else spectrum[2]
# Store components if added
self.components = None
self.best_fit = {}
# Store kwargs
for key, val in kwargs.items():
setattr(self, key, val)
# Trim
if trim is not None:
self.trim(**trim)
@copy_raw
def __add__(self, spec):
"""Add the spectra of this and another Spectrum object
Parameters
----------
spec: sedkit.spectrum.Spectrum
The spectrum object to add
Returns
-------
sedkit.spectrum.Spectrum
A new spectrum object with the input spectra stitched together
"""
# If None is added, just return a copy
if spec is None:
return Spectrum(*self.spectrum, name=self.name)
if not isinstance(type(spec), type(Spectrum)):
raise TypeError('spec must be sedkit.spectrum.Spectrum')
# Make spec the same units
spec.wave_units = self.wave_units
spec.flux_units = self.flux_units
# Get the two spectra to stitch
s1 = self.data
s2 = spec.data
# Determine if overlapping
overlap = True
try:
if s1[0][-1] > s1[0][0] > s2[0][-1] or s2[0][-1] > s2[0][0] > s1[0][-1]:
overlap = False
except IndexError:
overlap = False
# Concatenate and order two segments if no overlap
if not overlap:
# Drop uncertainties on both spectra if one is missing
if self.unc is None or spec.unc is None:
s1 = [self.wave, self.flux]
s2 = [spec.wave, spec.flux]
# Concatenate arrays and sort by wavelength
new_spec = np.concatenate([s1, s2], axis=1).T
new_spec = new_spec[np.argsort(new_spec[:, 0])].T
# Otherwise there are three segments, (left, overlap, right)
else:
# Get the left segemnt
left = s1[:, s1[0] <= s2[0][0]]
if not np.any(left):
left = s2[:, s2[0] <= s1[0][0]]
# Get the right segment
right = s1[:, s1[0] >= s2[0][-1]]
if not np.any(right):
right = s2[:, s2[0] >= s1[0][-1]]
# Get the overlapping segements
o1 = s1[:, np.where((s1[0] < right[0][0]) & (s1[0] > left[0][-1]))].squeeze()
o2 = s2[:, np.where((s2[0] < right[0][0]) & (s2[0] > left[0][-1]))].squeeze()
# Get the resolutions
r1 = s1.shape[1] / (max(s1[0]) - min(s1[0]))
r2 = s2.shape[1] / (max(s2[0]) - min(s2[0]))
# Make higher resolution s1
if r1 < r2:
o1, o2 = o2, o1
# Interpolate s2 to s1
o2_flux = np.interp(o1[0], s2[0], s2[1])
# Get the average
o_flux = np.nanmean([o1[1], o2_flux], axis=0)
# Calculate uncertainties if possible
if len(s2) == len(o1) == 3:
o2_unc = np.interp(o1[0], s2[0], s2[2])
o_unc = np.sqrt(o1[2]**2 + o2_unc**2)
overlap = np.array([o1[0], o_flux, o_unc])
else:
overlap = np.array([o1[0], o_flux])
left = left[:2]
right = right[:2]
# Make sure it is 2D
if overlap.shape == (3,):
overlap.shape = 3, 1
if overlap.shape == (2,):
overlap.shape = 2, 1
# Concatenate the segments
new_spec = np.concatenate([left, overlap, right], axis=1)
# Add units
new_spec = [i * Q for i, Q in zip(new_spec, self.units)]
# Make the new spectrum object
new_spec = Spectrum(*new_spec)
# Store the components
new_spec.components = Spectrum(*self.spectrum), spec
return new_spec
    def best_fit_model(self, modelgrid, report=None, name=None, **kwargs):
        """Perform simple fitting of the spectrum to all models in the given
        modelgrid and store the best fit in ``self.best_fit[name]``

        Parameters
        ----------
        modelgrid: sedkit.modelgrid.ModelGrid
            The model grid to fit
        report: str
            The name of the parameter to plot versus the
            Goodness-of-fit statistic (no plot when None)
        name: str
            A name for the fit (defaults to '<grid name> fit')
        """
        # NOTE(review): **kwargs is accepted but never used in this method
        # Match the grid's wavelength units to this spectrum
        modelgrid.wave_units = self.wave_units
        # Prepare data
        name = name or '{} fit'.format(modelgrid.name)
        spectrum = Spectrum(*self.spectrum)
        rows = [row for n, row in modelgrid.index.iterrows()]
        # Fit every model in the grid in parallel
        # NOTE(review): the pool size is hard-coded to 8 workers
        pool = Pool(8)
        func = partial(fit_model, fitspec=spectrum, resample=not modelgrid.phot)
        fit_rows = pool.map(func, rows)
        pool.close()
        pool.join()
        # Turn the results into a DataFrame and sort by goodness-of-fit
        models = DataFrame(fit_rows)
        models = models.sort_values('gstat')
        # Get the best fit (lowest gstat)
        bf = copy.copy(models.iloc[0])
        # Make into a dictionary
        bdict = dict(bf)
        # Add the full model and bookkeeping metadata
        bdict['wave_units'] = self.wave_units
        bdict['flux_units'] = self.flux_units
        bdict['fit_to'] = 'phot' if modelgrid.phot else 'spec'
        full_model = modelgrid.get_spectrum(const=bdict['const'], snr=5, **{par: val for par, val in bdict.items() if par in modelgrid.parameters})
        full_model.phot = True
        bdict['full_model'] = full_model
        self.message(bf[modelgrid.parameters])
        if report is not None:
            # Configure plot
            tools = "pan, wheel_zoom, box_zoom, reset"
            rep = figure(tools=tools, x_axis_label=report, y_axis_label='Goodness-of-fit', plot_width=600, plot_height=400)
            # Single out best fit (models are sorted, so row 0 is the best)
            best = ColumnDataSource(data=models.iloc[:1])
            others = ColumnDataSource(data=models.iloc[1:])
            # Add hover tool
            hover = HoverTool(tooltips=[('label', '@label'), ('gstat', '@gstat')])
            rep.add_tools(hover)
            # Plot the fits, highlighting the best one in red
            rep.circle(report, 'gstat', source=best, color='red', legend_label=bf['label'])
            rep.circle(report, 'gstat', source=others)
            # Show the plot
            show(rep)
        # Cache the result under the fit name
        self.best_fit[name] = bdict
def convolve_filter(self, filter, **kwargs):
"""
Convolve the spectrum with a filter
Parameters
----------
filter: svo_filters.svo.Filter, str
The filter object or name
Returns
-------
sedkit.spectrum.Spectrum
The convolved spectrum object
"""
# Ensure filter object
if isinstance(filter, str):
filter = Filter(filter)
# Get the wavelengths and throughput
flx, unc = filter.apply(self.spectrum)
# Make the new spectrum object
new_spec = Spectrum(filter.wave[0], flx, unc=unc)
return new_spec
@property
def data(self):
"""
Store the spectrum without units
"""
if self.unc is None:
data = np.stack([self.wave, self.flux])
else:
data = np.stack([self.wave, self.flux, self.unc])
return data
def export(self, filepath, header=None):
"""
Export the spectrum to file
Parameters
----------
filepath: str
The path for the exported file
"""
# Get target directory
dirname = os.path.dirname(filepath) or '.'
name = self.name.replace(' ', '_')
# Check the parent directory
if not os.path.exists(dirname):
| |
#!/usr/bin/env python
#-------------------------------------------------------------------#
# Benchmark runner
#-------------------------------------------------------------------#
# NOTE: this script reads from the standard output/error to extract
# information about OCCAM and ROPgadget. Thus, any change in these
# tools might break this script.
import sys
import os
import os.path
import re
import datetime
import collections
import argparse as a
import pptable
import commands as cmd
# Global verbosity flag (not toggled anywhere in this file)
verbose = False

# Named tuple holding OCCAM statistics: number of functions, number of
# instructions, and number of statically-unknown memory accesses
occam_stats = collections.namedtuple('OccamStats', 'funcs insts mem_insts')
def get_occam_home():
    # Return the OCCAM_HOME environment variable, aborting if it is unset.
    occam_home = os.environ.get('OCCAM_HOME')
    if occam_home is None:
        cmd.raise_error("need to set OCCAM_HOME environment variable")
    return occam_home
def get_ropgadget():
    # Locate the ROPgadget executable.
    # Resolution order: the ROPGADGET environment variable, then 'ropgadget'
    # or 'ROPgadget.py' on the PATH. Raises IOError if none is executable.
    ropgadget = None
    # BUG FIX: the original assigned os.environ['ROPGADGET'] to a variable
    # named ROPGADGET (uppercase), so the environment override was silently
    # ignored and the PATH lookup always ran
    if 'ROPGADGET' in os.environ:
        ropgadget = os.environ['ROPGADGET']
    if not cmd.is_exec(ropgadget):
        ropgadget = cmd.which('ropgadget')
    if not cmd.is_exec(ropgadget):
        ropgadget = cmd.which('ROPgadget.py')
    if not cmd.is_exec(ropgadget):
        raise IOError("Cannot find ropgadget")
    return ropgadget
def get_benchmarks(name):
    # Read the list of benchmarks from a JSON file.
    # Input:
    #   name: path to a JSON file with a top-level 'benchmarks' key.
    # Output:
    #   the list stored under 'benchmarks', or an empty list if the file
    #   cannot be decoded.
    import json
    with open(name, "r") as f:
        benchs = list()
        try:
            d = json.load(f)
            benchs = d['benchmarks']
        except ValueError as msg:
            # Python 2 print statements (this script targets Python 2)
            print "Error: while decoding JSON file " + name
            print msg
        # NOTE(review): redundant — the 'with' block already closes the file
        f.close()
    return benchs
def read_occam_output(logfile):
    # Parse an OCCAM log file and return (before, after) occam_stats tuples.
    # The log contains two statistics sections; the marker line
    # 'Statistics for ... after specialization' separates the "before"
    # numbers from the "after" numbers. Missing counters default to 0.
    b_funcs, b_insts, b_mem = 0, 0, 0
    a_funcs, a_insts, a_mem = 0, 0, 0
    before_done = False
    with open(logfile, 'r') as fd:
        for line in fd:
            # Marker: everything from here on belongs to the "after" stats
            # (raw string fixes the invalid '\S' escape in the original)
            if re.search(r'^Statistics for \S* after specialization', line):
                before_done = True
            if re.search('Number of functions', line):
                n = int(line.split()[0])
                if not before_done:
                    b_funcs = n
                else:
                    a_funcs = n
            if re.search('Number of instructions', line):
                n = int(line.split()[0])
                if not before_done:
                    b_insts = n
                else:
                    a_insts = n
            if re.search('Statically unknown memory accesses', line):
                n = int(line.split()[0])
                if not before_done:
                    b_mem = n
                else:
                    a_mem = n
    # The 'with' block closes the file (the original also called fd.close()
    # redundantly here)
    before = occam_stats(funcs=b_funcs, insts=b_insts, mem_insts=b_mem)
    after = occam_stats(funcs=a_funcs, insts=a_insts, mem_insts=a_mem)
    return (before, after)
def read_ropgadget_output(logfile):
    # Return the number of unique gadgets reported in a ROPgadget log file.
    # Scans for a line of the form 'Unique gadgets found: N' and returns N
    # (0 if no such line is present).
    # BUG FIX: the original had an unreachable duplicated 'fd.close(); return'
    # after the first return, plus a redundant fd.close() inside the 'with'.
    num_gadgets = 0
    with open(logfile, 'r') as fd:
        for line in fd:
            if re.search('Unique gadgets found:', line):
                # The count is the 4th whitespace-separated token
                num_gadgets = int(line.split()[3])
    return num_gadgets
def pretty_printing_occam(results):
    # Print a table of before/after OCCAM statistics.
    # Input:
    #   results: iterable of (benchmark, before, after), where before/after
    #            are occam_stats tuples (None entries mark failed runs and
    #            are skipped).
    tab = []
    for benchmark, before, after in results:
        # Skip benchmarks whose OCCAM run failed
        if before is None or after is None:
            continue
        func_red = 0.0
        if (before.funcs > 0):
            func_red = (1.0 - (float(after.funcs) / float(before.funcs))) * 100.0
        insts_red = 0.0
        if (before.insts > 0):
            insts_red = (1.0 - (float(after.insts) / float(before.insts))) * 100.0
        mem_red = 0.0
        if (before.mem_insts > 0):
            mem_red = (1.0 - (float(after.mem_insts) / float(before.mem_insts))) * 100.0
        tab.append([benchmark,
                    before.funcs,
                    after.funcs,
                    float("{0:.2f}".format(func_red)),
                    before.insts,
                    after.insts,
                    float("{0:.2f}".format(insts_red)),
                    before.mem_insts,
                    after.mem_insts,
                    float("{0:.2f}".format(mem_red))])
    table = [["Program", \
              "B Fun", "A Fun", "% Fun Red", \
              "B Ins", "A Ins", "% Ins Red", \
              "B Mem Ins", "A Mem Ins", "% Mem Ins Red"]]
    for row in tab:
        table.append(row)
    # NOTE: removed the redundant local 'import pptable' — the module is
    # already imported at the top of the file
    out = sys.stdout
    pptable.pprint_table(out,table)
def pretty_printing_ropgadget(results):
    # Print a table of before/after ROP/SYS/JOP gadget counts.
    # Input:
    #   results: iterable of (benchmark, total_before, total_after,
    #            jop_before, jop_after, sys_before, sys_after,
    #            rop_before, rop_after) as produced by run_ropgadget.
    # Rows with None counts (failed runs) are skipped, mirroring
    # pretty_printing_occam's handling of failed benchmarks.
    tab = []
    for benchmark, \
        total_before, total_after, \
        jop_before, jop_after, \
        sys_before, sys_after, \
        rop_before, rop_after in results:
        # Skip benchmarks where ROPgadget failed on either binary
        # (the original compared None with ints, which only "worked" under
        # Python 2's arbitrary-type ordering)
        if None in (rop_before, rop_after, sys_before, sys_after, jop_before, jop_after):
            continue
        rop_red = 0.0
        if (rop_before > 0):
            rop_red = (1.0 - (float(rop_after) / float(rop_before))) * 100.0
        sys_red = 0.0
        if (sys_before > 0):
            sys_red = (1.0 - (float(sys_after) / float(sys_before))) * 100.0
        jop_red = 0.0
        if (jop_before > 0):
            jop_red = (1.0 - (float(jop_after) / float(jop_before))) * 100.0
        tab.append([benchmark,
                    rop_before,
                    rop_after,
                    float("{0:.2f}".format(rop_red)),
                    sys_before,
                    sys_after,
                    float("{0:.2f}".format(sys_red)),
                    jop_before,
                    jop_after,
                    float("{0:.2f}".format(jop_red))])
    table = [["Program", \
              "B ROP", "A ROP", "% ROP Red", \
              "B SYS", "A SYS", "% SYS Red", \
              "B JOP", "A JOP", "% JOP Red"]]
    for row in tab:
        table.append(row)
    out = sys.stdout
    pptable.pprint_table(out,table)
def run_occam(dirname, execname, workdir, cpu, mem, slash_opts= []):
    # Run `make` and then OCCAM (`./build.sh`) on one benchmark.
    # Input:
    #   dirname:    benchmark directory (must contain a Makefile and build.sh)
    #   execname:   benchmark executable name (used to name the log files)
    #   workdir:    directory where the .occam.out / .occam.err logs go
    #   cpu, mem:   resource limits forwarded to cmd.run_limited_cmd
    #   slash_opts: extra options appended to the ./build.sh invocation
    # Output:
    #   (benchmark_name, before, after), where before/after are occam_stats
    #   tuples parsed from the log, or (None, None) if any step failed.
    # NOTE(review): mutable default argument slash_opts=[] — harmless here
    # only because the list is never mutated inside this function.
    #benchmark_name = os.path.basename(os.path.normpath(dirname))
    benchmark_name = execname
    outfile = benchmark_name + ".occam.out"
    errfile = benchmark_name + ".occam.err"
    outfd = open(os.path.join(workdir, outfile), "w")
    errfd = open(os.path.join(workdir, errfile), "w")
    res_before, res_after = None, None
    #1. Generate bitcode: run `make`
    returncode,_,_,_ = cmd.run_limited_cmd(['make'], outfd, errfd, benchmark_name, dirname)
    if returncode <> 0:
        cmd.warning("something failed while running \"make\"" + benchmark_name + "\n" + \
                    "Read logs " + outfile + " and " + errfile)
    else:
        #2. Run slash (OCCAM) on it: `build.sh opts`
        slash_args = ['./build.sh']
        slash_args.extend(slash_opts)
        print "Running slash with options " + str(slash_args)
        returncode,_,_,_ = \
            cmd.run_limited_cmd(slash_args, outfd, errfd, benchmark_name, dirname, cpu, mem)
        if returncode <> 0:
            cmd.warning("something failed while running \"" + ' '.join(slash_args) + \
                        "\"" + benchmark_name + "\n" + \
                        "Read logs " + outfile + " and " + errfile)
    outfd.close()
    errfd.close()
    if returncode == 0:
        # All OCCAM output is redirected to stderr, so parse the .err log
        logfile = os.path.join(workdir, errfile)
        (res_before, res_after) = read_occam_output(logfile)
    return (benchmark_name, res_before, res_after)
#Pre: run_occam has been executed already and thus, there are two
#executables in dirname: the original one (execname + "_orig") and the one
#produced by occam (execname + "_occamized").
def run_ropgadget(dirname, execname, workdir, cpu, mem):
    # Run ROPgadget on the original/occamized executable pair and collect
    # gadget counts per category (total, JOP-only, SYS-only, ROP-only).
    # NOTE(review): the cpu and mem parameters are accepted but never used
    # in this function.
    # Output:
    #   (benchname, total_before, total_after, jop_before, jop_after,
    #    sys_before, sys_after, rop_before, rop_after); counts are None
    #   when the corresponding run failed or a binary was missing.

    # Run ROPgadget with `opts` on both binaries; return the pair of gadget
    # counts (an entry is None if the corresponding run failed).
    def run_ropgadget_on_pair(bench_name, prog_before, prog_after, opts, \
                              logfile, outfd, errfd):
        # Rewind the log descriptors so this invocation's output overwrites
        # the previous one in the same file
        outfd.seek(0,0)
        errfd.seek(0,0)
        res_before, res_after = None, None
        args = [get_ropgadget(), '--binary', prog_before] + opts
        returncode,_,_,_ = cmd.run_limited_cmd(args, outfd, errfd, bench_name)
        # ROPgadget returns 1 if success
        if returncode <> 1:
            cmd.warning("something failed while running \"" + ' '.join(args) + "\"")
        else:
            res_before = read_ropgadget_output(logfile)
        outfd.seek(0,0)
        errfd.seek(0,0)
        args = [get_ropgadget(), '--binary', prog_after] + opts
        returncode,_,_,_ = cmd.run_limited_cmd(args, outfd, errfd, bench_name)
        # ROPgadget returns 1 if success
        if returncode <> 1:
            cmd.warning("something failed while running \"" + ' '.join(args) + "\"")
        else:
            res_after = read_ropgadget_output(logfile)
        return (res_before, res_after)

    #benchname = os.path.basename(os.path.normpath(dirname))
    benchname = execname
    outfile = benchname + ".ropgadget.out"
    errfile = benchname + ".ropgadget.err"
    outfd = open(os.path.join(workdir, outfile), "w")
    errfd = open(os.path.join(workdir, errfile), "w")
    prog_before = os.path.join(dirname, benchname + "_orig")
    prog_after = os.path.join(dirname, benchname + "_occamized")
    total_before, total_after = None, None
    rop_before, rop_after = None, None
    jop_before, jop_after = None, None
    sys_before, sys_after = None, None
    if os.path.exists(prog_before) and os.path.exists(prog_after):
        # total gadgets: ropgadget --binary prog
        # only jop:      ropgadget --binary --norop --nosys
        # only sys:      ropgadget --binary --nojop --norop
        # only rop:      ropgadget --binary --nojop --nosys
        logfile = os.path.join(workdir, outfile)
        opts = []
        (total_before, total_after) = run_ropgadget_on_pair(benchname, prog_before, prog_after, \
                                                            opts, logfile, outfd, errfd)
        opts = ['--norop','--nosys']
        (jop_before, jop_after) = run_ropgadget_on_pair(benchname, prog_before, prog_after, \
                                                        opts, logfile, outfd, errfd)
        opts = ['--nojop', '--norop']
        (sys_before, sys_after) = run_ropgadget_on_pair(benchname, prog_before, prog_after, \
                                                        opts, logfile, outfd, errfd)
        opts = ['--nojop', '--nosys']
        (rop_before, rop_after) = run_ropgadget_on_pair(benchname, prog_before, prog_after, \
                                                        opts, logfile, outfd, errfd)
    else:
        if not os.path.exists(prog_before):
            cmd.warning(prog_before + " does not exist")
        if not os.path.exists(prog_after):
            cmd.warning(prog_after + " does not exist")
    outfd.close()
    errfd.close()
    return (benchname, \
            total_before, total_after, \
            jop_before, jop_after, \
            sys_before, sys_after, \
            rop_before, rop_after)
def parse_opt (argv):
    # Parse command-line options for the benchmark runner and return the
    # resulting argparse namespace.
    parser = a.ArgumentParser (description='Benchmark Runner for OCCAM')
    parser.add_argument ('--save-temps', '--keep-temps',
                         dest="save_temps",
                         help="Do not delete temporary files",
                         action="store_true", default=False)
    parser.add_argument ('--temp-dir', dest='temp_dir', metavar='DIR',
                         help="Temporary directory", default=None)
    parser.add_argument ('--cpu', type=int, dest='cpu', metavar='SEC',
                         help='CPU time limit (seconds)', default=-1)
    parser.add_argument ('--mem', type=int, dest='mem', metavar='MB',
                         help='MEM limit (MB)', default=-1)
    parser.add_argument ('--sets', type=str,
                         help="List of .set files (separated by comma)",
                         dest='benchmark_sets', default="")
    parser.add_argument ('--slash-opts', type=str,
                         help="Options passed to slash.py (options separated by comma)",
                         dest='slash_opts', default="")
    parser.add_argument ('--rop', help="Generate statistics about ROP/SYS/JOP gadgets (not maintained anymore)",
                         dest='rop', default=False, action="store_true")
    return parser.parse_args (argv)
def main (argv):
args = parse_opt (argv[1:])
workdir = cmd.create_work_dir(args.temp_dir, args.save_temps)
sets = []
occam_opts = []
occam_tab = list()
ropgadget_tab = list()
for slash_opt in args.slash_opts.split(","):
occam_opts += [slash_opt]
for benchmark_set in args.benchmark_sets.split(","):
if benchmark_set is not "":
sets += [benchmark_set]
if not sets:
print "Warning: you need to choose a benchmark set. Use option --sets"
return 0
if args.rop:
print "Warning: option --rop is not maintained anymore."
args.rop = False
dt = datetime.datetime.now ().strftime ('%d/%m/%Y %H:%M:%S')
print "[" + dt + "] " + "STARTED runbench"
for s in sets:
for t in get_benchmarks(s):
if t['enabled'] == 'false':
continue
dirname = t['dirname']
if not os.path.isdir(dirname):
dirname = os.path.join(get_occam_home(), dirname)
if not os.path.isdir(dirname):
cmd.raise_error(t['dirname'] + " is not a directory")
execname = t['execname']
res = run_occam(dirname, execname, workdir, args.cpu, args.mem, occam_opts)
occam_tab.append(res)
if args.rop:
res = run_ropgadget(dirname, execname, workdir, args.cpu, args.mem)
ropgadget_tab.append(res)
dt = datetime.datetime.now ().strftime ('%d/%m/%Y %H:%M:%S')
print "[" + dt + "] | |
#!/usr/bin/env python
# encoding: utf-8
"""
first_level.py
Created by O.Colizoli, June-2019
Last update: 11-06-2019
Python version 2.7
The following packages need to be installed because they are called from the command line, but not imported:
fsl
"""
# TO DO:
# bids output files: sub, session, task, run, filetypebids coverted does latter...
# run-01 or run-1?? bids coverted does latter...
import os, subprocess, sys
import shutil as sh
import nibabel as nib
import pandas as pd
import numpy as np
from IPython import embed as shell # for Oly's debugging only
class first_level_class(object):
def __init__(self, subject, analysis_dir, deriv_dir, mask_dir, template_dir, timing_files_dir, TR):
self.subject = 'sub-'+str(subject)
self.analysis_dir = str(analysis_dir)
self.deriv_dir = str(deriv_dir)
self.mask_dir = str(mask_dir)
self.template_dir = str(template_dir)
self.timing_files_dir = str(timing_files_dir)
self.TR = str(TR)
if not os.path.isdir(self.mask_dir):
os.mkdir(self.mask_dir)
if not os.path.isdir(self.template_dir):
os.mkdir(self.template_dir)
if not os.path.isdir(self.timing_files_dir):
os.mkdir(self.timing_files_dir)
os.mkdir(os.path.join(self.timing_files_dir,'task-colors'))
os.mkdir(os.path.join(self.timing_files_dir,'task-letters'))
os.mkdir(os.path.join(self.timing_files_dir,'task-rsa'))
self.preprocess_dir = os.path.join(self.deriv_dir,'preprocessing')
self.first_level_dir = os.path.join(self.deriv_dir,'first_level')
if not os.path.isdir(self.first_level_dir):
os.mkdir(self.first_level_dir)
if not os.path.isdir(os.path.join(self.first_level_dir,'task-colors')):
os.mkdir(os.path.join(self.first_level_dir,'task-colors'))
if not os.path.isdir(os.path.join(self.first_level_dir,'task-letters')):
os.mkdir(os.path.join(self.first_level_dir,'task-letters'))
if not os.path.isdir(os.path.join(self.first_level_dir,'task-rsa')):
os.mkdir(os.path.join(self.first_level_dir,'task-rsa'))
# write unix commands to job to run in parallel
self.first_level_job_path = os.path.join(self.analysis_dir,'jobs','job_first_level_{}.txt'.format(self.subject))
if not os.path.exists(self.first_level_job_path):
self.first_level_job = open(self.first_level_job_path, "w")
self.first_level_job.write("#!/bin/bash\n")
self.first_level_job.close()
def loc_combine_epi(self, task):
# concatenate the 2 sessions of EPI data to perform a single GLM
# output is the concantenated bold of both sessions (input to first level)
outFile = os.path.join(self.first_level_dir,'task-{}'.format(task),'task-{}_{}_bold_mni.nii.gz'.format(task,self.subject))
N1 = nib.load(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}.feat'.format(task,self.subject,'ses-01'),'filtered_func_data_mni.nii.gz')) # preprocessed session 1
N2 = nib.load(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}.feat'.format(task,self.subject,'ses-02'),'filtered_func_data_mni.nii.gz')) # preprocessed session 2
BOLD1 = N1.get_data()
BOLD2 = N2.get_data()
BOLD = np.concatenate([BOLD1,BOLD2],axis=-1)
outData = nib.Nifti1Image(BOLD, affine=N1.affine, header=N1.header) # pass affine and header from last MNI image
outData.set_data_dtype(np.float32)
nib.save(outData, outFile)
print('success: loc_combine_epi {}'.format(self.subject))
def loc_combine_timing_files(self, task):
# concatenate the timing files: 2nd session have to add time = #TRs * TR
# GLM timing files for blocked design (localizers)
# ABAB blocked designs
stim_dur = 0.75 # stimulus duration seconds
# take FIRST session's BOLD to count TRs to add to 2nd sessions' onsets
BOLD1 = nib.load(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}.feat'.format(task,self.subject,'ses-01'),'filtered_func_data_mni.nii.gz'))
ntrs = BOLD1.shape[-1] # number of TRs
time2add = ntrs*float(self.TR) # time to add in seconds to block 2's onsets
print('Time to add to run 2: {}'.format(time2add))
# open block 2's events
events2 = pd.read_csv(os.path.join(self.deriv_dir,self.subject,'ses-02','func','{}_{}_task-{}_events.tsv'.format(self.subject,'ses-02',task)),sep='\t')
events2['onset'] = events2['onset'] + time2add
# open block 1's events and concantenate
events1 = pd.read_csv(os.path.join(self.deriv_dir,self.subject,'ses-01','func','{}_{}_task-{}_events.tsv'.format(self.subject,'ses-01',task)),sep='\t')
events = pd.concat([events1,events2],axis=0)
events.to_csv(os.path.join(self.first_level_dir,'task-{}'.format(task),'task-{}_{}_events.tsv'.format(task,self.subject)),sep='\t') # save concantenated events file
# generate 3 column files for each of the 2x2 conditions
for c,cond in enumerate(np.unique(events['trial_type'])):
outFile = os.path.join(self.deriv_dir,'timing_files','task-{}'.format(task),'task-{}_{}_{}.txt'.format(task,self.subject,cond))
# main regressors
first = np.array(events[events['trial_type']==cond]['onset']) # onset in s
second = np.repeat(stim_dur, len(first)) # duration in s
third = np.array(np.repeat(1, len(first)),dtype=int) # amplitude
output = np.array(np.vstack((first, second, third)).T) # 1 x 3
np.savetxt(outFile, output, delimiter='/t', fmt='%.2f %.2f %i') #3rd column has to be an integer for FSL!
print(outFile)
print('success: loc_combine_timing_files {}'.format(self.subject))
def loc_nuisance_regressors(self, task):
# concatenate the 2 sessions of motion parameters from preprocessing
# these are found in derivatives/preprocessing/task/task_subject_session.feat/mc/prefiltered_func_data_mcf.par
# Nrows = NTRs, Ncols = 6 (mc directions), note space separated
# This function also outputs the columns of 1s and 0s for each blocks' mean
#### Motion parameters ####
mc1 = pd.read_csv(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}.feat'.format(task,self.subject,'ses-01'),'mc','prefiltered_func_data_mcf.par'),header=None,sep='\s+',float_precision='round_trip')
mc2 = pd.read_csv(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}.feat'.format(task,self.subject,'ses-02'),'mc','prefiltered_func_data_mcf.par'),header=None,sep='\s+',float_precision='round_trip')
for col in mc1.columns.values: # convert data to numeric
mc1[col] = pd.to_numeric(mc1[col])
mc2[col] = pd.to_numeric(mc2[col])
mc = pd.concat([mc1,mc2],axis=0) # concantenate the motion regressors
#### Session means - make columns of 1s and 0s for the length of each session ####
b1 = np.concatenate((np.repeat(1,len(mc1)),np.repeat(0,len(mc2))),axis=0) # session 1: 1s then 0s
b2 = np.concatenate((np.repeat(0,len(mc1)),np.repeat(1,len(mc2))),axis=0) # sessoin 2: 0s then 1s
# add to motion dataframe
mc['b1'] = b1
mc['b2'] = b2
# save without header or index! suppress scientific notation!
mc.to_csv(os.path.join(self.timing_files_dir,'task-{}'.format(task),'task-{}_{}_nuisance_regressors.txt'.format(task,self.subject)),header=None,index=False,sep=',',float_format='%.15f')
print('success: loc_nuisance_regressors {}'.format(self.subject))
def loc_fsf(self,task):
# Creates the FSF files for each subject's first level analysis - localizers
# Run the actual FSF from the command line: feat task-colors_sub-01_ses-01.fsf
template_filename = os.path.join(self.analysis_dir,'templates','task-{}_first_level_template.fsf'.format(task))
markers = [
'[$OUTPUT_PATH]',
'[$NR_TRS]',
'[$INPUT_FILENAME]',
'[$NUISANCE]',
'[$EV1_FILENAME]',
'[$EV2_FILENAME]',
'[$NR_VOXELS]',
'[$MNI_BRAIN]'
]
FSF_filename = os.path.join(self.first_level_dir,'task-{}'.format(task),'task-{}_{}.fsf'.format(task,self.subject)) # save fsf
output_path = os.path.join(self.first_level_dir,'task-{}'.format(task),'task-{}_{}'.format(task,self.subject))
BOLD = os.path.join(self.first_level_dir,'task-{}'.format(task),'task-{}_{}_bold_mni.nii.gz'.format(task,self.subject))
# calculate size of input data
nii = nib.load(BOLD).get_data() # only do once
nr_trs = str(nii.shape[-1])
nr_voxels = str(nii.size)
# motion parameters and columns for each session's mean
nuisance_regressors = os.path.join(self.timing_files_dir,'task-{}'.format(task),'task-{}_{}_nuisance_regressors.txt'.format(task,self.subject))
# timing files for each EV
if task == 'colors':
EVS = ['Color','Black']
elif task == 'letters':
EVS = ['Letter','Symbol']
EV1_path = os.path.join(self.deriv_dir,'timing_files','task-{}'.format(task),'task-{}_{}_{}.txt'.format(task,self.subject,EVS[0]))
EV2_path = os.path.join(self.deriv_dir,'timing_files','task-{}'.format(task),'task-{}_{}_{}.txt'.format(task,self.subject,EVS[1]))
MNI_BRAIN = os.path.join(self.mask_dir, 'MNI152_T1_2mm_brain.nii.gz')
# replacements
replacements = [ # needs to match order of 'markers'
output_path,
nr_trs,
BOLD,
nuisance_regressors,
EV1_path,
EV2_path,
nr_voxels,
MNI_BRAIN
]
# open the template file, load the text data
f = open(template_filename,'r')
filedata = f.read()
f.close()
# search and replace
for st,this_string in enumerate(markers):
filedata = filedata.replace(this_string,replacements[st])
# write output file
f = open(FSF_filename,'w')
f.write(filedata)
f.close()
# open job and write command as new line
cmd = 'feat {}'.format(FSF_filename)
self.first_level_job = open(self.first_level_job_path, "a") # append is important, not write
self.first_level_job.write(cmd) # feat command
self.first_level_job.write("\n\n") # new line
self.first_level_job.close()
print('success: loc_fsf {}'.format(FSF_filename))
def rsa_combine_epi(self,task='rsa'):
# concatenates the 4 runs per session of EPI data to perform a single GLM (RSA task)
for self.session in ['ses-01','ses-02']:
# output is the concantenated bold of all runs per session (input to first level)
outFile = os.path.join(self.first_level_dir,'task-{}'.format(task),'task-{}_{}_{}_bold_mni.nii.gz'.format(task,self.subject,self.session))
N1 = nib.load(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}_{}.feat'.format(task,self.subject,self.session,'run-01'),'filtered_func_data_mni.nii.gz')) # preprocessed run 1
N2 = nib.load(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}_{}.feat'.format(task,self.subject,self.session,'run-02'),'filtered_func_data_mni.nii.gz')) # preprocessed run 2
N3 = nib.load(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}_{}.feat'.format(task,self.subject,self.session,'run-03'),'filtered_func_data_mni.nii.gz')) # preprocessed run 3
N4 = nib.load(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}_{}.feat'.format(task,self.subject,self.session,'run-04'),'filtered_func_data_mni.nii.gz')) # preprocessed run 4
BOLD1 = N1.get_data()
BOLD2 = N2.get_data()
BOLD3 = N3.get_data()
BOLD4 = N4.get_data()
BOLD = np.concatenate([BOLD1,BOLD2,BOLD3,BOLD4],axis=-1)
outData = nib.Nifti1Image(BOLD, affine=N1.affine, header=N1.header) # pass affine and header from last MNI image
outData.set_data_dtype(np.float32)
nib.save(outData, outFile)
print(BOLD.shape)
print('success: rsa_combine_epi')
def rsa_combine_events(self,task='rsa'):
        # for the RSA task, concatenates the events files of all 4 runs and outputs in first_level directory
for self.session in ['ses-01','ses-02']:
### 1 ###
# take FIRST run's BOLD to count TRs to add to 2nd runs' onsets
BOLD1 = nib.load(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}_{}.feat'.format(task,self.subject,self.session,'run-01'),'filtered_func_data_mni.nii.gz'))
ntrs = BOLD1.shape[-1] # number of TRs
time2add = ntrs*float(self.TR) # time to add in seconds to run 2's onsets
print('Time to add to run 2: {}'.format(time2add))
# open run 2's events
events2 = pd.read_csv(os.path.join(self.deriv_dir,self.subject,self.session,'func','{}_{}_task-{}_{}_events.tsv'.format(self.subject,self.session,task,'run-02')),sep='\t')
events2['onset'] = events2['onset'] + time2add
### 2 ###
# take SECOND run's BOLD to count TRs to add to 3rd runs' onsets
BOLD2 = nib.load(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}_{}.feat'.format(task,self.subject,self.session,'run-02'),'filtered_func_data_mni.nii.gz'))
ntrs = BOLD2.shape[-1] # number of TRs
time2add = time2add + ntrs*float(self.TR) # time to add in seconds to run 3's onsets
print('Time to add to run 3: {}'.format(time2add))
# open run 3's events
events3 = pd.read_csv(os.path.join(self.deriv_dir,self.subject,self.session,'func','{}_{}_task-{}_{}_events.tsv'.format(self.subject,self.session,task,'run-03')),sep='\t')
events3['onset'] = events3['onset'] + time2add
### 4 ###
# take THIRD run's BOLD to count TRs to add to 4th runs' onsets
BOLD3 = nib.load(os.path.join(self.preprocess_dir,'task-{}'.format(task),'task-{}_{}_{}_{}.feat'.format(task,self.subject,self.session,'run-03'),'filtered_func_data_mni.nii.gz'))
ntrs = BOLD3.shape[-1] # number of TRs
time2add = time2add + ntrs*float(self.TR) # time to add in seconds to run 4's onsets
print('Time to add to run 4: {}'.format(time2add))
# open block 4's events
events4 = pd.read_csv(os.path.join(self.deriv_dir,self.subject,self.session,'func','{}_{}_task-{}_{}_events.tsv'.format(self.subject,self.session,task,'run-04')),sep='\t')
events4['onset'] = events4['onset'] + time2add
            # concatenate all runs
# open run 1's events
events1 = pd.read_csv(os.path.join(self.deriv_dir,self.subject,self.session,'func','{}_{}_task-{}_{}_events.tsv'.format(self.subject,self.session,task,'run-01')),sep='\t')
events = pd.concat([events1,events2,events3,events4],axis=0)
events = events.loc[:, ~events.columns.str.contains('^Unnamed')] # drop unnamed columns
# add unique identifiers for each color
rgb_codes = [
(events['r'] == 188) & (events['g'] == 188) & (events['b'] == 188), # grey (oddballs)
(events['r'] == 117) & (events['g'] == 117) & (events['b'] == 117), # grey (oddballs)
(events['r'] == 128) & (events['g'] == 128) & (events['b'] == 128), # grey (oddballs)
(events['r'] == 0) & (events['g'] == 0) & (events['b'] == 0), # black
(events['r'] == 0) & (events['g'] == 163) & (events['b'] == 228), # light_blue
(events['r'] == 161) & (events['g'] == 199) & (events['b'] == 70), # lime_green
(events['r'] == 183) & (events['g'] == 61) & (events['b'] == 160), # magenta
(events['r'] == 181) & (events['g'] == 44) & (events['b'] == 67), # dark_red
(events['r'] == 16) & (events['g'] == 114) & (events['b'] == | |
# gh_stars: 0
# multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
class ReflexAgent(Agent):
    """
    A reflex agent chooses an action at each choice point by examining
    its alternatives via a state evaluation function.

    The code below is provided as a guide. You are welcome to change
    it in any way you see fit, so long as you don't touch our method
    headers.
    """

    def getAction(self, gameState):
        """
        Choose among the highest-scoring legal actions, breaking ties randomly.

        getAction takes a GameState and returns some Directions.X for some X in
        the set {NORTH, SOUTH, WEST, EAST, STOP}.
        """
        # Collect legal moves and score each candidate action
        legalMoves = gameState.getLegalActions()
        scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
        bestScore = max(scores)
        bestIndices = [index for index, score in enumerate(scores) if score == bestScore]
        chosenIndex = random.choice(bestIndices)  # Pick randomly among the best
        return legalMoves[chosenIndex]

    def evaluationFunction(self, currentGameState, action):
        """
        Score the successor state reached by taking ``action``; higher is better.

        The heuristic rewards keeping distance from the nearest ghost, eating a
        pellet on this move, and closing in on the nearest remaining pellet.
        """
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()

        score = 0
        # BUGFIX: the original only looked at the FIRST ghost; use the distance
        # to the NEAREST ghost so multi-ghost layouts are handled safely.
        if newGhostStates:
            dis_to_ghost = min(manhattanDistance(newPos, ghost.getPosition())
                               for ghost in newGhostStates)
            if dis_to_ghost <= 1:
                score -= 100000  # stepping next to a ghost is (near) suicide
            else:
                score += 5 * dis_to_ghost
        # Strongly reward actually eating a pellet on this move
        current_foodlist = currentGameState.getFood().asList()
        new_foodlist = newFood.asList()
        if len(current_foodlist) > len(new_foodlist):
            score += 10000
        # Otherwise steer toward the closest remaining pellet
        if new_foodlist:
            min_dis = min(manhattanDistance(newPos, food) for food in new_foodlist)
            score -= 10 * min_dis
        return score
def scoreEvaluationFunction(currentGameState):
    """
    Default evaluation: the game score shown in the Pacman GUI.

    Meant for use with adversarial search agents rather than reflex agents.
    """
    return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
    """
    This class provides some common elements to all of your
    multi-agent searchers. Any methods defined here will be available
    to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.

    You *do not* need to make any changes here, but you can if you want to
    add functionality to all your adversarial search agents. Please do not
    remove anything, however.

    Note: this is an abstract class: one that should not be instantiated. It's
    only partially specified, and designed to be extended. Agent (game.py)
    is another abstract class.
    """

    def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
        # evalFn is the *name* of an evaluation function, resolved from this
        # module's globals; depth arrives as a string (both come from the
        # command line, hence the string defaults).
        self.index = 0 # Pacman is always agent index 0
        self.evaluationFunction = util.lookup(evalFn, globals())
        self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent):
    """
    Your minimax agent (question 2)
    """

    def getAction(self, gameState):
        """
        Returns the minimax action from the current gameState using self.depth
        and self.evaluationFunction.
        """
        # Depth-first minimax; the chosen root move is stored in self.action
        # as a side effect of the first _Max level.
        self._DFMiniMax(gameState, depth=0, agent=0)
        return self.action

    def _DFMiniMax(self, state, depth, agent=0):
        # Terminal states and the depth limit are scored by the evaluation
        # function; otherwise dispatch to the max (Pacman) or min (ghost) layer.
        agent %= state.getNumAgents()
        if state.isWin() or state.isLose():
            return self.evaluationFunction(state)
        if agent != 0:
            return self._Min(state, depth, agent)
        if depth == self.depth:
            return self.evaluationFunction(state)
        return self._Max(state, depth + 1, 0)

    def _Max(self, state, depth, agent=0):
        best = float('-inf')
        for move in state.getLegalActions(agent):
            value = self._DFMiniMax(state.generateSuccessor(agent, move), depth, agent + 1)
            if value > best and depth == 1:
                self.action = move  # remember the best root move
            best = max(best, value)
        return best

    def _Min(self, state, depth, agent):
        worst = float('inf')
        for move in state.getLegalActions(agent):
            worst = min(worst, self._DFMiniMax(state.generateSuccessor(agent, move), depth, agent + 1))
        return worst
class AlphaBetaAgent(MultiAgentSearchAgent):
    """
    Your minimax agent with alpha-beta pruning (question 3)
    """

    def getAction(self, gameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction
        """
        # Alpha-beta search; the best root move is recorded in self.action as a
        # side effect of the first _Max level.
        self._AlphaBeta(gameState, depth=0, alpha=float('-inf'), beta=float('inf'), agent=0)
        return self.action

    def _AlphaBeta(self, state, depth, alpha, beta, agent=0):
        # Terminal states and the depth limit are scored by the evaluation
        # function; otherwise dispatch to the max (Pacman) or min (ghost) layer.
        agent %= state.getNumAgents()
        if state.isWin() or state.isLose():
            return self.evaluationFunction(state)
        if agent != 0:
            return self._Min(state, depth, alpha, beta, agent)
        if depth == self.depth:
            return self.evaluationFunction(state)
        return self._Max(state, depth + 1, alpha, beta, 0)

    def _Max(self, state, depth, alpha, beta, agent=0):
        best = float('-inf')
        for move in state.getLegalActions(agent):
            value = self._AlphaBeta(state.generateSuccessor(agent, move), depth, alpha, beta, agent + 1)
            if value > best and depth == 1:
                self.action = move  # remember the best root move
            best = max(best, value)
            if best >= beta:  # the min player will never allow this branch
                return best
            alpha = max(alpha, best)
        return best

    def _Min(self, state, depth, alpha, beta, agent):
        worst = float('inf')
        for move in state.getLegalActions(agent):
            worst = min(worst, self._AlphaBeta(state.generateSuccessor(agent, move), depth, alpha, beta, agent + 1))
            if worst <= alpha:  # the max player already has a better option
                return worst
            beta = min(beta, worst)
        return worst
class ExpectimaxAgent(MultiAgentSearchAgent):
    """
    Your expectimax agent (question 4)
    """

    def getAction(self, gameState):
        """
        Returns the expectimax action using self.depth and self.evaluationFunction

        All ghosts are modeled as choosing uniformly at random from their
        legal moves.
        """
        self._ExpectiMax(gameState, depth=0, agent=0)
        return self.action

    def _ExpectiMax(self, state, depth, agent=0):
        # Pacman (agent 0) maximizes; ghosts contribute their average value.
        agent %= state.getNumAgents()
        if state.isWin() or state.isLose():
            return self.evaluationFunction(state)
        if agent != 0:
            return self._Min(state, depth, agent)
        if depth == self.depth:
            return self.evaluationFunction(state)
        return self._Max(state, depth + 1, 0)

    def _Max(self, state, depth, agent=0):
        best = float('-inf')
        for move in state.getLegalActions(agent):
            value = self._ExpectiMax(state.generateSuccessor(agent, move), depth, agent + 1)
            if value > best and depth == 1:
                self.action = move  # remember the best root move
            best = max(best, value)
        return best

    def _Min(self, state, depth, agent):
        # Expected value under a uniform random ghost policy.
        moves = state.getLegalActions(agent)
        total = 0.0
        for move in moves:
            total += self._ExpectiMax(state.generateSuccessor(agent, move), depth, agent + 1)
        return total / len(moves)
def betterEvaluationFunction(currentGameState):
"""
Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
evaluation function (question 5).
DESCRIPTION: <write something here so we know what you did>
"""
"*** YOUR CODE HERE ***"
position = currentGameState.getPacmanPosition()
food_list = currentGameState.getFood().asList()
GhostStates = currentGameState.getGhostStates()
capsule = currentGameState.getCapsules()
score = 0
# consider food
min_dis_food = float('inf')
for food in food_list:
min_dis_food = min(min_dis_food, manhattanDistance(food, position))
score += 1.0 / min_dis_food
# consider ghosts
min_dis_ghost = float('inf')
for ghost in GhostStates:
temp = manhattanDistance(position, ghost.getPosition())
if (min_dis_ghost > temp):
min_dis_ghost = temp
closestGhost = ghost
if min_dis_ghost > 0:
if closestGhost.scaredTimer > 0:
score += 1.0 / min_dis_ghost
else:
if min_dis_ghost < 6:
score -= 1.0 / min_dis_ghost
if min_dis_ghost < 2:
score -= 50
# consider capsules
score -= len(capsule) * 25
if len(capsule) > 0:
| |
"""
d = {}
for t in reversed(cls.__mro__):
d = dict(d, **getattr(t, 'permissions_desc', {}))
return d.get(permission, '')
    def parent_security_context(self):
        """Return the parent of this object.

        Used for calculating permissions based on trees of ACLs.
        """
        # Delegates to the AppConfig, which knows its place in the ACL tree.
        return self.config.parent_security_context()
    @property
    def installable(self):
        """Checks whether to add a tool to the project.

        Return True if app can be installed.

        :rtype: bool
        """
        # Delegates to _installable with this config's tool name, the project's
        # neighborhood (prohibited-tool list) and its installed tool configs.
        return self._installable(self.config.tool_name,
                                 self.project.neighborhood,
                                 self.project.app_configs,
                                 )
@classmethod
def _installable(cls, tool_name, nbhd, project_tools):
if tool_name.lower() in nbhd.get_prohibited_tools():
return False
tools_list = [tool.tool_name.lower() for tool in project_tools]
return tools_list.count(tool_name.lower()) < cls.max_instances
@classmethod
def validate_mount_point(cls, mount_point):
"""Check if ``mount_point`` is valid for this Application.
In general, subclasses should not override this, but rather toggle
the strictness of allowed mount point names by toggling
:attr:`Application.relaxed_mount_points`.
:param mount_point: the mount point to validate
:type mount_point: str
:rtype: A :class:`regex Match object <_sre.SRE_Match>` if the mount
point is valid, else None
"""
re = (h.re_relaxed_tool_mount_point if cls.relaxed_mount_points
else h.re_tool_mount_point)
return re.match(mount_point)
@classmethod
def status_int(self):
"""Return the :attr:`status` of this Application as an int.
Used for sorting available Apps by status in the Admin interface.
"""
return self.status_map.index(self.status)
    @classmethod
    def icon_url(cls, size):
        """Return URL for icon of the given ``size``.

        Subclasses can define their own icons by overriding
        :attr:`icons`.
        """
        resource, url = cls.icons.get(size), ''
        if resource:
            resource_path = os.path.join('nf', resource)
            # Prefer the app package's own bundled copy of the icon; fall back
            # to the theme's copy when the app doesn't ship it.
            url = (g.forge_static(resource) if cls.has_resource(resource_path)
                   else g.theme_href(resource))
        return url
    @classmethod
    @memoize
    def has_resource(cls, resource_path):
        """Determine whether this Application has the resource pointed to by
        ``resource_path``.

        If the resource is not found for the immediate class, its parents
        will be searched. The return value is the class that "owns" the
        resource, or None if the resource is not found.
        """
        # Walk the MRO so subclasses inherit resources from parent app classes.
        for klass in [o for o in cls.__mro__ if issubclass(o, Application)]:
            if pkg_resources.resource_exists(klass.__module__, resource_path):
                return klass
        # implicit None when no class in the MRO provides the resource
    def has_access(self, user, topic):
        """Return True if ``user`` can send email to ``topic``.
        Default is False.

        :param user: :class:`allura.model.User` instance
        :param topic: str
        :rtype: bool
        """
        # Tools that accept inbound email override this; the base app refuses.
        return False
    def is_visible_to(self, user):
        """Return True if ``user`` can view this app.

        :type user: :class:`allura.model.User` instance
        :rtype: bool
        """
        # Visibility is simply 'read' permission on this app's security context.
        return has_access(self, 'read')(user=user)
    def subscribe_admins(self):
        """Subscribe all project Admins (for this Application's project) to the
        :class:`allura.model.notification.Mailbox` for this Application.
        """
        # 'Admin' is the named project role; g.credentials resolves its members.
        for uid in g.credentials.userids_with_named_role(self.project._id, 'Admin'):
            model.Mailbox.subscribe(
                type='direct',
                user_id=uid,
                project_id=self.project._id,
                app_config_id=self.config._id)
def subscribe(self, user):
"""Subscribe :class:`user <allura.model.auth.User>` to the
:class:`allura.model.notification.Mailbox` for this Application.
"""
if user and user != model.User.anonymous():
model.Mailbox.subscribe(
type='direct',
user_id=user._id,
project_id=self.project._id,
app_config_id=self.config._id)
@classmethod
def default_options(cls):
"""Return a ``(name, default value)`` mapping of this Application's
:class:`config_options <ConfigOption>`.
:rtype: dict
"""
return dict(
(co.name, co.default)
for co in cls.config_options)
    @classmethod
    def options_on_install(cls):
        """
        Return a list of :class:`config_options <ConfigOption>` which should be
        configured by user during app installation.

        :rtype: list
        """
        # config_on_install lists the option names exposed at install time.
        return [o for o in cls.config_options
                if o.name in cls.config_on_install]
    def install(self, project):
        """Set up the tool in *project*: create its discussion and subscribe
        the project admins.
        """
        # Create the discussion object
        discussion = self.DiscussionClass(
            shortname=self.config.options.mount_point,
            name='%s Discussion' % self.config.options.mount_point,
            description='Forum for %s comments' % self.config.options.mount_point)
        # Flush first so the discussion gets an _id to store on the config.
        session(discussion).flush()
        self.config.discussion_id = discussion._id
        self.subscribe_admins()
    def uninstall(self, project=None, project_id=None):
        """Tear the tool down: de-index its artifacts, delete its discussions
        and remove the app config. Pass either ``project`` or ``project_id``.
        """
        if project_id is None:
            # NOTE(review): raises AttributeError if both args are None --
            # callers are expected to supply one of them.
            project_id = project._id
        # De-index all the artifacts belonging to this tool in one fell swoop
        index_tasks.solr_del_tool.post(project_id, self.config.options['mount_point'])
        for d in model.Discussion.query.find({
                'project_id': project_id,
                'app_config_id': self.config._id}):
            d.delete()
        self.config.delete()
        session(self.config).flush()
    @property
    def uninstallable(self):
        """Return True if this app can be uninstalled. Controls whether the
        'Delete' option appears on the admin menu for this app.

        By default, an app can be uninstalled iff it can be installed, although
        some apps may want/need to override this (e.g. an app which can
        not be installed directly by a user, but may be uninstalled).
        """
        return self.installable
def main_menu(self):
"""Return a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
to display in the main project nav for this Application.
Default implementation returns :attr:`sitemap` without any children.
"""
sitemap_without_children = []
for sm in self.sitemap:
sm_copy = copy(sm)
sm_copy.children = []
sitemap_without_children.append(sm_copy)
return sitemap_without_children
    def sidebar_menu(self):
        """Return a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
        to render in the left sidebar for this Application.
        """
        # Base app contributes nothing; tools override to add their own links.
        return []
    def sidebar_menu_js(self):
        """Return Javascript needed by the sidebar menu of this Application.

        :return: a string of Javascript code
        """
        # No extra JS by default; tools override when their sidebar needs it.
        return ""
    @LazyProperty
    def _webhooks(self):
        """A list of webhooks that can be triggered by this app.

        :return: a list of :class:`WebhookSender <allura.webhooks.WebhookSender>`
        """
        # Select the registered webhook senders that list this tool among the
        # tools they are triggered by. (.itervalues() -- this file is Python 2.)
        tool_name = self.config.tool_name.lower()
        webhooks = [w for w in g.entry_points['webhooks'].itervalues()
                    if tool_name in w.triggered_by]
        return webhooks
    def admin_menu(self, force_options=False):
        """Return the admin menu for this Application.

        Default implementation will return a menu with up to 4 links:
            - 'Permissions', if the current user has admin access to the
              project in which this Application is installed
            - 'Options', if this Application has custom options, or
              ``force_options`` is True
            - 'Rename', for editing this Application's label
            - 'Webhooks', if this Application can trigger any webhooks

        Subclasses should override this method to provide additional admin
        menu items.

        :param force_options: always include an 'Options' link in the menu,
            even if this Application has no custom options
        :return: a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
        """
        admin_url = c.project.url() + 'admin/' + \
            self.config.options.mount_point + '/'
        links = []
        if self.permissions and has_access(c.project, 'admin')():
            links.append(
                SitemapEntry('Permissions', admin_url + 'permissions'))
        # NOTE(review): '> 3' presumably means "has options beyond the standard
        # three every tool defines" -- confirm against ConfigOption defaults.
        if force_options or len(self.config_options) > 3:
            links.append(
                SitemapEntry('Options', admin_url + 'options', className='admin_modal'))
        links.append(
            SitemapEntry('Rename', admin_url + 'edit_label', className='admin_modal'))
        if len(self._webhooks) > 0:
            links.append(SitemapEntry('Webhooks', admin_url + 'webhooks'))
        return links
    @LazyProperty
    def admin_menu_collapse_button(self):
        """Returns button for showing/hiding admin sidebar menu"""
        # The fixed element id is hooked by theme JS to toggle the section.
        return SitemapEntry(
            label=u'Admin - {}'.format(self.config.options.mount_label),
            extra_html_attrs={
                'id': 'sidebar-admin-menu-trigger',
            })
    @LazyProperty
    def admin_menu_delete_button(self):
        """Returns button for deleting an app if app can be deleted.

        Implicitly returns None when the tool is anchored in the neighborhood
        or is not uninstallable.
        """
        anchored_tools = self.project.neighborhood.get_anchored_tools()
        anchored = self.tool_label.lower() in anchored_tools.keys()
        if self.uninstallable and not anchored:
            return SitemapEntry(
                label='Delete Everything',
                url=self.admin_url + 'delete',
                className='admin_modal',
            )
    def handle_message(self, topic, message):
        """Handle incoming email msgs addressed to this tool.
        Default is a no-op.

        :param topic: portion of destination email address preceding the '@'
        :type topic: str
        :param message: parsed email message
        :type message: dict - result of
            :func:`allura.lib.mail_util.parse_message`
        :rtype: None
        """
        pass
    def handle_artifact_message(self, artifact, message):
        """Handle message addressed to this Application.

        :param artifact: Specific artifact to which the message is addressed
        :type artifact: :class:`allura.model.artifact.Artifact`
        :param message: the message
        :type message: :class:`allura.model.artifact.Message`

        Default implementation posts the message to the appropriate discussion
        thread for the artifact.
        """
        # Find ancestor comment and thread
        thd, parent_id = artifact.get_discussion_thread(message)
        # Handle attachments
        message_id = message['message_id']
        if message.get('filename'):
            # Special case - the actual post may not have been created yet
            log.info('Saving attachment %s', message['filename'])
            fp = StringIO(message['payload'])
            self.AttachmentClass.save_attachment(
                message['filename'], fp,
                content_type=message.get(
                    'content_type', 'application/octet-stream'),
                discussion_id=thd.discussion_id,
                thread_id=thd._id,
                post_id=message_id,
                artifact_id=message_id)
            # attachment parts carry no text body, so we are done
            return
        # Handle duplicates (from multipart mail messages)
        post = self.PostClass.query.get(_id=message_id)
        if post:
            log.info(
                'Existing message_id %s found - saving this as text attachment' %
                message_id)
            # Store the alternate part as utf-8 bytes when possible; fall back
            # to the raw payload on failure (Python 2 str/unicode quirk).
            try:
                fp = StringIO(message['payload'].encode('utf-8'))
            except UnicodeDecodeError:
                fp = StringIO(message['payload'])
            post.attach(
                'alternate', fp,
                content_type=message.get(
                    'content_type', 'application/octet-stream'),
                discussion_id=thd.discussion_id,
                thread_id=thd._id,
                post_id=message_id)
        else:
            # First time we see this message: post it on the artifact's thread.
            text = message['payload'] or '--no text body--'
            post = thd.post(
                message_id=message_id,
                parent_id=parent_id,
                text=text,
                subject=message['headers'].get('Subject', 'no subject'))
    def bulk_export(self, f, export_path='', with_attachments=False):
        """Export all artifacts in the tool into json file.

        :param f: File Object to write to

        Set exportable to True for applications implementing this.
        """
        # Abstract hook: concrete tools that support export must override.
        raise NotImplementedError('bulk_export')
    def doap(self, parent):
        """App's representation for DOAP API.

        :param parent: Element to contain the results
        :type parent: xml.etree.ElementTree.Element or xml.etree.ElementTree.SubElement
        """
        # Nested sf:feature/sf:Feature elements; the inner one carries the
        # human-readable label and the app's absolute URL.
        feature = ET.SubElement(parent, 'sf:feature')
        feature = ET.SubElement(feature, 'sf:Feature')
        ET.SubElement(feature, 'name').text = self.config.options.mount_label
        ET.SubElement(feature, 'foaf:page', {'rdf:resource': h.absurl(self.url)})
    def __json__(self):
        """App's representation for JSON API.

        Returns dict that will be included in project's API under tools key.
        """
        return {
            'name': self.config.tool_name,
            'mount_point': self.config.options.mount_point,
            'url': self.config.url(),
            'icons': self.icons,
            'installable': self.installable,
            'tool_label': self.tool_label,
            'mount_label': self.config.options.mount_label
        }
    def get_attachment_export_path(self, path='', *args):
        """Return ``<path>/<mount_point>/<args...>`` for bulk-export attachments."""
        return os.path.join(path, self.config.options.mount_point, *args)
def make_dir_for_attachments(self, path):
if not os.path.exists(path):
os.makedirs(path)
    def save_attachments(self, path, attachments):
        """Write each attachment's bytes into ``path`` (created if needed)."""
        self.make_dir_for_attachments(path)
        for attachment in attachments:
            attachment_path = os.path.join(
                path,
                os.path.basename(attachment.filename)
            )
            # encode(): the filesystem API here expects bytes (Python 2);
            # 'replace' avoids crashing on un-encodable filename characters.
            with open(attachment_path.encode('utf8', 'replace'), 'wb') as fl:
                fl.write(attachment.rfile().read())
class AdminControllerMixin(object):
"""Provides common functionality admin controllers need"""
def _before(self, *remainder, **params):
# Display app's sidebar on admin page, instead of | |
# src/python/turicreate/toolkits/clustering/dbscan.py
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Class definition and create method for DBSCAN clustering.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import time as _time
import logging as _logging
import turicreate as _tc
import turicreate.aggregate as _agg
from turicreate.toolkits._model import CustomModel as _CustomModel
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits._internal_utils import _toolkit_repr_print
from turicreate.toolkits._private_utils import _summarize_accessible_fields
from turicreate.toolkits._model import PythonProxy as _PythonProxy
def create(dataset, features=None, distance=None, radius=1.,
min_core_neighbors=10, verbose=True):
"""
Create a DBSCAN clustering model. The DBSCAN method partitions the input
dataset into three types of points, based on the estimated probability
density at each point.
- **Core** points have a large number of points within a given neighborhood.
Specifically, `min_core_neighbors` must be within distance `radius` of a
point for it to be considered a core point.
- **Boundary** points are within distance `radius` of a core point, but
don't have sufficient neighbors of their own to be considered core.
- **Noise** points comprise the remainder of the data. These points have too
few neighbors to be considered core points, and are further than distance
`radius` from all core points.
Clusters are formed by connecting core points that are neighbors of each
other, then assigning boundary points to their nearest core neighbor's
cluster.
Parameters
----------
dataset : SFrame
Training data, with each row corresponding to an observation. Must
include all features specified in the `features` parameter, but may have
additional columns as well.
features : list[str], optional
Name of the columns with features to use in comparing records. 'None'
(the default) indicates that all columns of the input `dataset` should
be used to train the model. All features must be numeric, i.e. integer
or float types.
distance : str or list[list], optional
Function to measure the distance between any two input data rows. This
may be one of two types:
- *String*: the name of a standard distance function. One of
'euclidean', 'squared_euclidean', 'manhattan', 'levenshtein',
'jaccard', 'weighted_jaccard', 'cosine', 'dot_product' (deprecated),
or 'transformed_dot_product'.
- *Composite distance*: the weighted sum of several standard distance
functions applied to various features. This is specified as a list of
distance components, each of which is itself a list containing three
items:
1. list or tuple of feature names (str)
2. standard distance name (str)
3. scaling factor (int or float)
For more information about Turi Create distance functions, please
see the :py:mod:`~turicreate.toolkits.distances` module.
For sparse vectors, missing keys are assumed to have value 0.0.
If 'distance' is left unspecified, a composite distance is constructed
automatically based on feature types.
radius : int or float, optional
Size of each point's neighborhood, with respect to the specified
distance function.
min_core_neighbors : int, optional
Number of neighbors that must be within distance `radius` of a point in
order for that point to be considered a "core point" of a cluster.
verbose : bool, optional
If True, print progress updates and model details during model creation.
Returns
-------
out : DBSCANModel
A model containing a cluster label for each row in the input `dataset`.
Also contains the indices of the core points, cluster boundary points,
and noise points.
See Also
--------
DBSCANModel, turicreate.toolkits.distances
Notes
-----
- Our implementation of DBSCAN first computes the similarity graph on the
input dataset, which can be a computationally intensive process. In the
current implementation, some distances are substantially faster than
others; in particular "euclidean", "squared_euclidean", "cosine", and
"transformed_dot_product" are quite fast, while composite distances can be
slow.
- Any distance function in the Turi Create library may be used with DBSCAN but
the results may be poor for distances that violate the standard metric
properties, i.e. symmetry, non-negativity, triangle inequality, and
identity of indiscernibles. In particular, the DBSCAN algorithm is based
on the concept of connecting high-density points that are *close* to each
other into a single cluster, but the notion of *close* may be very
counterintuitive if the chosen distance function is not a valid metric.
The distances "euclidean", "manhattan", "jaccard", and "levenshtein" will
likely yield the best results.
References
----------
- <NAME>., et al. (1996) `A Density-Based Algorithm for Discovering
Clusters in Large Spatial Databases with Noise
<https://www.aaai.org/Papers/KDD/1996/KDD96-037.pdf>`_. In Proceedings of the
Second International Conference on Knowledge Discovery and Data Mining.
pp. 226-231.
- `Wikipedia - DBSCAN <https://en.wikipedia.org/wiki/DBSCAN>`_
- `Visualizing DBSCAN Clustering
<http://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>`_
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.dbscan.create(sf, radius=4.25, min_core_neighbors=3)
>>> model.cluster_id.print_rows(15)
+--------+------------+----------+
| row_id | cluster_id | type |
+--------+------------+----------+
| 8 | 0 | core |
| 7 | 2 | core |
| 0 | 1 | core |
| 2 | 2 | core |
| 3 | 1 | core |
| 11 | 2 | core |
| 4 | 2 | core |
| 1 | 0 | boundary |
| 6 | 0 | boundary |
| 5 | 0 | boundary |
| 9 | 0 | boundary |
| 12 | 2 | boundary |
| 10 | 1 | boundary |
| 13 | 1 | boundary |
+--------+------------+----------+
[14 rows x 3 columns]
"""
import warnings
## Start the training time clock and instantiate an empty model
logger = _logging.getLogger(__name__)
start_time = _time.time()
if distance == 'dot_product':
warnings.warn("Using a \"dot_product\" distance is deprecated. This functionality will be removed in the next major release.")
## Validate the input dataset
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Validate neighborhood parameters
if not isinstance(min_core_neighbors, int) or min_core_neighbors < 0:
raise ValueError("Input 'min_core_neighbors' must be a non-negative " +
"integer.")
if not isinstance(radius, (int, float)) or radius < 0:
raise ValueError("Input 'radius' must be a non-negative integer " +
"or float.")
## Compute all-point nearest neighbors within `radius` and count
# neighborhood sizes
knn_model = _tc.nearest_neighbors.create(dataset, features=features,
distance=distance,
method='brute_force',
verbose=verbose)
knn = knn_model.similarity_graph(k=None, radius=radius,
include_self_edges=False,
output_type='SFrame',
verbose=verbose)
neighbor_counts = knn.groupby('query_label', _agg.COUNT)
### NOTE: points with NO neighbors are already dropped here!
## Identify core points and boundary candidate points. Not all of the
# boundary candidates will be boundary points - some are in small isolated
# clusters.
if verbose:
logger.info("Identifying noise points and core points.")
boundary_mask = neighbor_counts['Count'] < min_core_neighbors
core_mask = 1 - boundary_mask
# this includes too small clusters
boundary_idx = neighbor_counts[boundary_mask]['query_label']
core_idx = neighbor_counts[core_mask]['query_label']
## Build a similarity graph on the core points
## NOTE: careful with singleton core points - the second filter removes them
# from the edge set so they have to be added separately as vertices.
if verbose:
logger.info("Constructing the core point similarity graph.")
core_vertices = knn.filter_by(core_idx, 'query_label')
core_edges = core_vertices.filter_by(core_idx, 'reference_label')
core_graph = _tc.SGraph()
core_graph = core_graph.add_vertices(core_vertices[['query_label']],
vid_field='query_label')
core_graph = core_graph.add_edges(core_edges, src_field='query_label',
dst_field='reference_label')
## Compute core point connected components and relabel to be consecutive
# integers
cc = _tc.connected_components.create(core_graph, verbose=verbose)
cc_labels = cc.component_size.add_row_number('__label')
core_assignments = cc.component_id.join(cc_labels, on='component_id',
how='left')[['__id', '__label']]
core_assignments['type'] = 'core'
## Join potential boundary points to core cluster labels (points that aren't
# really on a boundary are implicitly dropped)
if verbose:
logger.info("Processing boundary points.")
boundary_edges = knn.filter_by(boundary_idx, 'query_label')
# separate real boundary points from points in small isolated clusters
boundary_core_edges = boundary_edges.filter_by(core_idx, 'reference_label')
# join a boundary point to its single closest core point.
boundary_assignments = boundary_core_edges.groupby('query_label',
{'reference_label': _agg.ARGMIN('rank', 'reference_label')})
boundary_assignments = boundary_assignments.join(core_assignments,
on={'reference_label': '__id'})
boundary_assignments = boundary_assignments.rename({'query_label': '__id'}, inplace=True)
boundary_assignments = boundary_assignments.remove_column('reference_label', inplace=True)
boundary_assignments['type'] = 'boundary'
## Identify boundary candidates that turned out to be in small clusters but
# not on real cluster boundaries
small_cluster_idx = set(boundary_idx).difference(
boundary_assignments['__id'])
## Identify individual noise points by the | |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XmonSimulator for the Google's Xmon class quantum computers.
The simulator can be used to run all of a Circuit or to step through the
simulation Moment by Moment. The simulator requires that all gates used in
the circuit are either native to the xmon architecture (i.e. cause
`cirq.google.is_native_xmon_op` to return true) or else can be decomposed into
such operations (by being composite or having a known unitary). Measurement
gates must all have unique string keys.
A simple example:
circuit = Circuit([Moment([X(q1), X(q2)]), Moment([CZ(q1, q2)])])
sim = XmonSimulator()
results = sim.run(circuit)
Note that there are two types of methods for the simulator. "Run" methods
mimic what the quantum hardware provides, and, for example, do not give
access to the wave function. "Simulate" methods give access to the wave
function, i.e. one can retrieve the final wave function from the simulation
via.
final_state = sim.simulate(circuit).final_state
"""
import collections
import math
from typing import cast, Dict, Iterator, List, Optional, Set, Union
from typing import Tuple  # pylint: disable=unused-import

import numpy as np

from cirq import circuits, ops, study, protocols, optimizers
from cirq.google import convert_to_xmon_gates
from cirq.google.sim import xmon_stepper
from cirq.sim import simulator
class XmonOptions:
    """XmonOptions for the XmonSimulator.

    Attributes:
        num_prefix_qubits: Sharding of the wave function is performed over 2
            raised to this value number of qubits.
        min_qubits_before_shard: Sharding will be done only for this number
            of qubits or more. The default is 18.
        use_processes: Whether or not to use processes instead of threads.
            Processes can improve the performance slightly (varies by machine
            but on the order of 10 percent faster). However this varies
            significantly by architecture, and processes should not be used
            for interactive use on Windows.
    """

    def __init__(self,
                 num_shards: Optional[int] = None,
                 min_qubits_before_shard: int = 18,
                 use_processes: bool = False) -> None:
        """XmonSimulator options constructor.

        Args:
            num_shards: sharding will be done for the greatest value of a
                power of two less than this value. If None, the default will
                be used which is the smallest power of two less than or equal
                to the number of CPUs.
            min_qubits_before_shard: Sharding will be done only for this number
                of qubits or more. The default is 18.
            use_processes: Whether or not to use processes instead of threads.
                Processes can improve the performance slightly (varies by
                machine but on the order of 10 percent faster). However this
                varies significantly by architecture, and processes should not
                be used for interactive python use on Windows.
        """
        assert num_shards is None or num_shards > 0, (
            "Num_shards cannot be less than 1.")
        if num_shards is None:
            self.num_prefix_qubits = None
        else:
            # Exponent of the greatest power of two at most num_shards.
            self.num_prefix_qubits = int(math.log(num_shards, 2))
        # Zero is accepted (the check is >= 0); the message now says
        # "non-negative" instead of the original, misleading "positive".
        assert min_qubits_before_shard >= 0, (
            'min_qubits_before_shard must be non-negative.')
        self.min_qubits_before_shard = min_qubits_before_shard
        self.use_processes = use_processes
class XmonSimulator(simulator.SimulatesSamples,
                    simulator.SimulatesIntermediateWaveFunction):
    """XmonSimulator for Xmon class quantum circuits.

    This simulator has different methods for different types of simulations.

    For simulations that mimic the quantum hardware, the run methods are
    defined in the SimulatesSamples interface:
        run
        run_sweep
    These methods do not return or give access to the full wave function.

    To get access to the wave function during a simulation, including being
    able to set the wave function, the simulate methods are defined in the
    SimulatesFinalWaveFunction interface:
        simulate
        simulate_sweep
        simulate_moment_steps (for stepping through a circuit moment by moment)
    """

    def __init__(self, options: XmonOptions = None) -> None:
        """Construct a XmonSimulator.

        Args:
            options: XmonOptions configuring the simulation.
        """
        # When no options are given, fall back to the defaults.
        self.options = options or XmonOptions()

    def _run(
        self,
        circuit: circuits.Circuit,
        param_resolver: study.ParamResolver,
        repetitions: int,
    ) -> Dict[str, List[np.ndarray]]:
        """See definition in `cirq.SimulatesSamples`."""
        # Resolve parameters and convert every operation to a native xmon
        # gate before simulating.
        xmon_circuit, keys = self._to_xmon_circuit(
            circuit,
            param_resolver)
        if xmon_circuit.are_all_measurements_terminal():
            # All measurements are at the end: simulate the wave function
            # once and sample it `repetitions` times.
            return self._run_sweep_sample(xmon_circuit, repetitions)
        else:
            # Intermediate measurements collapse the state, so every
            # repetition requires a full re-simulation.
            return self._run_sweep_repeat(keys, xmon_circuit, repetitions)

    def _run_sweep_repeat(self, keys, circuit, repetitions):
        """Simulates the circuit once per repetition, recording measurements."""
        measurements = {k: [] for k in
                        keys}  # type: Dict[str, List[np.ndarray]]
        for _ in range(repetitions):
            all_step_results = self._base_iterator(
                circuit,
                qubit_order=ops.QubitOrder.DEFAULT,
                initial_state=0)
            # Collect the measurement outcomes of every moment of this run.
            for step_result in all_step_results:
                for k, v in step_result.measurements.items():
                    measurements[k].append(np.array(v, dtype=bool))
        return {k: np.array(v) for k, v in measurements.items()}

    def _run_sweep_sample(self, circuit, repetitions):
        """Simulates once without collapse, then samples terminal measurements."""
        all_step_results = self._base_iterator(
            circuit,
            qubit_order=ops.QubitOrder.DEFAULT,
            initial_state=0,
            perform_measurements=False)
        # Drain the iterator; only the final step's wave function is needed.
        step_result = None
        for step_result in all_step_results:
            pass
        if step_result is None:
            # Empty circuit: no steps were produced, so nothing to sample.
            return {}
        measurement_ops = [op for _, op, _ in
                           circuit.findall_operations_with_gate_type(
                               ops.MeasurementGate)]
        return step_result.sample_measurement_ops(measurement_ops, repetitions)

    def _simulator_iterator(
        self,
        circuit: circuits.Circuit,
        param_resolver: study.ParamResolver,
        qubit_order: ops.QubitOrderOrList,
        initial_state: Union[int, np.ndarray],
        perform_measurements: bool = True,
    ) -> Iterator['XmonStepResult']:
        """See definition in `cirq.SimulatesIntermediateWaveFunction`."""
        param_resolver = param_resolver or study.ParamResolver({})
        xmon_circuit, _ = self._to_xmon_circuit(circuit, param_resolver)
        return self._base_iterator(xmon_circuit,
                                   qubit_order,
                                   initial_state,
                                   perform_measurements)

    def _base_iterator(
        self,
        circuit: circuits.Circuit,
        qubit_order: ops.QubitOrderOrList,
        initial_state: Union[int, np.ndarray],
        perform_measurements: bool=True,
    ) -> Iterator['XmonStepResult']:
        """See _simulator_iterator."""
        qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(
            circuit.all_qubits())
        # Reversed enumeration: the last qubit in the ordering gets index 0.
        qubit_map = {q: i for i, q in enumerate(reversed(qubits))}
        if isinstance(initial_state, np.ndarray):
            # casting='safe' raises if the given state cannot be represented
            # exactly as complex64.
            initial_state = initial_state.astype(dtype=np.complex64,
                                                 casting='safe')
        with xmon_stepper.Stepper(
                num_qubits=len(qubits),
                num_prefix_qubits=self.options.num_prefix_qubits,
                initial_state=initial_state,
                min_qubits_before_shard=self.options.min_qubits_before_shard,
                use_processes=self.options.use_processes
        ) as stepper:
            for moment in circuit:
                measurements = collections.defaultdict(
                    list)  # type: Dict[str, List[bool]]
                # Z and CZ rotations of this moment are accumulated here and
                # applied together in one stepper pass after the other gates.
                phase_map = {}  # type: Dict[Tuple[int, ...], float]
                for op in moment.operations:
                    gate = cast(ops.GateOperation, op).gate
                    if isinstance(gate, ops.ZPowGate):
                        index = qubit_map[op.qubits[0]]
                        phase_map[(index,)] = cast(float, gate.exponent)
                    elif isinstance(gate, ops.CZPowGate):
                        index0 = qubit_map[op.qubits[0]]
                        index1 = qubit_map[op.qubits[1]]
                        phase_map[(index0, index1)] = cast(float,
                                                           gate.exponent)
                    elif isinstance(gate, ops.XPowGate):
                        # X rotation: a W gate with axis_half_turns = 0.
                        index = qubit_map[op.qubits[0]]
                        stepper.simulate_w(
                            index=index,
                            half_turns=gate.exponent,
                            axis_half_turns=0)
                    elif isinstance(gate, ops.YPowGate):
                        # Y rotation: a W gate with axis_half_turns = 0.5.
                        index = qubit_map[op.qubits[0]]
                        stepper.simulate_w(
                            index=index,
                            half_turns=gate.exponent,
                            axis_half_turns=0.5)
                    elif isinstance(gate, ops.PhasedXPowGate):
                        index = qubit_map[op.qubits[0]]
                        stepper.simulate_w(
                            index=index,
                            half_turns=gate.exponent,
                            axis_half_turns=gate.phase_exponent)
                    elif isinstance(gate, ops.MeasurementGate):
                        if perform_measurements:
                            # Default invert mask: no bit flips.
                            invert_mask = (
                                gate.invert_mask or len(op.qubits) * (False,))
                            for qubit, invert in zip(op.qubits, invert_mask):
                                index = qubit_map[qubit]
                                result = stepper.simulate_measurement(index)
                                if invert:
                                    result = not result
                                measurements[cast(str, gate.key)].append(result)
                    else:
                        # coverage: ignore
                        raise TypeError('{!r} is not supported by the '
                                        'xmon simulator.'.format(gate))
                # Apply all accumulated Z/CZ phases of this moment at once.
                stepper.simulate_phases(phase_map)
                yield XmonStepResult(stepper, qubit_map, measurements)

    def _to_xmon_circuit(
        self,
        circuit: circuits.Circuit,
        param_resolver: study.ParamResolver
    ) -> Tuple[circuits.Circuit, Set[str]]:
        """Resolves parameters and converts the circuit to native xmon gates.

        Returns:
            The converted circuit and the set of its measurement keys.
        """
        # TODO: Use one optimization pass.
        xmon_circuit = protocols.resolve_parameters(circuit, param_resolver)
        convert_to_xmon_gates.ConvertToXmonGates().optimize_circuit(
            xmon_circuit)
        optimizers.DropEmptyMoments().optimize_circuit(xmon_circuit)
        keys = find_measurement_keys(xmon_circuit)
        return xmon_circuit, keys
def find_measurement_keys(circuit: circuits.Circuit) -> Set[str]:
    """Returns the set of measurement keys used in `circuit`.

    Raises:
        ValueError: if the same key is attached to more than one
            measurement gate.
    """
    found = set()  # type: Set[str]
    for _, _, meas_gate in circuit.findall_operations_with_gate_type(
            ops.MeasurementGate):
        if meas_gate.key in found:
            raise ValueError('Repeated Measurement key {}'.format(meas_gate.key))
        found.add(meas_gate.key)
    return found
class XmonStepResult(simulator.StepResult):
"""Results of a step of the simulator.
Attributes:
qubit_map: A map from the Qubits in the Circuit to the the index
of this qubit for a canonical ordering. This canonical ordering is
used to define the state (see the state_vector() method).
measurements: A dictionary from measurement gate key to measurement
results, ordered by the qubits that the measurement operates on.
"""
    def __init__(
        self,
        stepper: xmon_stepper.Stepper,
        qubit_map: Dict,
        measurements: Dict[str, np.ndarray]) -> None:
        """Wraps one simulation step.

        Args:
            stepper: The live stepper whose state this result exposes.
            qubit_map: A map from the Qubits in the Circuit to the index of
                this qubit for a canonical ordering.
            measurements: A dictionary from measurement gate key to
                measurement results for this step.
        """
        # Falsy arguments (None or empty) are replaced by fresh containers.
        self.qubit_map = qubit_map or {}
        self.measurements = measurements or collections.defaultdict(list)
        self._stepper = stepper
    def state_vector(self) -> np.ndarray:
        """Return the state (wave function) at this point in the computation.

        The state is returned in the computational basis with these basis
        states defined by the qubit_map. In particular the value in the
        qubit_map is the index of the qubit, and these are translated into
        binary vectors where the last qubit is the 1s bit of the index, the
        second-to-last is the 2s bit of the index, and so forth (i.e. big
        endian ordering).

        Example:
            qubit_map: {QubitA: 0, QubitB: 1, QubitC: 2}
            Then the returned vector will have indices mapped to qubit basis
            states like the following table
                | QubitA | QubitB | QubitC
            :-: | :----: | :----: | :----:
             0  |   0    |   0    |   0
             1  |   0    |   0    |   1
             2  |   0    |   1    |   0
             3  |   0    |   1    |   1
             4  |   1    |   0    |   0
             5  |   1    |   0    |   1
             6  |   1    |   1    |   0
             7  |   1    |   1    |   1
        """
        # NOTE(review): this looks like a direct view of the stepper's
        # internal buffer rather than a copy -- confirm against
        # Stepper.current_state before mutating the returned array.
        return self._stepper.current_state
def set_state(self, state: Union[int, np.ndarray]):
"""Updates the state of the simulator to the given new state.
| |
import pythoncom
import win32com.client
import time
from pythoncom import com_error
import robot.libraries.Screenshot as screenshot
import os
from robot.api import logger
class SapGuiLibrary:
"""The SapGuiLibrary is a library that enables users to create tests for the Sap Gui application
The library uses the Sap Scripting Engine, therefore Scripting must be enabled in Sap in order for this library to work.
= Opening a connection / Before running tests =
First of all, you have to *make sure the Sap Logon Pad is started*. You can automate this process by using the
AutoIT library or the Process Library.
    After the Sap Logon Pad is started, you can connect to the Sap Session using the keyword `connect to session`.
If you have a successful connection you can use `Open Connection` to open a new connection from the Sap Logon Pad
or `Connect To Existing Connection` to connect to a connection that is already open.
= Locating or specifying elements =
You need to specify elements starting from the window ID, for example, wnd[0]/tbar[1]/btn[8]. In some cases the SAP
ID contains backslashes. Make sure you escape these backslashes by adding another backslash in front of it.
= Screenshots (on error) =
The SapGUILibrary offers an option for automatic screenshots on error.
Default this option is enabled, use keyword `disable screenshots on error` to skip the screenshot functionality.
Alternatively, this option can be set at import.
"""
__version__ = '1.0'
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
def __init__(self, screenshots_on_error=True, screenshot_directory=None):
"""Sets default variables for the library
"""
self.explicit_wait = float(0.0)
self.sapapp = -1
self.session = -1
self.connection = -1
self.take_screenshots = screenshots_on_error
self.screenshot = screenshot.Screenshot()
if screenshot_directory is not None:
if not os.path.exists(screenshot_directory):
os.makedirs(screenshot_directory)
self.screenshot.set_screenshot_directory(screenshot_directory)
def click_element(self, element_id):
"""Performs a single click on a given element. Used only for buttons, tabs and menu items.
In case you want to change a value of an element like checkboxes of selecting an option in dropdown lists,
use `select checkbox` or `select from list by label` instead.
"""
# Performing the correct method on an element, depending on the type of element
element_type = self.get_element_type(element_id)
if (element_type == "GuiTab"
or element_type == "GuiMenu"):
self.session.findById(element_id).select()
elif element_type == "GuiButton":
self.session.findById(element_id).press()
else:
self.take_screenshot()
message = "You cannot use 'click_element' on element type '%s', maybe use 'select checkbox' instead?" % element_type
raise Warning(message)
time.sleep(self.explicit_wait)
def click_toolbar_button(self, table_id, button_id):
"""Clicks a button of a toolbar within a GridView 'table_id' which is contained within a shell object.
Use the Scripting tracker recorder to find the 'button_id' of the button to click
"""
self.element_should_be_present(table_id)
try:
self.session.findById(table_id).pressToolbarButton(button_id)
except AttributeError:
self.take_screenshot()
self.session.findById(table_id).pressButton(button_id)
except com_error:
self.take_screenshot()
message = "Cannot find Button_id '%s'." % button_id
raise ValueError(message)
time.sleep(self.explicit_wait)
def connect_to_existing_connection(self, connection_name):
"""Connects to an open connection. If the connection matches the given connection_name, the session is connected
to this connection.
"""
self.connection = self.sapapp.Children(0)
if self.connection.Description == connection_name:
self.session = self.connection.children(0)
else:
self.take_screenshot()
message = "No existing connection for '%s' found." % connection_name
raise ValueError(message)
def connect_to_session(self, explicit_wait=0):
"""Connects to an open session SAP.
See `Opening a connection / Before running tests` for details about requirements before connecting to a session.
Optionally `set explicit wait` can be used to set the explicit wait time.
*Examples*:
| *Keyword* | *Attributes* |
| connect to session | |
| connect to session | 3 |
| connect to session | explicit_wait=500ms |
"""
lenstr = len("SAPGUI")
rot = pythoncom.GetRunningObjectTable()
rotenum = rot.EnumRunning()
while True:
monikers = rotenum.Next()
if not monikers:
break
ctx = pythoncom.CreateBindCtx(0)
name = monikers[0].GetDisplayName(ctx, None);
if name[-lenstr:] == "SAPGUI":
obj = rot.GetObject(monikers[0])
sapgui = win32com.client.Dispatch(obj.QueryInterface(pythoncom.IID_IDispatch))
self.sapapp = sapgui.GetScriptingEngine
# Set explicit_wait after connection succeed
self.set_explicit_wait(explicit_wait)
if hasattr(self.sapapp, "OpenConnection") == False:
self.take_screenshot()
message = "Could not connect to Session, is Sap Logon Pad open?"
raise Warning(message)
# run explicit wait last
time.sleep(self.explicit_wait)
def disable_screenshots_on_error(self):
"""Disables automatic screenshots on error.
"""
self.take_screenshots = False
def doubleclick_element(self, element_id, item_id, column_id):
"""Performs a double-click on a given element. Used only for shell objects.
"""
# Performing the correct method on an element, depending on the type of element
element_type = self.get_element_type(element_id)
if element_type == "GuiShell":
self.session.findById(element_id).doubleClickItem(item_id, column_id)
else:
self.take_screenshot()
message = "You cannot use 'doubleclick element' on element type '%s', maybe use 'click element' instead?" % element_type
raise Warning(message)
time.sleep(self.explicit_wait)
def element_should_be_present(self, element_id, message=None):
"""Checks whether an element is present on the screen.
"""
try:
self.session.findById(element_id)
except com_error:
self.take_screenshot()
if message is None:
message = "Cannot find Element '%s'." % element_id
raise ValueError(message)
    def element_value_should_be(self, element_id, expected_value, message=None):
        """Checks whether the element value is the same as the expected value.
        The possible expected values depend on the type of element (see usage).

        Usage:
        | *Element type* | *possible values*                 |
        | textfield      | text                              |
        | label          | text                              |
        | checkbox       | checked / unchecked               |
        | radiobutton    | checked / unchecked               |
        | combobox       | text of the option to be expected |
        """
        element_type = self.get_element_type(element_id)
        actual_value = self.get_value(element_id)
        # Breaking up the different element types so we can check the value the correct way
        if (element_type == "GuiTextField"
                or element_type == "GuiCTextField"
                or element_type == "GuiComboBox"
                or element_type == "GuiLabel"):
            # setfocus before comparing -- presumably so the element is
            # visible on an error screenshot; TODO confirm.
            self.session.findById(element_id).setfocus()
            time.sleep(self.explicit_wait)
            # In these cases we can simply check the text value against the value of the element
            if expected_value != actual_value:
                if message is None:
                    message = "Element value of '%s' should be '%s', but was '%s'" % (
                        element_id, expected_value, actual_value)
                self.take_screenshot()
                raise AssertionError(message)
        elif element_type == "GuiStatusPane":
            # Status panes are compared directly, without a setfocus call.
            if expected_value != actual_value:
                if message is None:
                    message = "Element value of '%s' should be '%s', but was '%s'" % (
                        element_id, expected_value, actual_value)
                self.take_screenshot()
                raise AssertionError(message)
        elif (element_type == "GuiCheckBox"
                or element_type == "GuiRadioButton"):
            # First check if there is a correct value given, otherwise raise an assertion error
            self.session.findById(element_id).setfocus()
            if (expected_value.lower() != "checked"
                    and expected_value.lower() != "unchecked"):
                # Raise an AsertionError when no correct expected_value is given
                self.take_screenshot()
                if message is None:
                    message = "Incorrect value for element type '%s', provide checked or unchecked" % element_type
                raise AssertionError(message)
            # Check whether the expected value matches the actual value. If not, raise an assertion error
            if expected_value.lower() != actual_value:
                self.take_screenshot()
                if message is None:
                    message = "Element value of '%s' didn't match the expected value" % element_id
                raise AssertionError(message)
        else:
            # When the type of element can't be checked, raise an assertion error
            self.take_screenshot()
            message = "Cannot use keyword 'element value should be' for element type '%s'" % element_type
            raise Warning(message)
        # Run explicit wait as last
        time.sleep(self.explicit_wait)
def element_value_should_contain(self, element_id, expected_value, message=None):
"""Checks whether the element value contains the expected value.
The possible expected values depend on the type of element (see usage).
Usage:
| *Element type* | *possible values* |
| textfield | text |
| label | text |
| combobox | text of the option to be expected |
"""
element_type = self.get_element_type(element_id)
# Breaking up the different element types so we can check the value the correct way
if (element_type == "GuiTextField"
or element_type == "GuiCTextField"
or element_type == "GuiComboBox"
or element_type == "GuiLabel"):
self.session.findById(element_id).setfocus()
actual_value = self.get_value(element_id)
time.sleep(self.explicit_wait)
# In these cases we can simply check the text value against the value of the element
if expected_value not in actual_value:
self.take_screenshot()
if message is None:
message = "Element value '%s' does not contain '%s', (but was '%s')" % (
element_id, expected_value, actual_value)
raise AssertionError(message)
else:
# When the element content can't be checked, raise an assertion error
self.take_screenshot()
message = "Cannot use keyword 'element value should contain' for element type '%s'" % element_type
raise Warning(message)
# Run explicit wait as last
time.sleep(self.explicit_wait)
def enable_screenshots_on_error(self):
"""Enables automatic screenshots on error.
"""
self.take_screenshots = True
def get_cell_value(self, table_id, row_num, col_id):
"""Returns the cell value for the specified cell.
"""
self.element_should_be_present(table_id)
try:
cellValue = self.session.findById(table_id).getCellValue(row_num, col_id)
return cellValue
except com_error:
self.take_screenshot()
message = "Cannot find Column_id '%s'." % col_id
raise ValueError(message)
| |
from __future__ import division, print_function, absolute_import
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import standard_ops
import tflearn
from tflearn import utils
from tflearn import variables as va
from tflearn import activations
from tflearn import initializations
from tflearn import regularizers
def input_data(shape=None, placeholder=None, dtype=tf.float32,
               data_preprocessing=None, data_augmentation=None,
               name="InputData"):
    """ Input Data.

    This layer is used for inputting (aka. feeding) data to a network.
    A TensorFlow placeholder will be used if it is supplied, otherwise a new
    placeholder will be created with the given shape. Either a shape or
    placeholder must be provided, otherwise an exception will be raised.

    Furthermore, the placeholder is added to TensorFlow collections so it can
    be retrieved using tf.get_collection(tf.GraphKeys.INPUTS) as well as
    tf.GraphKeys.LAYER_TENSOR + '/' + name. Similarly for the data
    preprocessing and augmentation objects which are stored in the
    collections with tf.GraphKeys.DATA_PREP and tf.GraphKeys.DATA_AUG.
    This allows other parts of TFLearn to easily retrieve and use these
    objects by referencing these graph-keys.

    Input:
        List of `int` (Shape), to create a new placeholder.
            Or
        `Tensor` (Placeholder), to use an existing placeholder.

    Output:
        Placeholder Tensor with given shape.

    Arguments:
        shape: list of `int`. An array or tuple representing input data shape.
            It is required if no placeholder is provided. First element should
            be 'None' (representing batch size), if not provided, it will be
            added automatically.
        placeholder: A Placeholder to use for feeding this layer (optional).
            If not specified, a placeholder will be automatically created.
            You can retrieve that placeholder through graph key: 'INPUTS',
            or the 'placeholder' attribute of this function's returned tensor.
        dtype: `tf.type`, Placeholder data type (optional). Default: float32.
        data_preprocessing: A `DataPreprocessing` subclass object to manage
            real-time data pre-processing when training and predicting (such
            as zero center data, std normalization...).
        data_augmentation: `DataAugmentation`. A `DataAugmentation` subclass
            object to manage real-time data augmentation while training (
            such as random image crop, random image flip, random sequence
            reverse...).
        name: `str`. A name for this layer (optional).

    Raises:
        ValueError: if neither `shape` nor `placeholder` is provided.
    """
    # We need either a placeholder or a shape, otherwise raise an error.
    if placeholder is None:
        if shape is None:
            # ValueError (a subclass of Exception) replaces the original
            # bare Exception; the message typo "consruct" is also fixed.
            raise ValueError("Either a `shape` or `placeholder` argument is "
                             "required to construct an input layer.")
        # We have a shape but no placeholder, so we must now create one.
        # Ensure the first element of shape is None (the batch dimension) by
        # prepending None if necessary.
        # TODO: Why is there a len(shape) > 1 condition? Please explain here.
        if len(shape) > 1 and shape[0] is not None:
            shape = [None] + list(shape)
        # Create a new tf.placeholder with the given shape.
        with tf.name_scope(name):
            placeholder = tf.placeholder(shape=shape, dtype=dtype, name="X")
    # Store the placeholder object in TensorFlow collections so it can be
    # retrieved and used elsewhere.
    tf.add_to_collection(tf.GraphKeys.INPUTS, placeholder)
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, placeholder)
    # Store the objects for data-preprocessing and -augmentation
    # in TensorFlow collections so they can be retrieved and used elsewhere.
    tf.add_to_collection(tf.GraphKeys.DATA_PREP, data_preprocessing)
    tf.add_to_collection(tf.GraphKeys.DATA_AUG, data_augmentation)
    return placeholder
def fully_connected(incoming, n_units, activation='linear', bias=True,
                    weights_init='truncated_normal', bias_init='zeros',
                    regularizer=None, weight_decay=0.001, trainable=True,
                    restore=True, reuse=False, scope=None,
                    name="FullyConnected"):
    """ Fully Connected.

    A fully connected layer.

    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flatten.

    Output:
        2D Tensor [samples, n_units].

    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnected'.

    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        b: `Tensor`. Variable representing biases.
    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))
    with tf.variable_scope(scope, default_name=name, values=[incoming],
                           reuse=reuse) as scope:
        name = scope.name
        # Resolve the weights initializer: a string name is looked up in
        # tflearn.initializations; a Tensor/ndarray/list is used directly as
        # the initial value, in which case no shape may be passed.
        W_init = weights_init
        filter_size = [n_inputs, n_units]
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        elif type(W_init) in [tf.Tensor, np.ndarray, list]:
            filter_size = None
        W_regul = None
        if regularizer is not None:
            W_regul = lambda x: regularizers.get(regularizer)(x, weight_decay)
        W = va.variable('W', shape=filter_size, regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
        b = None
        if bias:
            # Resolve the bias initializer the same way as the weights.
            # (A duplicated, dead re-check of `isinstance(bias_init, str)`
            # has been removed: after the first conversion bias_init is a
            # callable, so the second check could never fire.)
            b_shape = [n_units]
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            elif type(bias_init) in [tf.Tensor, np.ndarray, list]:
                b_shape = None
            b = va.variable('b', shape=b_shape, initializer=bias_init,
                            trainable=trainable, restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])
        inference = tf.matmul(inference, W)
        if b is not None:
            inference = tf.nn.bias_add(inference, b)
        if activation:
            if isinstance(activation, str):
                inference = activations.get(activation)(inference)
            elif hasattr(activation, '__call__'):
                inference = activation(inference)
            else:
                raise ValueError("Invalid Activation.")
        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)
    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b
    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)
    return inference
def dropout(incoming, keep_prob, noise_shape=None, name="Dropout"):
    """ Dropout.

    Outputs the input element scaled up by `1 / keep_prob`. The scaling is so
    that the expected sum is unchanged.

    By default, each element is kept or dropped independently. If noise_shape
    is specified, it must be broadcastable to the shape of x, and only dimensions
    with noise_shape[i] == shape(x)[i] will make independent decisions. For
    example, if shape(x) = [k, l, m, n] and noise_shape = [k, 1, 1, n], each
    batch and channel component will be kept independently and each row and column
    will be kept or not kept together.

    Arguments:
        incoming : A `Tensor` or list of `Tensor`. The incoming tensor(s).
        keep_prob : A float representing the probability that each element
            is kept.
        noise_shape : A 1-D Tensor of type int32, representing the shape for
            randomly generated keep/drop flags.
        name : A name for this layer (optional).

    References:
        Dropout: A Simple Way to Prevent Neural Networks from Overfitting.
        N. Srivastava, G. Hinton, A. Krizhevsky, I. Sutskever & R. Salakhutdinov,
        (2014), Journal of Machine Learning Research, 15(Jun), 1929-1958.

    Links:
        [https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf]
        (https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf)
    """
    with tf.name_scope(name) as scope:
        inference = incoming

        def apply_dropout():
            # Fixed: the original tested `type(inference) in [list, np.array]`
            # (np.array is a function, so ndarrays never matched) and its loop
            # rebound the loop variable without storing the result, so dropout
            # was never applied to list inputs.
            if isinstance(inference, (list, tuple)) or type(inference) is np.ndarray:
                return [tf.nn.dropout(x, keep_prob, noise_shape) for x in inference]
            return tf.nn.dropout(inference, keep_prob, noise_shape)

        # Apply dropout only while training; pass the input through unchanged
        # at inference time.
        is_training = tflearn.get_training_mode()
        inference = tf.cond(is_training, apply_dropout, lambda: inference)
        # Track output tensor.
        tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)
    return inference
def custom_layer(incoming, custom_fn, **kwargs):
    """ Custom Layer.

    A custom layer that can apply any operations to the incoming Tensor or
    list of `Tensor`. The custom function can be pass as a parameter along
    with its parameters.

    Arguments:
        incoming : A `Tensor` or list of `Tensor`. Incoming tensor.
        custom_fn : A custom `function`, to apply some ops on incoming tensor.
        **kwargs: Some custom parameters that custom function might need.
    """
    # The scope name may be overridden through kwargs; the 'name' entry is
    # deliberately left in kwargs and forwarded to the custom function.
    scope_name = kwargs.get('name', "CustomLayer")
    with tf.name_scope(scope_name):
        return custom_fn(incoming, **kwargs)
def reshape(incoming, new_shape, name="Reshape"):
    """ Reshape.

    A layer that reshape the incoming layer tensor output to the desired shape.

    Arguments:
        incoming: A `Tensor`. The incoming tensor.
        new_shape: A list of `int`. The desired shape.
        name: A name for this layer (optional).
    """
    with tf.name_scope(name) as scope:
        tensor = incoming
        if isinstance(tensor, list):
            # A list of tensors is first merged along dimension 0
            # (TF 0.x argument order: tf.concat(axis, values)).
            tensor = tf.concat(0, tensor)
            tensor = tf.cast(tensor, tf.float32)
        tensor = tf.reshape(tensor, shape=new_shape)
        # Expose the scope for downstream introspection.
        tensor.scope = scope
        # Track output tensor.
        tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, tensor)
    return tensor
def flatten(incoming, name="Flatten"):
""" Flatten.
Flatten the incoming Tensor.
Input:
(2+)-D `Tensor`.
| |
<filename>pygears/hls/ir.py<gh_stars>100-1000
import ast as opc
import inspect
import attr
import typing
import textwrap
from dataclasses import dataclass, field
from pygears.typing.base import TypingMeta, GenericMeta, class_and_instance_method
from functools import reduce
from pygears import Intf
from pygears.core.port import InPort, OutPort
from pygears.core.gear import InSig, OutSig
from pygears.typing import (Bool, Integer, Queue, Tuple, Uint, is_type, typeof, Array, Union, Unit)
# from .ast.utils import get_property_type
import operator
# AST operator nodes whose operands are treated as boolean/bit values by the IR.
BOOLEAN_OPERATORS = {opc.BitOr, opc.BitAnd, opc.BitXor, opc.Invert, opc.Not, opc.And, opc.Or}
# Comparison/logical AST nodes: expressions built from these produce a 1-bit result
# (see BinOpExpr.dtype, which returns Uint[1] for members of this list).
BIN_OPERATORS = [opc.Eq, opc.Gt, opc.GtE, opc.Lt, opc.LtE, opc.NotEq, opc.And, opc.Or]
# AST operator nodes whose operand widths may be extended by the compiler.
# NOTE(review): removed a duplicated `opc.Div` entry from the original list;
# membership semantics are unchanged.
EXTENDABLE_OPERATORS = [
    opc.Add, opc.Sub, opc.Mult, opc.Div, opc.Mod, opc.Pow, opc.LShift, opc.RShift, opc.BitOr,
    opc.BitAnd, opc.BitXor, opc.Invert, opc.Not
]
# Map AST operator nodes to their target-language string spelling (note the
# C-like '&&', '||' and '!' forms for the logical operators).
# NOTE(review): opc.FloorDiv maps to '/' (same as opc.Div) — presumably the
# target language has a single integer-division operator; confirm.
OPMAP = {
    opc.Add: '+',
    opc.Sub: '-',
    opc.MatMult: '@',
    opc.Mult: '*',
    opc.Div: '/',
    opc.Mod: '%',
    opc.Pow: '**',
    opc.LShift: '<<',
    opc.RShift: '>>',
    opc.BitOr: '|',
    opc.BitAnd: '&',
    opc.BitXor: '^',
    opc.FloorDiv: '/',
    opc.Invert: '~',
    opc.Not: '!',
    opc.UAdd: '+',
    opc.USub: '-',
    opc.Eq: '==',
    opc.Gt: '>',
    opc.GtE: '>=',
    opc.Lt: '<',
    opc.LtE: '<=',
    opc.NotEq: '!=',
    opc.And: '&&',
    opc.Or: '||'
}
# Map AST operator nodes to the Python callables implementing them, used for
# constant folding (see opex below).
PYOPMAP = {
    opc.Add: operator.__add__,
    opc.And: operator.__and__,
    opc.BitAnd: operator.__and__,
    opc.BitOr: operator.__or__,
    opc.BitXor: operator.__xor__,
    opc.Div: operator.__truediv__,
    opc.Eq: operator.__eq__,
    opc.Gt: operator.__gt__,
    opc.GtE: operator.__ge__,
    opc.FloorDiv: operator.__floordiv__,
    opc.Lt: operator.__lt__,
    opc.LtE: operator.__le__,
    opc.LShift: operator.__lshift__,
    opc.MatMult: operator.__matmul__,
    opc.Mult: operator.__mul__,
    opc.NotEq: operator.__ne__,
    opc.Not: operator.__not__,
    opc.Or: operator.__or__,
    opc.RShift: operator.__rshift__,
    opc.Sub: operator.__sub__,
    opc.UAdd: operator.__pos__,
    opc.USub: operator.__neg__,
}
# Identity element used to seed a reduction over each operator.
REDUCE_INITIAL = {
    opc.Add: 0,
    opc.And: True,
    opc.BitAnd: True,
    opc.BitOr: False,
    opc.Mult: 1,
    opc.Or: False,
}
def opex(op, *operands):
    """Apply the Python implementation of `op` to the operands' constant `.val` payloads."""
    impl = PYOPMAP[op]
    return impl(*(operand.val for operand in operands))
def bin_op_reduce(intfs, func, op, dflt=None):
    """Right-fold `func`-mapped items into a BinOpExpr tree joined by `op`.

    Returns `dflt` for an empty sequence and the single mapped item for a
    one-element sequence (no BinOpExpr is built in either case).
    """
    if not intfs:
        return dflt
    mapped = func(intfs[0])
    rest = intfs[1:]
    if not rest:
        return mapped
    return BinOpExpr([mapped, bin_op_reduce(rest, func, op, dflt=dflt)], op)
def find_sub_dtype(val):
    """For a tuple/list, return the type of the element with the widest dtype;
    otherwise return the plain type of `val`."""
    if not isinstance(val, (tuple, list)):
        return type(val)
    widest = max(val, key=lambda item: item.dtype.width)
    return type(widest)
def find_name(node):
    """Best-effort name lookup: use node.name when set, else recurse into the
    first of node.val / node.op that exists; None when nothing is found."""
    name = getattr(node, 'name', None)
    if name is not None:
        return name
    for attr_name in ('val', 'op'):
        if hasattr(node, attr_name):
            return find_name(getattr(node, attr_name))
    return None
def get_contextpr(node):
    """Return the compile-time constant value of `node`, or None when the node
    is not known to be constant."""
    if isinstance(node, ResExpr):
        return node.val
    # TODO: rethink this? This should be some sort of named constant expression?
    if isinstance(node, Name):
        target = node.obj
        if isinstance(target, Variable) and target.val is not None and not target.reg:
            return target.val
    return None
class IntfTypeMeta(GenericMeta):
    """Metaclass for IntfType: exposes the (dtype, direction) type arguments."""
    # Direction constants stored as the second type argument.
    iin = 0
    iout = 1
    @property
    def dtype(self):
        # First type parameter: the data type carried by the interface.
        return self.args[0]
    @property
    def direction(self):
        # Second type parameter: iin (input) or iout (output).
        return self.args[1]
class IntfType(tuple, metaclass=IntfTypeMeta):
    """Type describing a gear interface, parameterized by its dtype and direction."""
    __parameters__ = ['dtype', 'direction']
    # NOTE(review): the method bodies are intentionally empty — presumably these
    # names are only referenced symbolically by the HLS compiler; confirm.
    def pull_nb(self):
        # Non-blocking pull of the interface's current data.
        pass
    def empty(self):
        # Whether no data is currently available on the interface.
        pass
    def ack(self):
        # Acknowledge consumption of the current data.
        pass
@attr.s(auto_attribs=True, kw_only=True)
class Expr:
    """Base class of every IR expression node."""
    @property
    def dtype(self):
        # Overridden by subclasses to report the expression's pygears type.
        pass
# Type aliases
PgType = typing.Union[TypingMeta, Unit, int]  # things usable as a pygears value/type
OpType = typing.Union[Expr, PgType, str]  # anything accepted as an operand
# Type definitions
class ResExpr(Expr):
    """Resolved expression: wraps a concrete, compile-time-known value."""
    def __new__(cls, val):
        # Wrapping an existing Expr is a no-op; return it unchanged.
        if isinstance(val, Expr):
            return val
        inst = super().__new__(cls)
        inst.val = val
        return inst
    def __init__(self, val):
        # `val` is already attached in __new__; only the base initializer runs.
        super().__init__()
    def __eq__(self, other):
        if not isinstance(other, ResExpr):
            return False
        return self.val == other.val
    def __hash__(self):
        return hash((self.val, type(self)))
    def __repr__(self):
        return f'ResExpr({repr(self.val)})'
    def __str__(self):
        return str(self.val)
    @property
    def dtype(self):
        # if is_type(type(self.val)):
        #     return type(self.val)
        # Plain Python ints are promoted to the matching pygears Integer type.
        if not is_type(type(self.val)) and isinstance(self.val, int):
            return type(Integer(self.val))
        # TODO: Remove this if unecessary
        if isinstance(self.val, Intf):
            return IntfType[self.val.dtype]
        return type(self.val)
        # return None
# Canonical constant boolean expressions used throughout the IR.
res_true = ResExpr(Bool(True))
res_false = ResExpr(Bool(False))
@dataclass
class TupleExpr(Expr):
    """A literal tuple of expressions, indexable at compile time."""
    val: typing.Sequence
    def __getitem__(self, key):
        return self.val[key]
@dataclass
class Variable:
    """A named storage location in the compiled function."""
    name: str
    dtype: typing.Union[PgType, typing.Any] = None
    val: typing.Union[PgType, Expr] = None
    # NOTE(review): `Bool` below is the pygears type used purely as an
    # annotation; at runtime these fields hold plain Python booleans.
    any_init: Bool = False
    reg: Bool = False
    def __post_init__(self):
        # Infer dtype from the initial value when not given explicitly.
        if self.dtype is None:
            if self.val is not None:
                self.dtype = self.val.dtype
@dataclass
class Interface(Expr):
    """Wraps a gear port (or expression) together with its data-flow direction."""
    intf: typing.Union[InPort, OutPort, Expr]
    direction: str
    _name: str = None
    @property
    def name(self):
        # Explicit name wins; otherwise fall back to the port's basename or,
        # for expression-backed interfaces, a best-effort name lookup.
        if self._name:
            return self._name
        try:
            return self.intf.basename
        except AttributeError:
            return find_name(self.intf)
    def __str__(self):
        return self.name
    @property
    def dtype(self):
        return find_sub_dtype(self.intf)
@attr.s(auto_attribs=True)
class Name(Expr):
    """Reference to a Variable; `ctx` is 'load', 'store' or a sub-field access."""
    name: str
    obj: Variable = None
    ctx: str = 'load'
    def __repr__(self):
        return f'Id({self.name})'
    def __str__(self):
        # Plain loads/stores print as the bare name; any other context is
        # rendered as an attribute access on the name.
        if self.ctx in ['load', 'store']:
            return self.name
        else:
            return f'{self.name}.{self.ctx}'
    @property
    def dtype(self):
        return self.obj.dtype
@attr.s(auto_attribs=True)
class Component(Expr):
    """Access to a handshake component ('data', 'valid', 'ready') of an interface expression."""
    val: Expr
    field: str
    def __repr__(self):
        return f'{repr(self.val)}.{self.field}'
    def __str__(self):
        return f'{self.val}.{self.field}'
    @property
    def dtype(self):
        # Handshake strobes are single-bit; 'data' carries the interface's payload type.
        if self.field in ['ready', 'valid']:
            return Bool
        elif self.field == 'data':
            assert typeof(self.val.dtype, IntfType)
            return self.val.dtype.dtype
@attr.s(auto_attribs=True)
class Await(Expr):
    """An (optional) expression guarded by entry/exit await conditions."""
    expr: Expr = None
    in_await: Expr = res_true
    exit_await: Expr = res_true
    @property
    def dtype(self):
        if self.expr is None:
            return None
        return self.expr.dtype
    def __str__(self):
        # Collect every non-trivial await annotation.  Previously `footer`
        # was left unbound (NameError) when both awaits were the default
        # res_true, and a set exit_await silently overwrote a set in_await.
        parts = []
        if self.in_await != res_true:
            parts.append(f'(in-await {self.in_await})')
        if self.exit_await != res_true:
            parts.append(f'(exit-await {self.exit_await})')
        footer = ' '.join(parts)
        if self.expr:
            return f'{str(self.expr)} {footer}' if footer else str(self.expr)
        return footer
@attr.s(auto_attribs=True)
class InterfacePull(Expr):
    """Blocking read of an interface's current data."""
    intf: Interface
    @property
    def in_await(self):
        # Execution must wait until the interface reports valid data.
        return Component(self.intf, 'valid')
    @in_await.setter
    def in_await(self, val):
        # Intentionally a no-op: the await condition is derived, not stored.
        pass
    @property
    def dtype(self):
        # NOTE(review): accesses `self.intf.obj` — presumably `intf` is a Name
        # node here rather than an Interface as annotated; confirm.
        return self.intf.obj.dtype
    def __str__(self):
        return f'{str(self.intf)}.data'
@attr.s(auto_attribs=True)
class InterfaceReady(Expr):
    """Completion condition tied to the consumer asserting 'ready' on an interface."""
    intf: Interface
    @property
    def exit_await(self):
        # The statement completes once the consumer asserts 'ready'.
        return Component(self.intf, 'ready')
    @exit_await.setter
    def exit_await(self, val):
        # Intentionally a no-op: the await condition is derived, not stored.
        pass
    @property
    def dtype(self):
        return Bool
    def __str__(self):
        return f'{str(self.intf)}.ready'
@dataclass
class InterfaceAck(Expr):
    """Acknowledge consumption of the current data on an interface."""
    intf: Interface
    @property
    def dtype(self):
        return Bool
@dataclass
class ConcatExpr(Expr):
    """Concatenation of expressions; folds to a constant when all operands are ResExpr."""
    def __repr__(self):
        return 'ConcatExpr(' + ', '.join([repr(v) for v in self.operands]) + ')'
    def __str__(self):
        return '(' + ', '.join([str(v) for v in self.operands]) + ')'
    def __init__(self, operands: typing.Sequence[Expr]):
        # Construction is handled entirely in __new__ (which may return a
        # folded ResExpr instead), so nothing to initialize here.
        pass
    def __eq__(self, other):
        # Fixed: the original compared against BinOpExpr (copy-paste error),
        # so ConcatExpr == ConcatExpr was always False, and zip() truncation
        # let operand lists of different lengths compare equal.
        if not isinstance(other, ConcatExpr):
            return False
        if len(self.operands) != len(other.operands):
            return False
        return all(ops == opo for ops, opo in zip(self.operands, other.operands))
    def __new__(cls, operands: typing.Sequence[Expr]):
        # All-constant operands fold to a single ResExpr: a pygears Tuple when
        # every dtype is a proper type, a plain Python tuple otherwise.
        if all(isinstance(v, ResExpr) for v in operands):
            if all(is_type(v.dtype) for v in operands):
                return ResExpr(Tuple[tuple(v.dtype for v in operands)](tuple(v.val
                                                                             for v in operands)))
            else:
                return ResExpr(tuple(v.val for v in operands))
        inst = super().__new__(cls)
        inst.operands = operands
        return inst
    @property
    def dtype(self):
        return Tuple[tuple(op.dtype for op in self.operands)]
class UnaryOpExpr(Expr):
    """Unary operation node; constant-folds and simplifies on construction."""
    def __init__(self, operand, operator):
        # Construction is handled in __new__ (which may return a simplified
        # node of a different type), so nothing to initialize here.
        pass
    def __repr__(self):
        return f'{OPMAP[self.operator]}({self.operand})'
    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return (self.operand == other.operand and self.operator == other.operator)
    def __new__(cls, operand, operator):
        # Fold when the operand is a compile-time constant.
        if isinstance(operand, ResExpr):
            return ResExpr(opex(operator, operand))
        # !(a == b) -> (a != b) and !(a != b) -> (a == b)
        if operator == opc.Not and isinstance(operand, BinOpExpr):
            if operand.operator == opc.Eq:
                return BinOpExpr(operand.operands, opc.NotEq)
            if operand.operator == opc.NotEq:
                return BinOpExpr(operand.operands, opc.Eq)
        # Double negation cancels: !!x -> x
        if operator == opc.Not and isinstance(operand, UnaryOpExpr) and operand.operator == opc.Not:
            return operand.operand
        inst = super().__new__(cls)
        inst.operand = operand
        inst.operator = operator
        return inst
    @property
    def dtype(self):
        # Logical negation always yields a single bit.
        if self.operator == opc.Not:
            return Uint[1]
        # Result type via pygears type arithmetic: apply the operator to the
        # operand's type object (the Not case above never reaches this eval).
        res_t = eval(f'{OPMAP[self.operator]} op', {'op': self.operand.dtype})
        if isinstance(res_t, bool):
            return Uint[1]
        return res_t
class CastExpr(Expr):
    """Cast of an expression to a target pygears type; simplifies on construction."""
    def __init__(self, operand, cast_to):
        # Construction is handled in __new__ (which may return the operand
        # itself for identity casts), so nothing to initialize here.
        pass
    def __repr__(self):
        return f'CastTo({self.operand}, {self.cast_to})'
    def __str__(self):
        return f"({self.cast_to})'({self.operand})"
    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return (self.operand == other.operand and self.cast_to == other.cast_to)
    def __new__(cls, operand, cast_to):
        # A constant target type may arrive wrapped in a ResExpr.
        if isinstance(cast_to, ResExpr):
            cast_to = cast_to.val
        # Casting to plain `int` reinterprets as an unsigned of equal width.
        if cast_to == int:
            cast_to = Uint[operand.dtype.width]
        # Identity cast: nothing to do.
        if operand.dtype == cast_to:
            return operand
        # Concatenations are cast element-wise against the target's fields.
        if isinstance(operand, ConcatExpr) and typeof(cast_to, (Array, Tuple, Queue, Union)):
            cast_ops = [
                CastExpr(op, cast_t) if op.dtype != cast_t else op
                for op, cast_t in zip(operand.operands, cast_to)
            ]
            operand = ConcatExpr(cast_ops)
        inst = super().__new__(cls)
        inst.operand = operand
        inst.cast_to = cast_to
        return inst
    @property
    def dtype(self):
        return self.cast_to
class SliceExpr(Expr):
    """A start:stop:step slice whose bounds are themselves expressions."""
    def __repr__(self):
        return f'({self.start if self.start else ""}:{self.stop if self.stop else ""}:{self.step if self.step else ""})'
    def __init__(self, start: OpType, stop: OpType, step: OpType):
        # Construction is handled in __new__; nothing to initialize here.
        pass
    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return (self.start == other.start and self.stop == other.stop and self.step == other.step)
    def __new__(cls, start: OpType, stop: OpType, step: OpType):
        # Fold to a plain Python slice when all three bounds are constants.
        if isinstance(start, ResExpr) and isinstance(stop, ResExpr) and isinstance(step, ResExpr):
            return ResExpr(slice(start.val, stop.val, step.val))
        inst = super().__new__(cls)
        inst.start = start
        inst.stop = stop
        inst.step = step
        return inst
class BinOpExpr(Expr):
    """Binary operation node; constant-folds and simplifies on construction."""
    def __repr__(self):
        try:
            return f'{type(self).__name__}(operands={repr(self.operands)}, operator={self.operator.__name__})'
        except:
            return f'{type(self).__name__}(operands={repr(self.operands)}, operator={self.operator})'
    def __str__(self):
        return f'({self.operands[0]} {OPMAP[self.operator]} {self.operands[1]})'
    def __init__(self, operands: typing.Tuple[OpType], operator):
        # Construction is handled in __new__ (which may return a simplified
        # node), so nothing to initialize here.
        pass
    def __new__(cls, operands: typing.Tuple[OpType], operator):
        op1, op2 = operands
        # Fold when both operands are compile-time constants.
        if isinstance(op1, ResExpr) and isinstance(op2, ResExpr):
            return ResExpr(opex(operator, op1, op2))
        # Short-circuit simplifications when one side of a logical op is constant.
        if operator == opc.And:
            if isinstance(op1, ResExpr):
                return op2 if op1.val else op1
            if isinstance(op2, ResExpr):
                return op1 if op2.val else op2
        elif operator == opc.Or:
            if isinstance(op1, ResExpr):
                return op1 if op1.val else op2
            if isinstance(op2, ResExpr):
                return op2 if op2.val else op1
        elif operator in (opc.RShift, opc.LShift):
            # Shifting by a constant zero is a no-op.
            if isinstance(op2, ResExpr) and op2.val == 0:
                return op1
        inst = super().__new__(cls)
        inst.operands = operands
        inst.operator = operator
        return inst
    def __eq__(self, other):
        # NOTE: zip() truncation is safe here — operands are always pairs
        # (see the unpack in __new__).
        if not isinstance(other, BinOpExpr):
            return False
        return ((self.operator == other.operator)
                and (all(ops == opo for ops, opo in zip(self.operands, other.operands))))
    @property
    def dtype(self):
        # Comparisons and logical operators produce a single bit.
        if self.operator in BIN_OPERATORS:
            return Uint[1]
        # For constant shift amounts use the value itself so pygears type
        # arithmetic can compute the exact result width.
        if (self.operator in (opc.LShift, opc.RShift)) and isinstance(self.operands[1], ResExpr):
            op2 = self.operands[1].val
        else:
            op2 = self.operands[1].dtype
        # Result type via pygears type arithmetic on the operand types.
        res_t = eval(f'op1 {OPMAP[self.operator]} op2', {'op1': self.operands[0].dtype, 'op2': op2})
        if isinstance(res_t, bool):
            return Uint[1]
        return res_t
class ArrayOpExpr(Expr):
def __repr__(self):
return f'{type(self).__name__}(val={repr(self.array)}, op={repr(self.operator)})'
def __str__(self):
return f'{OPMAP[self.operator]}({str(self.array)})'
def __init__(self, array: Expr, operator):
pass
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self.operator == other.operator and | |
<filename>ditto/utils.py
from collections import deque
from typing import Type, List, Tuple
from airflow import DAG
from airflow.models import BaseOperator
import networkx as nx
from ditto.api import TaskMatcher, DAGFragment
import re
from queue import Queue
class TransformerUtils:
@staticmethod
def get_list_index_from_xcom_pull(xcom_template: str) -> str:
"""
Parses an airflow template variable and finds the list index accessed
from the return value of an :meth:`~airflow.models.TaskInstance.xcom_pull`
:Example:
>>> task_instance.xcom_pull("add_steps_to_cluster", key="return_value")[3]
will return "3"
>>> {{ ti.xcom_pull("add_steps_to_cluster", key="return_value")[2] }}
will return "2"
:param xcom_template: airflow template string to parse
:return: the list index accessed
"""
return re.search("\[(\d+)\]", xcom_template).group(1)
@staticmethod
def get_task_id_from_xcom_pull(xcom_template: str) -> str:
"""
Parses an airflow template variable and finds the task ID
from which an :meth:`~airflow.models.TaskInstance.xcom_pull` is being done
:Example:
>>> {{ ti.xcom_pull("add_steps_to_cluster", key="return_value")[0] }}
will return "add_steps_to_cluster"
>>> {{ task_instance.xcom_pull(task_ids='add_steps', key='return_value')[0] }}
will return "add_steps"
:param xcom_template: airflow template string to parse
:return: the task_id
"""
return re.search("{{.*xcom_pull\s*\(.*[\"|\'](.*)[\"|\'],.*}}", xcom_template).group(1)
@staticmethod
def add_downstream_dag_fragment(fragment_up: DAGFragment, fragment_down: DAGFragment):
"""
Attaches the roots of the `fragment_down` to the leaves of `fragment_up`,
.. note::
The leaves of `fragment_up` are found by traversing its operator DAG, not
its :class:`~ditto.api.DAGFragment` dag. This does not join fragment DAGS but
only operator DAGS in two :class:`~ditto.api.DAGFragment`\'s
See the documentation of :class:`~ditto.api.DAGFragment` for understanding
what that means.
:param fragment_up: the upstream :class:`~ditto.api.DAGFragment` to which `fragment_down`
has to be added
:param fragment_down: the downstream :class:`~ditto.api.DAGFragment`
"""
downstream_task_q: "Queue[BaseOperator]" = Queue()
seen_tasks = set()
if fragment_up is None:
return fragment_down
if fragment_down is None:
return fragment_up
for task in fragment_up.tasks:
downstream_task_q.put(task)
# add fragment_down.root_steps to the leaves of fragment_up
while not downstream_task_q.empty():
task = downstream_task_q.get()
if len(task.downstream_list) > 0:
for downstream_task in task.downstream_list:
if downstream_task not in seen_tasks:
downstream_task_q.put(downstream_task)
seen_tasks.add(downstream_task)
else:
task.set_downstream(fragment_down.tasks)
return fragment_up
@classmethod
def find_op_in_parent_fragment_chain(cls, parent_fragment: DAGFragment,
operator_type: Type[BaseOperator] = None,
task_id: str = None) -> BaseOperator:
"""
Finds the operator matched by the `operator_type` class and having task ID `task_id`
in the passed linked-list referenced by the `parent_fragment` :class:`~ditto.api.DAGFragment`
Uses the :meth:`~ditto.utils.TransformerUtils.find_op_in_dag_fragment` for understanding
how the search is done.
:param parent_fragment: See :paramref:`~ditto.api.OperatorTransformer.transform.parent_fragment`
:param operator_type: the type of operator to find
:param task_id: the task_id of the operator to find
:return: the operator found
"""
op_found = None
fragment_q: "Queue[DAGFragment]" = Queue()
fragment_q.put(parent_fragment)
while not fragment_q.empty():
dag_fragment = fragment_q.get()
op_found = cls.find_op_in_dag_fragment(dag_fragment,
operator_type=operator_type, task_id=task_id)
if op_found:
return op_found
for parent in dag_fragment.parents:
fragment_q.put(parent)
@classmethod
def find_op_in_fragment_list(cls, fragment_list: List[DAGFragment],
operator_type: Type[BaseOperator] = None,
task_id: str = None) -> BaseOperator:
"""
Lenient version of :meth:`~ditto.utils.TransformerUtils.find_op_in_fragment_list_strict`
:param fragment_list: the list of :class:`~ditto.api.DAGFragment`\'s to search in
:param operator_type: the type of operator to find
:param task_id: the task_id of the operator to find
:return: the operator found
"""
found_op = cls.find_op_in_fragment_list_strict(fragment_list,
operator_type=operator_type,
task_id=task_id)
if not found_op:
found_op = cls.find_op_in_fragment_list_strict(fragment_list,
operator_type=operator_type)
return found_op
@classmethod
def find_op_in_fragment_list_strict(cls, fragment_list: List[DAGFragment],
operator_type: Type[BaseOperator] = None,
task_id: str = None) -> BaseOperator:
"""
Uses :meth:`~ditto.utils.TransformerUtils.find_op_in_dag_fragment` to find
an operator in a list of :class:`~ditto.api.DAGFragment`\'s
:param fragment_list: the list of :class:`~ditto.api.DAGFragment`\'s to search in
:param operator_type: the type of operator to find
:param task_id: the task_id of the operator to find
:return: the operator found
"""
for fragment in fragment_list:
op_found = cls.find_op_in_dag_fragment(fragment,
operator_type=operator_type, task_id=task_id)
if op_found:
return op_found
@staticmethod
def find_op_in_dag_fragment(dag_fragment: DAGFragment,
operator_type: Type[BaseOperator] = None,
task_id: str = None,
upstream=False) -> BaseOperator:
"""
Traverses the operator dag of the given :class:`~ditto.api.DAGFragment`
and finds a :class:`~airflow.models.BaseOperator` matching the given `operator_type`
and `task_id`. First matches using the `operator_type` and subsequently using the
`task_id`. Can search upstream or downstream of the tasks in the given
:class:`~ditto.api.DAGFragment`
:param dag_fragment: fragment whose operator dag has to be searched
:param operator_type: the type of operator to find
:param task_id: the task_id of the operator to find
:param upstream: search upstream if `True` otherwise search `downstream`
:return: the operator found
"""
task_q: "Queue[BaseOperator]" = Queue()
seen_tasks = set()
for task in dag_fragment.tasks:
task_q.put(task)
while not task_q.empty():
task = task_q.get()
found_task = False
if operator_type:
if isinstance(task, operator_type):
found_task = True
if task_id:
if task.task_id == task_id:
found_task = True
else:
found_task = False
if found_task:
return task
relative_task_list = task.downstream_list
if upstream and task.upstream_list:
relative_task_list = task.upstream_list
if relative_task_list:
for relative_task in relative_task_list:
if relative_task not in seen_tasks:
task_q.put(relative_task)
seen_tasks.add(relative_task)
@staticmethod
def get_digraph_from_airflow_dag(dag: DAG) -> nx.DiGraph:
"""
Construct a :class:`~networkx.DiGraph` from the given airflow :class:`~airflow.models.DAG`
:param dag: the airflow DAG
:return: the networkx DiGraph
"""
dg = nx.OrderedDiGraph()
task_q: "deque[BaseOperator]" = deque()
task_q.extend(dag.roots)
while len(task_q) > 0:
task = task_q.popleft()
dg.add_node(task, op=task)
if task.downstream_list:
task_q.extend(task.downstream_list)
for child in task.downstream_list:
dg.add_node(child, op=child)
dg.add_edge(task, child)
return dg
@staticmethod
def get_digraph_from_matcher_dag(matcher_roots: List[TaskMatcher]) -> nx.DiGraph:
"""
Construct a :class:`~networkx.DiGraph` from the given :class:`~ditto.api.TaskMatcher` dag
:param dag: the matcher DAG
:return: the networkx DiGraph
"""
dg = nx.OrderedDiGraph()
matcher_q: "deque[TaskMatcher]" = deque()
matcher_q.extend(matcher_roots)
while len(matcher_q) > 0:
matcher = matcher_q.popleft()
dg.add_node(matcher, m=matcher)
if matcher.children:
matcher_q.extend(matcher.children)
for child in matcher.children:
dg.add_node(child, m=child)
dg.add_edge(matcher, child)
return dg
    @classmethod
    def find_sub_dag(cls, dag: DAG, matcher_roots: List[TaskMatcher]) -> Tuple[nx.DiGraph, List[nx.DiGraph]]:
        """
        The problem is to find a sub-DAG in a DAG where the sub-DAG's nodes are
        matcher functions which test nodes
        It can be generalized to: find if a DAG or DiGraph G1 is isomorphic with
        a DAG G2, with the node comparison function being running of the matchers in G1
        on nodes in G2
        .. note::
            This uses python's NetworkX graph library which uses the
            `VF2 <https://networkx.github.io/documentation/stable/reference/algorithms/isomorphism.vf2.html>`_
            algorithm for `graph isomorphism <https://ieeexplore.ieee.org/document/1323804>`_.
        .. note::
            We are trying to find an exact sub-DAG match. In graph theory, this is called a
            `node-induced <https://math.stackexchange.com/questions/1013143/difference-between-a-sub-graph-and-induced-sub-graph>`_
            subgraph. A subgraph 𝐻 of 𝐺 is called INDUCED, if for any two vertices
            𝑢,𝑣 in 𝐻, 𝑢 and 𝑣 are adjacent in 𝐻 if and only if they are adjacent in 𝐺.
            In other words, 𝐻 has the same edges as 𝐺 between the vertices in 𝐻.
        .. seealso::
            This is an NP-complete problem: https://en.wikipedia.org/wiki/Subgraph_isomorphism_problem
        :param dag: the DAG where the sub-dag has to be found
        :param matcher_roots: the root task matchers of the [TaskMatcher] dag
        :return: a tuple containing the :class:`~networkx.DiGraph` of the source DAG
            and the list of matching subdag :class:`~networkx.DiGraph`\'s
        """
        # Convert both sides to networkx graphs so VF2 can compare them.
        dag_dg = cls.get_digraph_from_airflow_dag(dag)
        matcher_dg = cls.get_digraph_from_matcher_dag(matcher_roots)
        def node_matcher(n1, n2):
            # n1 carries an airflow operator ('op'), n2 a TaskMatcher ('m');
            # a node "matches" when the matcher accepts the operator.
            task: BaseOperator = n1['op']
            matcher: TaskMatcher = n2['m']
            return matcher.does_match(task)
        digm = nx.isomorphism.DiGraphMatcher(dag_dg, matcher_dg, node_match=node_matcher)
        subdags: List[nx.DiGraph] = []
        if digm.subgraph_is_isomorphic():
            # Each isomorphism mapping yields one matching node-induced subgraph.
            for subgraph in digm.subgraph_isomorphisms_iter():
                subdags.append(dag_dg.subgraph(subgraph.keys()))
        return (dag_dg, subdags)
@staticmethod
def remove_task_from_dag(dag: DAG, dag_nodes: List[BaseOperator], task: BaseOperator):
"""
Removes the given list of :class:`~airflow.models.BaseOperator`\'s from the given
:class:`~airflow.models.DAG`
:param dag: the source airflow DAG
:param dag_nodes: the list of nodes in the source DAG
:param task: the task to remove
"""
all_other_tasks = [t for t in dag_nodes if t is not task]
for this_task in all_other_tasks:
if task.task_id in this_task._upstream_task_ids:
this_task._upstream_task_ids.remove(task.task_id)
if task.task_id in this_task._downstream_task_ids:
this_task._downstream_task_ids.remove(task.task_id)
task._upstream_task_ids.clear()
task._downstream_task_ids.clear()
task._dag = None
del dag.task_dict[task.task_id]
@classmethod
def find_matching_tasks(cls, subdag: nx.DiGraph, matcher: TaskMatcher):
"""
Find matching tasks in a :class:`~networkx.DiGraph` of operators
:param subdag: the dag to search for matches
:param matcher: the task matcher to use
:return: matching nodes
"""
matching_nodes = []
for node in subdag.nodes:
if matcher.does_match(node):
matching_nodes.append(node)
return matching_nodes
@staticmethod
def assign_task_to_dag(op: BaseOperator, dag: DAG):
"""
Assigns the given :class:`~airflow.models.BaseOperator` and all its downstream
tasks to the given :class:`~airflow.models.DAG`
:param op: the task to assign
:param dag: the dag to assign the task and its downstream to
"""
task_q: "deque[BaseOperator]" = deque()
task_q.append(op)
seen_tasks = set()
while len(task_q) > 0:
task = task_q.popleft()
task.dag = dag
if task.downstream_list:
for child in task.downstream_list:
if child not in seen_tasks:
task_q.append(child)
seen_tasks.add(child)
@classmethod
def add_dag_fragment_to_dag(cls, dag: DAG, frag: DAGFragment):
"""
Traverses and assigns all the tasks in this fragment
to the given DAG using :meth:`.assign_task_to_dag`
:param dag: the dag to assign the fragment's tasks to
:param frag: the dag fragment to assign
"""
fragment_q: "deque[DAGFragment]" = deque()
fragment_q.append(frag)
seen_frag = set()
while len(fragment_q) > 0:
frag = fragment_q.popleft()
for task in frag.tasks:
cls.assign_task_to_dag(task, dag)
if frag.children:
for child in | |
<reponame>chkoar/scikit-learn-extra<filename>sklearn_extra/cluster/_k_medoids.py
# -*- coding: utf-8 -*-
"""K-medoids clustering"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import warnings
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.metrics.pairwise import (
pairwise_distances,
pairwise_distances_argmin,
)
from sklearn.utils import check_array, check_random_state
from sklearn.utils.extmath import stable_cumsum
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import ConvergenceWarning
class KMedoids(BaseEstimator, ClusterMixin, TransformerMixin):
"""k-medoids clustering.
Read more in the :ref:`User Guide <k_medoids>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of medoids to
generate.
metric : string, or callable, optional, default: 'euclidean'
What distance metric to use. See :func:metrics.pairwise_distances
init : {'random', 'heuristic', 'k-medoids++'}, optional, default: 'heuristic'
Specify medoid initialization method. 'random' selects n_clusters
elements from the dataset. 'heuristic' picks the n_clusters points
with the smallest sum distance to every other point. 'k-medoids++'
follows an approach based on k-means++_, and in general, gives initial
medoids which are more separated than those generated by the other methods.
.. _k-means++: https://theory.stanford.edu/~sergei/papers/kMeansPP-soda.pdf
max_iter : int, optional, default : 300
Specify the maximum number of iterations when fitting.
random_state : int, RandomState instance or None, optional
Specify random state for the random number generator. Used to
initialise medoids when init='random'.
Attributes
----------
cluster_centers_ : array, shape = (n_clusters, n_features)
or None if metric == 'precomputed'
Cluster centers, i.e. medoids (elements from the original dataset)
medoid_indices_ : array, shape = (n_clusters,)
The indices of the medoid rows in X
labels_ : array, shape = (n_samples,)
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn_extra.cluster import KMedoids
>>> import numpy as np
>>> X = np.asarray([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmedoids = KMedoids(n_clusters=2, random_state=0).fit(X)
>>> kmedoids.labels_
array([0, 0, 0, 1, 1, 1])
>>> kmedoids.predict([[0,0], [4,4]])
array([0, 1])
>>> kmedoids.cluster_centers_
array([[1, 2],
[4, 2]])
>>> kmedoids.inertia_
8.0
See scikit-learn-extra/examples/plot_kmedoids_digits.py for examples
of KMedoids with various distance metrics.
References
----------
    Kaufman, L. and Rousseeuw, P.J., "Clustering by means of Medoids", in
    Statistical Data Analysis Based on the L1-Norm and Related Methods,
    edited by Y. Dodge, North-Holland, 405-416. 1987
See also
--------
KMeans
The KMeans algorithm minimizes the within-cluster sum-of-squares
criterion. It scales well to large number of samples.
Notes
-----
Since all pairwise distances are calculated and stored in memory for
the duration of fit, the space complexity is O(n_samples ** 2).
"""
    def __init__(
        self,
        n_clusters=8,
        metric="euclidean",
        init="heuristic",
        max_iter=300,
        random_state=None,
    ):
        # Per scikit-learn convention, __init__ only stores hyper-parameters
        # verbatim; all validation happens in fit() (see _check_init_args).
        self.n_clusters = n_clusters
        self.metric = metric
        self.init = init
        self.max_iter = max_iter
        self.random_state = random_state
def _check_nonnegative_int(self, value, desc):
"""Validates if value is a valid integer > 0"""
if (
value is None
or value <= 0
or not isinstance(value, (int, np.integer))
):
raise ValueError(
"%s should be a nonnegative integer. "
"%s was given" % (desc, value)
)
def _check_init_args(self):
"""Validates the input arguments. """
# Check n_clusters and max_iter
self._check_nonnegative_int(self.n_clusters, "n_clusters")
self._check_nonnegative_int(self.max_iter, "max_iter")
# Check init
init_methods = ["random", "heuristic", "k-medoids++"]
if self.init not in init_methods:
raise ValueError(
"init needs to be one of "
+ "the following: "
+ "%s" % init_methods
)
    def fit(self, X, y=None):
        """Fit K-Medoids to the provided data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features), \
                or (n_samples, n_samples) if metric == 'precomputed'
            Dataset to cluster.
        y : Ignored
        Returns
        -------
        self
        """
        random_state_ = check_random_state(self.random_state)
        self._check_init_args()
        X = check_array(X, accept_sparse=["csr", "csc"])
        if self.n_clusters > X.shape[0]:
            raise ValueError(
                "The number of medoids (%d) must be less "
                "than the number of samples %d."
                % (self.n_clusters, X.shape[0])
            )
        # Full pairwise distance matrix: O(n_samples ** 2) memory.
        D = pairwise_distances(X, metric=self.metric)
        medoid_idxs = self._initialize_medoids(
            D, self.n_clusters, random_state_
        )
        labels = None
        # Continue the algorithm as long as
        # the medoids keep changing and the maximum number
        # of iterations is not exceeded
        for self.n_iter_ in range(0, self.max_iter):
            old_medoid_idxs = np.copy(medoid_idxs)
            # Assign each sample to its nearest medoid.
            labels = np.argmin(D[medoid_idxs, :], axis=0)
            # Update medoids with the new cluster indices
            self._update_medoid_idxs_in_place(D, labels, medoid_idxs)
            # Converged: no medoid moved during this iteration.
            if np.all(old_medoid_idxs == medoid_idxs):
                break
            elif self.n_iter_ == self.max_iter - 1:
                warnings.warn(
                    "Maximum number of iteration reached before "
                    "convergence. Consider increasing max_iter to "
                    "improve the fit.",
                    ConvergenceWarning,
                )
        # Set the resulting instance variables.
        if self.metric == "precomputed":
            self.cluster_centers_ = None
        else:
            self.cluster_centers_ = X[medoid_idxs]
        # Expose labels_ which are the assignments of
        # the training data to clusters
        self.labels_ = labels
        self.medoid_indices_ = medoid_idxs
        self.inertia_ = self._compute_inertia(self.transform(X))
        # Return self to enable method chaining
        return self
def _update_medoid_idxs_in_place(self, D, labels, medoid_idxs):
"""In-place update of the medoid indices"""
# Update the medoids for each cluster
for k in range(self.n_clusters):
# Extract the distance matrix between the data points
# inside the cluster k
cluster_k_idxs = np.where(labels == k)[0]
if len(cluster_k_idxs) == 0:
warnings.warn(
"Cluster {k} is empty! "
"self.labels_[self.medoid_indices_[{k}]] "
"may not be labeled with "
"its corresponding cluster ({k}).".format(k=k)
)
continue
in_cluster_distances = D[
cluster_k_idxs, cluster_k_idxs[:, np.newaxis]
]
# Calculate all costs from each point to all others in the cluster
in_cluster_all_costs = np.sum(in_cluster_distances, axis=1)
min_cost_idx = np.argmin(in_cluster_all_costs)
min_cost = in_cluster_all_costs[min_cost_idx]
curr_cost = in_cluster_all_costs[
np.argmax(cluster_k_idxs == medoid_idxs[k])
]
# Adopt a new medoid if its distance is smaller then the current
if min_cost < curr_cost:
medoid_idxs[k] = cluster_k_idxs[min_cost_idx]
def transform(self, X):
"""Transforms X to cluster-distance space.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Data to transform.
Returns
-------
X_new : {array-like, sparse matrix}, shape=(n_query, n_clusters)
X transformed in the new space of distances to cluster centers.
"""
X = check_array(X, accept_sparse=["csr", "csc"])
if self.metric == "precomputed":
check_is_fitted(self, "medoid_indices_")
return X[:, self.medoid_indices_]
else:
check_is_fitted(self, "cluster_centers_")
Y = self.cluster_centers_
return pairwise_distances(X, Y=Y, metric=self.metric)
def predict(self, X):
"""Predict the closest cluster for each sample in X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
New data to predict.
Returns
-------
labels : array, shape = (n_query,)
Index of the cluster each sample belongs to.
"""
X = check_array(X, accept_sparse=["csr", "csc"])
if self.metric == "precomputed":
check_is_fitted(self, "medoid_indices_")
return np.argmin(X[:, self.medoid_indices_], axis=1)
else:
check_is_fitted(self, "cluster_centers_")
# Return data points to clusters based on which cluster assignment
# yields the smallest distance
return pairwise_distances_argmin(
X, Y=self.cluster_centers_, metric=self.metric
)
def _compute_inertia(self, distances):
"""Compute inertia of new samples. Inertia is defined as the sum of the
sample distances to closest cluster centers.
Parameters
----------
distances : {array-like, sparse matrix}, shape=(n_samples, n_clusters)
Distances to cluster centers.
Returns
-------
Sum of sample distances to closest cluster centers.
"""
# Define inertia as the sum of the sample-distances
# to closest cluster centers
inertia = np.sum(np.min(distances, axis=1))
return inertia
def _initialize_medoids(self, D, n_clusters, random_state_):
"""Select initial mediods when beginning clustering."""
if self.init == "random": # Random initialization
# Pick random k medoids as the initial ones.
medoids = random_state_.choice(len(D), n_clusters)
elif self.init == "k-medoids++":
medoids = self._kpp_init(D, n_clusters, random_state_)
elif self.init == "heuristic": # Initialization by heuristic
# Pick K first data points that have the smallest sum distance
# to every other point. These are the initial medoids.
medoids = np.argpartition(np.sum(D, axis=1), n_clusters - 1)[
:n_clusters
]
else:
raise ValueError(
"init value '{init}' not recognized".format(init=self.init)
)
return medoids
# Copied from sklearn.cluster.k_means_._k_init
def _kpp_init(self, D, n_clusters, random_state_, n_local_trials=None):
"""Init n_clusters seeds with a method similar to k-means++
Parameters
-----------
D : array, shape (n_samples, n_samples)
The distance matrix we will use to select medoid indices.
n_clusters : integer
The number of seeds to choose
random_state : RandomState
The generator used to initialize the centers.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-medoid clustering in a smart way
to speed up convergence. see: <NAME>. and | |
for forward pagination |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:param list[str] tag_ids: IDs of tags. Maximum 100 entries |default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100 or tag_ids has more than 100 entries
:rtype: dict
"""
if first < 1 or first > 100:
raise ValueError('first must be between 1 and 100')
if tag_ids is not None and len(tag_ids) > 100:
raise ValueError('tag_ids can not have more than 100 entries')
param = {
'after': after,
'first': first,
'tag_id': tag_ids
}
url = build_url(TWITCH_API_BASE_URL + 'tags/streams', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.APP, [])
return result.json()
def get_stream_tags(self,
broadcaster_id: str) -> dict:
"""Gets the list of tags for a specified stream (channel).\n\n
Requires User authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-stream-tags
:param str broadcaster_id: ID of the stream that's tags are going to be fetched
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:rtype: dict
"""
url = build_url(TWITCH_API_BASE_URL + 'streams/tags', {'broadcaster_id': broadcaster_id})
result = self.__api_get_request(url, AuthType.USER, [])
return result.json()
def replace_stream_tags(self,
broadcaster_id: str,
tag_ids: List[str]) -> dict:
"""Applies specified tags to a specified stream, overwriting any existing tags applied to that stream.
If no tags are specified, all tags previously applied to the stream are removed.
Automated tags are not affected by this operation.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.CHANNEL_MANAGE_BROADCAST`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#replace-stream-tags
:param str broadcaster_id: ID of the stream for which tags are to be replaced.
:param list[str] tag_ids: IDs of tags to be applied to the stream. Maximum of 100 supported.
:return: {}
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if more than 100 tag_ids where provided
:rtype: dict
"""
if len(tag_ids) > 100:
raise ValueError('tag_ids can not have more than 100 entries')
url = build_url(TWITCH_API_BASE_URL + 'streams/tags', {'broadcaster_id': broadcaster_id})
self.__api_put_request(url, AuthType.USER, [AuthScope.CHANNEL_MANAGE_BROADCAST], data={'tag_ids': tag_ids})
# this returns nothing
return {}
def get_channel_teams(self,
broadcaster_id: str) -> dict:
"""Retrieves a list of Twitch Teams of which the specified channel/broadcaster is a member.\n\n
Requires User or App authentication.
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference/#get-channel-teams
:param str broadcaster_id: User ID for a Twitch user.
:rtype: dict
:raises ~twitchAPI.types.UnauthorizedException: if app or user authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
"""
url = build_url(TWITCH_API_BASE_URL + 'teams/channel', {'broadcaster_id': broadcaster_id})
result = self.__api_get_request(url, AuthType.EITHER, [])
return make_fields_datetime(result.json(), ['created_at', 'updated_at'])
def get_teams(self,
team_id: Optional[str] = None,
name: Optional[str] = None) -> dict:
"""Gets information for a specific Twitch Team.\n\n
Requires User or App authentication.
One of the two optional query parameters must be specified.
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference/#get-teams
:param str team_id: Team ID |default| :code:`None`
:param str name: Team Name |default| :code:`None`
:raises ~twitchAPI.types.UnauthorizedException: if app or user authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if neither team_id nor name are given or if both team_id and names are given.
:rtype: dict
"""
if team_id is None and name is None:
raise ValueError('You need to specify one of the two optional parameter.')
if team_id is not None and name is not None:
raise ValueError('Only one optional parameter must be specified.')
param = {
'id': team_id,
'name': name
}
url = build_url(TWITCH_API_BASE_URL + 'teams', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.EITHER, [])
return make_fields_datetime(result.json(), ['created_at', 'updated_at'])
def get_users(self,
user_ids: Optional[List[str]] = None,
logins: Optional[List[str]] = None) -> dict:
"""Gets information about one or more specified Twitch users.
Users are identified by optional user IDs and/or login name.
If neither a user ID nor a login name is specified, the user is the one authenticated.\n\n
Requires App authentication if either user_ids or logins is provided, otherwise requires a User authentication.
If you have user Authentication and want to get your email info, you also need the authentication scope
:const:`twitchAPI.types.AuthScope.USER_READ_EMAIL`\n
If you provide user_ids and/or logins, the maximum combined entries should not exceed 100.
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-users
:param list[str] user_ids: User ID. Multiple user IDs can be specified. Limit: 100. |default| :code:`None`
:param list[str] logins: User login name. Multiple login names can be specified. Limit: 100.
|default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
:raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if more than 100 combined user_ids and logins where provided
:rtype: dict
"""
if (len(user_ids) if user_ids is not None else 0) + (len(logins) if logins is not None else 0) > 100:
raise ValueError('the total number of entries in user_ids and logins can not be more than 100')
url_params = {
'id': user_ids,
'login': logins
}
url = build_url(TWITCH_API_BASE_URL + 'users', url_params, remove_none=True, split_lists=True)
response = self.__api_get_request(url,
AuthType.USER if (user_ids is None or len(user_ids) == 0) and (
logins is None or len(logins) == 0) else AuthType.EITHER,
[])
return response.json()
def get_users_follows(self,
after: Optional[str] = None,
first: int = 20,
from_id: Optional[str] = None,
to_id: Optional[str] = None) -> dict:
"""Gets information on follow relationships between two Twitch users.
Information returned is sorted in order, most recent follow first.\n\n
Requires App authentication.\n
You have to use at least one of the following fields: from_id, to_id
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-users-follows
:param str after: Cursor for forward pagination |default| :code:`None`
:param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
:param str from_id: User ID. The request returns information about users who are being followed by
the from_id user. |default| :code:`None`
:param str to_id: User ID. The request returns information about users who are following the to_id user.
|default| :code:`None`
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
:raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
and a re authentication failed
:raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
:raises ValueError: if first is not in range 1 to 100 or neither from_id nor to_id is provided
:rtype: dict
"""
if first > 100 or first < 1:
raise ValueError('first must be between 1 and 100')
if from_id is None and to_id is None:
raise ValueError('at least one of from_id and to_id needs to be set')
param = {
'after': after,
'first': first,
'from_id': from_id,
'to_id': to_id
}
url = build_url(TWITCH_API_BASE_URL + 'users/follows', param, remove_none=True)
result = self.__api_get_request(url, AuthType.EITHER, [])
return make_fields_datetime(result.json(), ['followed_at'])
def update_user(self,
description: str) -> dict:
"""Updates the description of the Authenticated user.\n\n
Requires User authentication with scope :const:`twitchAPI.types.AuthScope.USER_EDIT`\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#update-user
:param str description: User’s account description
:raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
:raises ~twitchAPI.types.UnauthorizedException: if user | |
z-faces", fontsize=16)
>>> ax2 = fig.add_subplot(122)
>>> mesh.plot_image(phi_c, ax=ax2, normal='Y', slice_loc=0, v_type="CC")
>>> ax2.set_title("Averaged to cell centers", fontsize=16)
>>> plt.show()
Below, we show a spy plot illustrating the sparsity and mapping
of the operator
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(9, 9))
>>> ax1 = fig.add_subplot(111)
>>> ax1.spy(Azc, ms=1)
>>> ax1.set_title("Z-Face Index", fontsize=12, pad=5)
>>> ax1.set_ylabel("Cell Index", fontsize=12)
>>> plt.show()
"""
if self.dim < 3:
return None
if getattr(self, "_average_face_z_to_cell", None) is None:
n = self.vnC
if self.dim == 3:
self._average_face_z_to_cell = kron3(av(n[2]), speye(n[1]), speye(n[0]))
return self._average_face_z_to_cell
@property
def average_cell_to_face(self):
"""Averaging operator from cell centers to faces (scalar quantities).
This property constructs an averaging operator that maps scalar
quantities from cell centers to face. This averaging operator is
used when a discrete scalar quantity defined cell centers must be
projected to faces. Once constructed, the operator is stored
permanently as a property of the mesh. *See notes*.
Returns
-------
(n_faces, n_cells) scipy.sparse.csr_matrix
The scalar averaging operator from cell centers to faces
Notes
-----
Let :math:`\\boldsymbol{\\phi_c}` be a discrete scalar quantity that
lives at cell centers. **average_cell_to_face** constructs a discrete
linear operator :math:`\\mathbf{A_{cf}}` that projects
:math:`\\boldsymbol{\\phi_c}` to faces, i.e.:
.. math::
\\boldsymbol{\\phi_f} = \\mathbf{A_{cf}} \\, \\boldsymbol{\\phi_c}
where :math:`\\boldsymbol{\\phi_f}` approximates the value of the scalar
quantity at the faces. For each face, we are performing a weighted average
between the values at adjacent cell centers. In 1D, where adjacent cells
:math:`i` and :math:`i+1` have widths :math:`h_i` and :math:`h_{i+1}`,
:math:`\\phi` on face is approximated by:
.. math::
\\phi_{i \\! + \\! 1/2} \\approx \\frac{h_{i+1} \\phi_i + h_i \\phi_{i+1}}{h_i + h_{i+1}}
On boundary faces, nearest neighbour is used to extrapolate the value
from the nearest cell center. Once the operator is construct, the averaging
is implemented as a matrix vector product, i.e.::
phi_f = Acf @ phi_c
Examples
--------
Here we compute the values of a scalar function at cell centers. We then create
an averaging operator to approximate the function on the faces. We choose
to define a scalar function that is strongly discontinuous in some places to
demonstrate how the averaging operator will smooth out discontinuities.
We start by importing the necessary packages and defining a mesh.
>>> from discretize import TensorMesh
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> h = np.ones(40)
>>> mesh = TensorMesh([h, h], x0="CC")
Create a scalar variable at cell centers
>>> phi_c = np.zeros(mesh.nC)
>>> xy = mesh.cell_centers
>>> phi_c[(xy[:, 1] > 0)] = 25.0
>>> phi_c[(xy[:, 1] < -10.0) & (xy[:, 0] > -10.0) & (xy[:, 0] < 10.0)] = 50.0
Next, we construct the averaging operator and apply it to
the discrete scalar quantity to approximate the value at the faces.
>>> Acf = mesh.average_cell_to_face
>>> phi_f = Acf @ phi_c
Plot the results
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(11, 5))
>>> ax1 = fig.add_subplot(121)
>>> mesh.plot_image(phi_c, ax=ax1, v_type="CC")
>>> ax1.set_title("Variable at cell centers", fontsize=16)
>>> ax2 = fig.add_subplot(122)
>>> mesh.plot_image(phi_f, ax=ax2, v_type="F")
>>> ax2.set_title("Averaged to faces", fontsize=16)
>>> plt.show()
Below, we show a spy plot illustrating the sparsity and mapping
of the operator.
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(9, 9))
>>> ax1 = fig.add_subplot(111)
>>> ax1.spy(Acf, ms=1)
>>> ax1.set_title("Cell Index", fontsize=12, pad=5)
>>> ax1.set_ylabel("Face Index", fontsize=12)
>>> plt.show()
"""
if getattr(self, "_average_cell_to_face", None) is None:
if self.dim == 1:
self._average_cell_to_face = av_extrap(self.shape_cells[0])
elif self.dim == 2:
self._average_cell_to_face = sp.vstack(
(
sp.kron(
speye(self.shape_cells[1]), av_extrap(self.shape_cells[0])
),
sp.kron(
av_extrap(self.shape_cells[1]), speye(self.shape_cells[0])
),
),
format="csr",
)
elif self.dim == 3:
self._average_cell_to_face = sp.vstack(
(
kron3(
speye(self.shape_cells[2]),
speye(self.shape_cells[1]),
av_extrap(self.shape_cells[0]),
),
kron3(
speye(self.shape_cells[2]),
av_extrap(self.shape_cells[1]),
speye(self.shape_cells[0]),
),
kron3(
av_extrap(self.shape_cells[2]),
speye(self.shape_cells[1]),
speye(self.shape_cells[0]),
),
),
format="csr",
)
return self._average_cell_to_face
@property
def average_cell_vector_to_face(self):
"""Averaging operator from cell centers to faces (vector quantities).
This property constructs the averaging operator that independently maps the
Cartesian components of vector quantities from cell centers to faces.
This averaging operators is used when a discrete vector quantity defined at
cell centers must be approximated on the faces. Once constructed, the operator is
stored permanently as a property of the mesh.
Be aware that the Cartesian components of the original vector
are defined seperately at cell centers in a 1D numpy.array organized [ux, uy, uz].
Once projected to faces, the Cartesian components are defined on their respective
faces; e.g. the x-component lives on x-faces. The operation is implemented as a
matrix vector product, i.e.::
u_f = Acf @ u_c
Returns
-------
(n_faces, dim * n_cells) scipy.sparse.csr_matrix
The vector averaging operator from cell centers to faces. Since we
are averaging a vector quantity from cell centers, the second dimension
of the operator is the mesh dimension times the number of cells.
Notes
-----
Let :math:`\\mathbf{u_c}` be the discrete representation of a vector
quantity whose Cartesian components are defined separately at cell centers.
**average_cell_vector_to_face** constructs a discrete linear operator
:math:`\\mathbf{A_{cf}}` that projects each Cartesian component of
:math:`\\mathbf{u_c}` to the faces, i.e.:
.. math::
\\mathbf{u_f} = \\mathbf{A_{cf}} \\, \\mathbf{u_c}
where :math:`\\mathbf{u_f}` is the discrete vector quantity whose Cartesian
components are approximated on their respective cell faces; e.g. the x-component is
approximated on x-faces. For each face (x, y or z), we are simply taking a weighted average
between the values of the correct Cartesian component at the corresponding cell centers.
E.g. for the x-component, which is projected to x-faces, the weighted average on
a 2D mesh would be:
.. math::
u_x(i \\! + \\! 1/2, j) = \\frac{h_{i+1} u_x (i,j) + h_i u_x(i \\! + \\! 1,j)}{hx_i + hx_{i+1}}
where :math:`h_i` and :math:`h_{i+1}` represent the cell respective cell widths
in the x-direction. For boundary faces, nearest neighbor is used to extrapolate
the values.
Examples
--------
Here we compute the values of a vector function discretized to cell centers.
We then create an averaging operator to approximate the function on the faces.
We start by importing the necessary packages and defining a mesh.
>>> from discretize import TensorMesh
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> h = 0.5 * np.ones(40)
>>> mesh = TensorMesh([h, h], x0="CC")
Then we create a discrete vector at cell centers,
>>> centers = mesh.cell_centers
>>> u_x = -(centers[:, 1] / np.sqrt(np.sum(centers ** 2, axis=1))) * np.exp(
... -(centers[:, 0] ** 2 + centers[:, 1] ** 2) / 6 ** 2
... )
>>> u_y = (centers[:, 0] / np.sqrt(np.sum(centers ** 2, axis=1))) * np.exp(
... -(centers[:, 0] ** 2 + centers[:, 1] ** 2) / 6 ** 2
... )
>>> u_c = np.r_[u_x, u_y]
Next, we construct the averaging operator and apply it to
the discrete vector quantity to approximate the value on the faces.
>>> Acf = mesh.average_cell_vector_to_face
>>> u_f = Acf @ u_c
And plot the results
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(11, 5))
>>> ax1 = fig.add_subplot(121)
>>> mesh.plot_image(u_c, ax=ax1, v_type="CCv", view='vec')
>>> ax1.set_title("Variable at faces", fontsize=16)
>>> ax2 = fig.add_subplot(122)
>>> mesh.plot_image(u_f, ax=ax2, v_type="F", view='vec')
>>> ax2.set_title("Averaged to cell centers", fontsize=16)
>>> plt.show()
Below, we show a spy plot illustrating the sparsity and mapping
of the operator
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(9, 9))
>>> ax1 = fig.add_subplot(111)
>>> ax1.spy(Acf, ms=1)
>>> ax1.set_title("Cell Vector Index", fontsize=12, pad=5)
>>> ax1.set_ylabel("Face Index", fontsize=12)
>>> plt.show()
"""
if getattr(self, "_average_cell_vector_to_face", None) is None:
if self.dim == 1:
self._average_cell_vector_to_face = self.aveCC2F
elif self.dim == 2:
aveCCV2Fx = sp.kron(
speye(self.shape_cells[1]), av_extrap(self.shape_cells[0])
)
aveCC2VFy = sp.kron(
av_extrap(self.shape_cells[1]), speye(self.shape_cells[0])
)
self._average_cell_vector_to_face = sp.block_diag(
(aveCCV2Fx, aveCC2VFy), format="csr"
)
elif self.dim == 3:
aveCCV2Fx = kron3(
speye(self.shape_cells[2]),
speye(self.shape_cells[1]),
av_extrap(self.shape_cells[0]),
)
aveCC2VFy = kron3(
speye(self.shape_cells[2]),
av_extrap(self.shape_cells[1]),
speye(self.shape_cells[0]),
)
aveCC2BFz = kron3(
av_extrap(self.shape_cells[2]),
speye(self.shape_cells[1]),
speye(self.shape_cells[0]),
)
self._average_cell_vector_to_face = sp.block_diag(
(aveCCV2Fx, aveCC2VFy, aveCC2BFz), format="csr"
)
return self._average_cell_vector_to_face
@property
def average_cell_to_edge(self):
"""Averaging operator from cell centers to edges (scalar quantities).
This | |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 21:56:08 2020
@author: <NAME>
"""
# STEP1----------------- # Importing the libraries------------
#-------------------------------------------------------------
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import glob
import scipy.signal as ss
import csv
import sklearn
from quilt.data.ResidentMario import missingno_data
import missingno as msno
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler # for preprocessing the data
from sklearn.ensemble import RandomForestClassifier # Random forest classifier
from sklearn.tree import DecisionTreeClassifier # for Decision Tree classifier
from sklearn.svm import SVC # for SVM classification
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split # to split the data
from sklearn.model_selection import KFold # For cross validation
from sklearn.model_selection import GridSearchCV # for tuning hyper parameters; it will use all combinations of given parameters
from sklearn.model_selection import RandomizedSearchCV # same for tuning hyper parameters but will use random combinations of parameters
from sklearn.metrics import confusion_matrix,recall_score,precision_recall_curve,auc,roc_curve,roc_auc_score,classification_report
# STEP2------------------# Importing the DATASET ------------
#------------------------------------------------------------
# Loading data from the iMotions the path to csv file directory
# NOTE(review): this is a Windows-style, root-relative path; the script will
# only work when run from the matching drive root -- TODO make configurable.
os.chdir("\\ML4TakeOver\\Data\\RawData")
directory = os.getcwd()
#dataFrame_takeover_feature = pd.read_csv('takeover_cleaned_feature4ML.csv', index_col=[0])
dataFrame_takeover_feature = pd.read_csv('takeover4ML.csv', index_col=[0])
dataset = dataFrame_takeover_feature
# Restrict the data to a handful of participants to limit memory/compute.
chunk_users = ['015_M3', '015_m2', '015_M1', '014_M3', #Select a handful of ppl for saving resource
               '014_M2', '014_m1']
chunk_dataset = dataset[dataset['Name'].isin(chunk_users)]
dataset = chunk_dataset
dataset.shape
###### ======================================Encoding notes=======================================
# Alarm Type: TA =2, NoA =1, FA = 0 , Z = 3
# TakeOver : TK =1 , NTK= 0
# Alarm : 339.0 =339.0, 103.0= 4, 332.0=14, 259.0=11, 16.0=2, 178.0=6, 284.0=12,
# 213.0=9, 323.0=13, 185.0=7, 84.0=3, 137.0=5, 5.0=1, 191.0=8, 254.0=10
# Mode : +1 (Auto)= +1, -1(Manual)= 0
##### ===========================================================================================
# NOTE: dt_tmp is an alias of dataset (no copy), so the category cast below
# also mutates dataset itself.
dt_tmp = dataset
dt_tmp['Takeover'] = dt_tmp.Takeover.astype('category')
# Number of "NOT-TAKEOVER" per alarm type
dataset[dataset.Takeover == 'NTK']['Coming_AlarmType'].value_counts()
# Number of "TAKEOVER" per alarm type
dataset[dataset.Takeover == 'TK']['Coming_AlarmType'].value_counts()
## STEP3========================= Exploring the data, mainly the Label (Takeover) ====================
## ===================================================================================================
# let's check the "Takeover" distributions
sns.countplot("Takeover",data=dataset)

# Let's check the Percentage for "TakeOver".
# BUG FIX: at this stage the Takeover column still holds the raw string labels
# 'TK' / 'NTK' (see the value_counts() calls above); the numeric encoding in
# the notes (TK=1, NTK=0) is applied later. Comparing against 0/1 here always
# selected zero rows, so compare against the string labels instead.
Count_NoTakeOver = len(dataset[dataset["Takeover"]== 'NTK' ]) # Non-TakeOver is represented by 'NTK'
Count_TakeOver = len(dataset[dataset["Takeover"]== 'TK' ]) # TakeOver by 'TK'
Percentage_of_NoTakeOver = Count_NoTakeOver/(Count_NoTakeOver+Count_TakeOver)
print("percentage of None-TakeOver, 0 = ",Percentage_of_NoTakeOver*100)
Percentage_of_TakeOver= Count_TakeOver/(Count_NoTakeOver+Count_TakeOver)
print("percentage of TakeOver, 1 = ",Percentage_of_TakeOver*100)

# the amount related to valid "TakeOver" and "None-Takeover"
Amount_TakeOver = dataset[dataset["Takeover"]== 'TK']
Amount_NoTakeOver = dataset[dataset["Takeover"]== 'NTK']

plt.figure(figsize=(10,6))
plt.subplot(121)
Amount_TakeOver.plot.hist(title="TakeOver", legend =None)
plt.subplot(122)
Amount_NoTakeOver.plot.hist(title="No-Takeover",legend =None)

# Pandas offers us out-of-the-box three various correlation coefficients 1) Pearson's 2) Spearman rank 3) Kendall Tau
pearson = dataset.corr(method='pearson')
# assume target attr is the "Takeover or -3", then remove corr with itself
# NOTE(review): selecting row -3 by position is fragile -- it assumes Takeover
# sits third from the end of the numeric columns; confirm after the drops.
corr_with_target = pearson.iloc[-3][:]
# attributes sorted from the most predictive
predictivity = corr_with_target.sort_values(ascending=False)
## STEP4=========================-# Prepration for Machine Learning algorithms=========================
## ====================================================================================================
# Drop useless features for ML
# NOTE(review): these appear to be identifiers, raw gaze/brake signals and
# near-constant fields -- confirm none is needed downstream.
dataset = dataset.drop(['Timestamp','index','ID', 'Name', 'EventSource', 'ManualGear','EventW','EventN','GazeDirectionLeftY','Alarm',
                        'GazeDirectionLeftX', 'GazeDirectionRightX', 'GazeDirectionRightY','CurrentBrake',
                        'PassBy','RangeN'], axis=1) #ManualGear has only "one" value
                                                    #EventW is pretty similar to EventN
dataset.shape
#---------------------------------------------------------
# convert categorical value to the number
# convert datatype of object to int and strings
dataset['LeftLaneType'] = dataset.LeftLaneType.astype(object)
dataset['RightLaneType'] = dataset.RightLaneType.astype(object)
dataset['TOT_Class'] = dataset.TOT_Class.astype(object)
dataset['Coming_Alarm'] = dataset.Coming_Alarm.astype(object)
dataset['Takeover'] = dataset.Takeover.astype(object)
dataset['Coming_AlarmType'] = dataset.Coming_AlarmType.astype(object)
dataset['NDTask'] = dataset.NDTask.astype(object)
#****** Drop features that happing after Alarm (anything after alarm interupt takeover prediction)****************
# These columns only become known at/after the alarm event, so keeping them
# would leak future information into the takeover prediction.
dataset = dataset.drop(['Mode','TOT_Class', 'AlarmDuration','Coming_Alarm','ReactionTime','Coming_AlarmType'], axis=1) # Coming Alarm maybe helpful for ReactionTime
# ------------------------------------------------------.
# takeover (NT, TK) is our target
input_data = dataset.iloc[:, dataset.columns != 'Takeover']
X = input_data
y = dataset[['Takeover']].values.ravel()
# ======================================= Encoding Categorical variables =========================
# # Encoding categorical variables
from sklearn.preprocessing import StandardScaler,LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer, make_column_transformer

# List of all Categorical features
Cat_Features= ['LeftLaneType','RightLaneType','NDTask']

# Column positions of the categorical features in the frame.
categorical_features = [dataset.columns.get_loc(col) for col in Cat_Features]
print(categorical_features)

# Column positions of the continuous (non-object) features.
Cont_Filter = dataset.dtypes!=object
Cont_Filter = dataset.columns.where(Cont_Filter).tolist()
Cont_Filter_Cleaned = [name for name in Cont_Filter if str(name) !='nan']
conti_features = [dataset.columns.get_loc(col) for col in Cont_Filter_Cleaned]
print(conti_features)

# How many columns will be needed for each categorical feature?
print(dataset[Cat_Features].nunique(),
      'There are',"--",sum(dataset[Cat_Features].nunique().loc[:]),"--",'groups in the whole dataset')
# ===============================Create pipeline for data transformatin (normalize numeric, and hot encoder categorical)
# =============================================================================
from sklearn.pipeline import make_pipeline

numeric = make_pipeline(
    StandardScaler())

categorical = make_pipeline(
    # handles categorical features
    # sparse = False output an array not sparse matrix
    OneHotEncoder(sparse=False)) # Automatically take care of Dummy Trap

# creates a simple preprocessing pipeline (that will be combined in a full prediction pipeline below)
# to scale the numerical features and one-hot encode the categorical features.
# BUG FIX: the categorical column list previously included 'Coming_AlarmType',
# but that column is dropped in the leakage-removal step above, which makes
# ColumnTransformer fail at fit time. Use Cat_Features (the categorical
# columns still present in the frame) instead.
preprocess = make_column_transformer((numeric, Cont_Filter_Cleaned),
                                     (categorical, Cat_Features),
                                     remainder='passthrough')

# =============================================================================
# Taking care of splitting
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.20, random_state = 42)

# apply preprocess step (normalize the numeric value and one hot encoding for the categorical)
preprocess.fit_transform(X_train)
# =============================================================================
# SVM is usually optimized using two parameters: gamma and C.
# A high value of gamma fits the training data more closely (low bias, high
# variance); a large cost parameter C penalises misclassification heavily.
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                     'C': [1, 10, 100, 1000]},
                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
# Reduced grid actually used below, to keep the search cheap.
tuned_parameters2 = [{'kernel': ['linear'], 'C': [1, 100]}]
# Full prediction pipeline (preprocessing + classifier), kept for reference.
model = make_pipeline(
    preprocess,
    SVC())
##### Simple un-tuned SVC baseline ##############
from sklearn import svm
clf = svm.SVC()
# Fit the preprocessing ONLY on the training split.
X_train = preprocess.fit_transform(X_train)
grid_result = clf.fit(X_train, y_train)
# FIX: use transform(), not fit_transform(), on the test split.  Re-fitting
# on test data both leaks test information and can produce an encoding that
# is incompatible with the one the model was trained on.
X_test = preprocess.transform(X_test)
clf.predict(X_test)
## we should try this in near future: https://machinelearningmastery.com/multi-class-classification-tutorial-keras-deep-learning-library/
######################################################################
# GridSearchCV over the reduced grid with 5-fold CV, once per scoring metric.
scores = ['precision', 'recall']
best_params = []
for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    print()
    clf = GridSearchCV(
        SVC(), tuned_parameters2, scoring='%s_macro' % score
    )
    # FIX: X_train and X_test were already transformed above.  The original
    # re-ran preprocess.fit_transform() on every loop iteration, which re-fits
    # the ColumnTransformer on an ndarray (no column names) and fails on the
    # second iteration.
    grid_result = clf.fit(X_train, y_train)
    best_params.append(grid_result.best_params_)
    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print()
    print("Grid scores on development set:")
    print()
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        print("%0.3f (+/-%0.03f) for %r"
              % (mean, std * 2, params))
    print()
    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    y_true, y_pred = y_test, clf.predict(X_test)
    print(classification_report(y_true, y_pred))
    print()
# =============================================================================
# ================= Resampling the imbalanced Label of "TakeOver" ========================================
#==========================================================================================================
# We create the preprocessing pipelines for both numeric and categorical data.
from sklearn.pipeline import Pipeline
from sklearn.utils import resample
numeric_features = Cont_Filter_Cleaned
numeric_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='median')),
    ('scaler', StandardScaler())])
# NOTE(review): 'Coming_AlarmType' was dropped from `dataset` earlier in this
# script, so the ColumnTransformer below will fail to find it -- verify this
# column list against the current contents of `dataset`.
categorical_features = ['LeftLaneType','RightLaneType','Coming_AlarmType','NDTask']
categorical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
    transformers=[
        ('num', numeric_transformer, numeric_features),
        ('cat', categorical_transformer, categorical_features)])
# Append classifier to preprocessing pipeline.
# Separate input features and target
y = dataset.Takeover
X = dataset.drop('Takeover', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=27)
# concatenate our training data back together (so the classes can be
# resampled jointly with their features)
X = pd.concat([X_train, y_train], axis=1)
# separate minority and majority classes
take_over = X[X.Takeover=='TK']
not_takeover = X[X.Takeover=='NTK']
# upsample minority
# NOTE(review): this assumes 'NTK' is the minority class -- confirm from the
# class counts before trusting the resampling direction.
not_takeover_upsampled = resample(not_takeover,
                                  replace=True,  # sample with replacement
                                  n_samples=len(take_over),  # match number in majority class
                                  random_state=27)  # reproducible results
# combine majority and upsampled minority
upsampled = pd.concat([take_over, not_takeover_upsampled])
# check new class counts
upsampled.Takeover.value_counts() #713585
# trying logistic regression again with the balanced dataset
y_train = upsampled.Takeover
X_train = upsampled.drop('Takeover', axis=1)
##### LOGISTIC REGRESSION ###############################
#########################################################
# Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
                      ('classifier', LogisticRegression())])
# NOTE: Pipeline.fit returns the pipeline itself, not scores -- the name
# `y_score` is misleading.
y_score = clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test)) # model score: 0.846
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
##### DECISION TREE ##################################
#########################################################
from sklearn.tree import DecisionTreeClassifier
# Pipeline: preprocessing -> PCA -> decision tree (fixed seed).
clf_3 = Pipeline(steps=[('preprocessor', preprocessor),
                        ('reduce_dim', PCA()),
                        ('clf', DecisionTreeClassifier(random_state=0))])
# NOTE: fit() returns the pipeline itself; `y_score` is a misleading name.
y_score = clf_3.fit(X_train, y_train)
print("model score: %.3f" % clf_3.score(X_test, y_test)) # model score: 0.99
y_true_3, y_pred_3 = y_test, clf_3.predict(X_test)
print(classification_report(y_true_3, y_pred_3))
##### RANDOM FOREST ##################################
#########################################################
# Shallow forest (max_depth=2) -- effectively a baseline model.
clf_2 = Pipeline(steps=[('preprocessor', preprocessor),
                        ('reduce_dim', PCA()),
                        ('clf',RandomForestClassifier(max_depth=2, random_state=0))])
y_score = clf_2.fit(X_train, y_train)
print("model score: %.3f" % clf_2.score(X_test, y_test)) # model score: 0.830
y_true_2, y_pred_2 = y_test, clf_2.predict(X_test)
print(classification_report(y_true_2, y_pred_2))
##### Regularized Greedy Forest (RGF) ##################################
############################################################################
from sklearn.utils.validation import check_random_state
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.ensemble import GradientBoostingClassifier
from rgf.sklearn import RGFClassifier
# Cross-validate on the upsampled (balanced) data.
y_upsampled = upsampled.Takeover
X_upsampled = upsampled.drop('Takeover', axis=1)
clf_5 = Pipeline(steps=[('preprocessor', preprocessor),
                        ('reduce_dim', PCA()),
                        ('classifier', RGFClassifier(max_leaf=400,
                                                     algorithm="RGF_Sib",
                                                     test_interval=100,
                                                     verbose=True))])
n_folds = 5
rgf_scores = cross_val_score(clf_5,
                             X_upsampled,
                             y_upsampled,
                             cv=StratifiedKFold(n_folds))
# mean score over the folds
rgf_score = sum(rgf_scores)/n_folds
print('RGF Classifier score: {0:.5f}'.format(rgf_score)) #RGF Classifier score: 0.92304
# NOTE(review): the expression below constructs an XGBClassifier and
# immediately discards it -- it has no effect.  Presumably leftover from an
# experiment; also verify that `class_weight` is a valid XGBClassifier
# argument before reviving it.
XGBClassifier(class_weight='balanced')
##### Gradient Boosting #############################################
############################################################################
from sklearn.ensemble import GradientBoostingClassifier
clf_gb = Pipeline(steps=[('preprocessor', preprocessor),
                         ('reduce_dim', PCA()),
                         ('classifier', GradientBoostingClassifier(n_estimators=20,
                                                                   learning_rate=0.01,
                                                                   subsample=0.6,
                                                                   random_state=127))])
# f1_weighted accounts for any residual per-fold class imbalance
gb_scores = cross_val_score(clf_gb,
                            X_upsampled,
                            y_upsampled,
                            scoring="f1_weighted",
                            cv=StratifiedKFold(n_folds))
gb_score = sum(gb_scores)/n_folds
print('Gradient Boosting Classifier score: {0:.5f}'.format(gb_score)) #score: 0.79832
print('>> Mean CV score is: ', round(np.mean(gb_scores),3))
pltt = sns.distplot(pd.Series(gb_scores,name='CV scores distribution(Gradiaent Boosting)'), color='r')
##### ADA Boost #########################################################
###########################################################################
from sklearn.ensemble import AdaBoostClassifier
clf_4 = Pipeline(steps=[('preprocessor', preprocessor),
                        ('reduce_dim', PCA()),
                        ('classifier', AdaBoostClassifier(n_estimators=100, random_state=0))])
y_score = clf_4.fit(X_train, y_train)
print("model score: %.3f" % clf_4.score(X_test, y_test)) # model score: 0.887
y_true_4, y_pred_4 = y_test, clf_4.predict(X_test)
print(classification_report(y_true_4, y_pred_4))
##### GAUSSIAN PROCESS #################################
#########################################################
from | |
"""
Wrapper to get species from a summary dictionary and split them
into train/val/test using the scaffold split in ChemProp.
"""
import csv
import os
import numpy as np
import json
import shutil
import argparse
from rdkit import Chem
from tqdm import tqdm
from nff.utils import bash_command, parse_args, fprint, prop_split
def apply_transfs(props, summary_dic):
    """
    Create requested derived properties in the dataset.  For example, if a
    requested property is log_<actual property>, compute it by taking the
    log of the base property for every species that has one.
    Args:
        props (list[str]): list of property names that you want to predict
        summary_dic (dict): dictionary of the form {smiles: sub_dic},
            where `sub_dic` is a dictionary with all the species properties
            apart from its conformers.
    Returns:
        None
    """
    sub_dics = list(summary_dic.values())
    for prop in props:
        # already stored on at least one species -> nothing to derive
        if any(prop in sub_dic for sub_dic in sub_dics):
            continue
        # only the "log_" prefix is a known transformation
        if not prop.startswith("log_"):
            raise Exception((f"{prop} is not in the summary "
                             "dictionary and doesn't have a prefix "
                             "corresponding to a known transformation, "
                             "such as log."))
        base_prop = prop.split("log_")[-1]
        transf = np.log
        if not any(base_prop in sub_dic for sub_dic in sub_dics):
            raise Exception((f"{base_prop} is not in the summary "
                             "dictionary."))
        # add the derived key to every species that carries the base property
        for sub_dic in sub_dics:
            if base_prop in sub_dic:
                sub_dic[prop] = transf(sub_dic[base_prop])
def to_csv(summary_dic,
           props,
           csv_file):
    """
    Write the SMILES and properties in the summary dictionary
    to a csv file.
    Args:
        summary_dic (dict): dictionary of the form {smiles: sub_dic},
            where `sub_dic` is a dictionary with all the species properties
            apart from its conformers.
        props (list[str]): list of property names that you want to predict
        csv_file (str): path to csv file that you want to write to
    Returns:
        None
    """
    columns = ['smiles'] + props
    dict_data = []
    for smiles, sub_dic in summary_dic.items():
        dic = {}
        for prop in props:
            if prop in sub_dic:
                dic[prop] = sub_dic[prop]
            elif prop.startswith("log_"):
                # FIX: fall back to computing a log property from its base
                # property.  The original computed this value but then
                # overwrote `dic` with a comprehension that required `prop`
                # itself to be present, so the fallback was dead code and a
                # missing log property raised KeyError instead.
                base_prop = prop.split("log_")[-1]
                dic[prop] = np.log(sub_dic[base_prop])
            else:
                # preserve the original failure mode (KeyError) for props
                # that are genuinely missing
                dic[prop] = sub_dic[prop]
        dic["smiles"] = smiles
        dict_data.append(dic)
    # newline='' is the documented way to open files for the csv module;
    # it prevents spurious blank rows on platforms that translate newlines
    with open(csv_file, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=columns)
        writer.writeheader()
        for data in dict_data:
            writer.writerow(data)
def filter_prop_and_pickle(sample_dic, props):
    """
    Keep only the species that have a known value for every property in
    `props` and a non-null path to a pickle file with conformer information.
    Args:
        sample_dic (dict): Sample of `summary_dic` that will be used in this dataset
        props (list[str]): list of property names that you want to predict
    Returns:
        sample_dic (dict): Updated `sample_dic` with the above filters applied.
    """
    filtered = {}
    for smiles, sub_dic in sample_dic.items():
        has_all_props = all(prop in sub_dic for prop in props)
        has_pickle = sub_dic.get("pickle_path") is not None
        if has_all_props and has_pickle:
            filtered[smiles] = sub_dic
    return filtered
def filter_atoms(sample_dic, max_atoms):
    """
    Exclude species whose atom count (with explicit hydrogens) is above
    `max_atoms`.
    Args:
        sample_dic (dict): Sample of `summary_dic` that will be used in this dataset
        max_atoms (int): Maximum number of atoms allowed in a species
    Returns:
        sample_dic (dict): Updated `sample_dic` with the above filter applied.
    """
    # `None` means no limit was requested
    limit = float("inf") if max_atoms is None else max_atoms
    kept = {}
    for smiles in tqdm(list(sample_dic.keys())):
        # count atoms after adding explicit hydrogens
        mol = Chem.AddHs(Chem.MolFromSmiles(smiles))
        if mol.GetNumAtoms() <= limit:
            kept[smiles] = sample_dic[smiles]
    return kept
def subsample(summary_dic,
              props,
              max_specs,
              max_atoms,
              dataset_type,
              seed):
    """
    Reduce the number of species according to `props`, `max_specs`,
    and `max_atoms`.
    Args:
        summary_dic (dict): dictionary of the form {smiles: sub_dic},
            where `sub_dic` is a dictionary with all the species properties
            apart from its conformers.
        props (list[str]): list of property names that you want to predict
        max_specs (int): maximum number of species allowed in dataset
        max_atoms (int): Maximum number of atoms allowed in a species
        dataset_type (str): type of problem, e.g. "classification" or
            "regression".
        seed (int): random seed for split
    Returns:
        sample_dic (dict): Updated `sample_dic` with the above filter applied.
    """
    # drop species missing any requested property or a conformer pickle
    sample_dic = filter_prop_and_pickle(summary_dic, props)
    # enforce the atom-count cap
    sample_dic = filter_atoms(sample_dic, max_atoms)
    # down-select to at most `max_specs` species: classification keeps as
    # much of the underrepresented class as possible, otherwise the
    # selection is random
    keep_smiles = prop_split(max_specs=max_specs,
                             dataset_type=dataset_type,
                             props=props,
                             sample_dic=sample_dic,
                             seed=seed)
    return {smiles: sample_dic[smiles] for smiles in keep_smiles}
def make_split(summary_path,
               csv_folder,
               cp_folder,
               props,
               split_sizes,
               split_type,
               max_specs,
               max_atoms,
               dataset_type,
               seed):
    """
    Split the species into train, test, and validation sets.
    Args:
        summary_path (str): path to the JSON file that summarizes
            all of the information about the species, apart from their
            conformers.
        csv_folder (str): path to the folder in which we will save our
            csv files with the SMILES, properties and training splits.
        cp_folder (str): path to the ChemProp folder on your computer
        props (list[str]): list of property names that you want to predict
        split_sizes (list[float]): list of the form [train_split_size, val_split_size,
            test_split_size].
        split_type (str): how to split the data. Options can be found in the Chemprop
            script `split_data.py`. A good choice is usually `scaffold_balanced`, which splits
            in such a way that similar scaffolds are in the same split.
        max_specs (int): maximum number of species allowed in dataset
        max_atoms (int): Maximum number of atoms allowed in a species
        dataset_type (str): type of problem, e.g. "classification" or
            "regression".
        seed (int): random seed for split
    Returns:
        None
    """
    with open(summary_path, "r") as f:
        summary_dic = json.load(f)
    # apply any transformations to the data, e.g. wanting a
    # dataset that has the log of a value instead of the
    # value itself
    apply_transfs(props, summary_dic)
    # filter based on props, max species and max number of atoms
    summary_dic = subsample(summary_dic=summary_dic,
                            props=props,
                            max_specs=max_specs,
                            max_atoms=max_atoms,
                            dataset_type=dataset_type,
                            seed=seed)
    # path csv file with SMILES and properties
    all_csv = os.path.join(csv_folder, "all.csv")
    if not os.path.isdir(csv_folder):
        os.makedirs(csv_folder)
    # write the contents of `summary_dic` to the csv
    to_csv(summary_dic, props, all_csv)
    # run the chemprop script `split_data.py` to make the splits
    # from `all.csv`
    script = os.path.join(cp_folder, "scripts", "split_data.py")
    # e.g. [0.8, 0.1, 0.1] -> "0.8 0.1 0.1"
    split_str = " ".join(np.array(split_sizes).astype("str"))
    # NOTE: requires a conda environment named "chemprop" to be available
    cmd = (f"source activate chemprop && "
           f"python {script} --split_type {split_type} "
           f"--split_sizes {split_str} "
           f"--data_path {all_csv} "
           f"--save_dir {csv_folder} "
           f"--seed {seed}")
    # block until the split script has finished writing train/val/test csvs
    p = bash_command(cmd)
    p.wait()
def add_just_smiles(csv_folder):
    """
    Take csv files with SMILES + properties and use them to create files
    with just the SMILES strings.
    Args:
        csv_folder (str): path to the folder in which we will save our
            csv files with the SMILES, properties and training splits.
    Returns:
        None
    """
    for split in ['train', 'val', 'test']:
        in_path = os.path.join(csv_folder, split + '.csv')
        with open(in_path) as in_file:
            reader = csv.reader(in_file, delimiter=',')
            next(reader, None)  # drop the header row
            # the SMILES string is the first column of every row
            smiles_list = [row[0] for row in reader]
        # save to "train_smiles.csv", "val_smiles.csv", etc.
        out_path = os.path.join(csv_folder, f"{split}_smiles.csv")
        with open(out_path, 'w') as out_file:
            writer = csv.writer(out_file)
            writer.writerow(["smiles"])
            for smiles in smiles_list:
                writer.writerow([smiles])
def rename_csvs(csv_folder):
    """
    Rename the csvs saved by the chemprop split function to distinguish
    between what is just SMILES and what is SMILES + properties.
    Args:
        csv_folder (str): path to the folder in which we will save our
            csv files with the SMILES, properties and training splits.
    Returns:
        None
    """
    for split in ('train', 'val', 'test'):
        # "train.csv" -> "train_full.csv", etc.
        src = os.path.join(csv_folder, f"{split}.csv")
        dst = os.path.join(csv_folder, f"{split}_full.csv")
        shutil.move(src, dst)
def summarize(csv_folder, dataset_type):
"""
Summarize where the splits have been saved and what their contents are.
Args:
csv_folder (str): path to the folder in which we will save oru
csv files with the SMILES, properties and training splits.
dataset_type (str): type of problem, e.g. "classification" or
"regression".
Returns:
None
"""
msgs = []
for name in ['train', 'val', 'test', 'all']:
if name == 'all':
path = os.path.join(csv_folder, f"{name}.csv")
else:
path = os.path.join(csv_folder, f"{name}_full.csv")
with open(path, "r") as f:
lines = f.readlines()[1:]
num_specs = len(lines)
this_msg = f"{num_specs} species"
if dataset_type == | |
import subprocess as sp
import os
import speech_recognition as sr
import webbrowser as wb
r = sr.Recognizer()
while True:
os.system("tput setaf 10")
print("\t\tWELCOME TO VOICE CONTROLLED AUTOMATION MENU")
print("\t\t--------------------------------")
print()
print()
print("which services would you like to automate:")
print("""
01. Linux
02. AWS
03. Hadoop
04. Docker
05. Exit
""")
input("Press enter....")
with r.Microphone() as source:
print("We are listening to you.......")
audio = r.listen(source)
print("done...")
record = r.recognize_google(audio)
print(record)
# linux frontend
if ('linux' in record) or ('Linux' in record):
print("""
01. Create a directory
02. List files in current directory
03. Create an empty file
04. Create a file and open for editing
05. Add a new user
06. Set/change password
07. Show current RAM usage
08. Show the disk usage
09. Check if package is present
10. Remove a package
11. Lists the network configuration
12. Checks the java running services
13. List the CPU info
14. Show currenty running processes
15. Show running time of device
16. Clear the cache
17. Check which package provide the command
18. Checks the connectivity to IP
19. Create a user for running a specific command
00. Go to previous menu
""")
print("What next?")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
ch = r.recognize_google(audio)
print(ch)
if ('directory' in ch):
print("Name of directory")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
str = "mkdir " + z
os.system(str)
elif ('list' in ch) or ('show all' in ch):
os.system("ls")
elif ('create' or 'make' in ch) and ('empty' or 'blank' in ch) and ('file' in ch):
print("Give the name of file")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=3&z={}'.format(z))
str = "touch "+ z
os.system(str)
elif ('create' in ch) and ('file' in ch):
print("Give the name of file")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=4&z={}'.format(z))
str = "cat " + z
os.system(str)
elif ('add' or 'make' in ch) and ('user' in ch):
print("Give the name of user")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=5&z={}'.format(z))
str = "useradd " + z
sp.getoutput(str)
elif ('set' or 'make' in ch) and ('password' or 'passcode' in ch):
print("Give the password")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=6&z={}'.format(z))
str = "passwd " + z
os.system(str)
elif ('show' or 'what' in ch) and ('ram' in ch) and ('usage' or 'utilization' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=7')
os.system("free -m")
elif ('show' or 'what' in ch) and ('disk' in ch) and ('usage' or 'utilization' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=8')
os.system("df -hT")
elif ('package' or 'software' in ch) and ('present' in ch):
print("give name of package")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=9&z={}'.format(z))
os.system("rpm -q {}".format(z))
elif ('package' or 'software' in ch) and ('delete' in ch):
print("give name of package")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=10&z={}'.format(z))
os.system("rpm -e {}".format(z))
elif ("ip" or "network configuration" in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=11')
os.system("ifconfig")
elif ('java' in ch) and ('processes' or 'process' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=12')
os.system("jps")
elif ('cpu' or 'CPU' in ch) and ('information' or 'info' or 'data' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=13')
os.system("lscpu")
elif ('running' in ch) and ('processes' or 'process' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=14')
os.system("ps -aux")
elif ('running' in ch) and ('time' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=15')
os.system("uptime")
elif ('clear' or 'clean') and ('chache' or 'chaches' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=16')
os.system("echo 3 > /proc/sys/vm/drop_caches")
elif ('which' or 'what' in ch) and ('package' or 'software' in ch) and ('provides' in ch):
print("give name of package")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=17&z={}'.format(z))
os.system("yum whatprovides {}".format(z))
elif ('ping' in ch) or ('check connectivity' in ch):
print("give the ip of system")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=18&z={}'.format(z))
os.system("ping {}".format(z))
elif ('create user' in ch) and ('command' in ch):
print("give the name of user")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
print("give the command to be run")
with r.Microphone() as source:
print("listening....")
audio1 = r.listen(source)
print("done..")
a = r.recognize_google(audio1)
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=19&z={}&a={}'.format(z,a))
os.system("useradd -s {} {}".format(z,a))
elif ('go back' or 'previous' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=linux&y=20')
continue
else:
print("ERROR: invalid search")
# docker frontend
elif ("docker" in record):
print("""
01. Checks the docker version
02. Launching a container
03. Pull image from dockerhub
04. List running docker containers
05. List all docker containers
06. List all images present in system
07. Starting a docker container
08. Stopping a docker container
09. Deleting a docker container
10. Stop all docker containers
11. Delete all docker containers
00. Go to previous menu
""")
print("What next?")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
ch = r.recognize_google(audio)
print(ch)
if ('docker version' or 'version' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=1')
os.system("docker --version")
elif ('container' in ch) and ('launch' in ch):
print('Give the name of the container')
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
print('Give the name of the image to be used')
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
a = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=2&z={}&a={}'.format(z,a))
os.system("docker run -it --name {} {}".format(z,a))
elif ('image' in ch) and ('pull' in ch):
print('Give the name of the image')
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=3&z={}'.format(z))
os.system("docker pull {}".format(z))
elif ('list' in ch) and ('running' in ch) and ('containers' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=4')
os.system("docker ps")
elif ('list' and 'all' in ch) and ('containers' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=5')
os.system("docker ps -a")
elif ('list' in ch) and ('images' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=6')
os.system("docker images")
elif ('container' in ch) and ('start' in ch):
print('Give the name/id of the container')
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=7&z={}'.format(z))
os.system("docker start {}".format(z))
elif ('container' in ch) and ('stop' in ch):
print('Give the name/id of the container')
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=8&z={}'.format(z))
os.system("docker stop {}".format(z))
elif ('container' in ch) and ('delete' in ch):
print('Give the name/id of the container')
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=9&z={}'.format(z))
os.system("docker rm {}".format(z))
elif ('container' in ch) and ('stop' in ch) and ('all' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=11')
os.system("docker container rm $(docker container ls –aq)")
elif ('container' in ch) and ('delete' in ch) and ('all' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=10')
os.system("docker container stop $(docker container ls –aq) && docker system prune –af ––volumes")
elif ('go' or 'show' in ch) and ('back' or 'previous' in ch):
#wb.open('ip:80/cgi-bin/backend.py?x=docker&y=12')
continue
else:
print("ERROR: Couldn't understand the command")
# hadoop fronend
elif ("hadoop" in record):
print("""
01. Checks the hadoop version
02. Format a hadoop namenode
03. Start the namenode service
04. Start the datanode service
05. Show the hadoop report in namenode
06. List all files present in hadoop filesystem
07. Upload the file to hadoop filesystem
08. Remove the file from hadoop filesystem
09. List the contents of a file in hdfs
10. Upload the file with a defined block size
11. Create an empty file in hadoop filesystem
00. Go to previous menu
""")
print("What next?")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
ch = r.recognize_google(audio)
print(ch)
if ("show" in ch) and ("hadoop" in ch) and ("version" in ch):
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=1")
os.system("hadoop version")
elif ("namenode" in ch) and ("format" in ch):
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=2")
os.system("hadoop namenode -format")
elif ("start" in ch) and ("namenode" in ch):
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=3")
os.system("hadoop-daemon.sh start namenode")
elif ("start" in ch) and ("datanode" in ch):
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=4")
os.system("hadoop-daemon.sh start datanode")
elif ("hadoop" or "namenode" in ch) and ("report" in ch):
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=5")
os.system("hadoop dfsadmin -report")
elif ("list" in ch) and ("cluster" or "filesystem" in ch):
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=6")
os.system("hadoop fs -ls /")
elif ("upload" or "put" in ch) and ("files" or "file" in ch):
print("File name?")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
print("File destination in cluster?")
with r.Microphone() as source:
print("listening....")
audio1 = r.listen(source)
print("done..")
a = r.recognize_google(audio1)
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=7&z={}&a={}".format(z,a))
os.system("hadoop fs -put {} /{} ".format(z,a))
elif ("remove" or "delete" in ch) and ("hadoop" or "cluster" or "file system" in ch):
print("File name?")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=8&z={}".format(z))
os.system("hadoop fs -rm /{}".format(z))
elif ("read" or "show" in ch) and ("file" in ch):
print("File name?")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=9&z={}".format(z))
os.system("hadoop fs -cat /{}".format(z))
elif ("block size" in ch) and ("upload" or "load" in ch) and ("file system" or "cluster" in ch):
print("Block size?")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
print("File name?")
with r.Microphone() as source:
print("listening....")
audio1 = r.listen(source)
print("done..")
a = r.recognize_google(audio1)
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=10&z={}&a={}".format(z,a))
os.system("hadoop fs -Ddfs.block.size={} -put {} /".format(z))
elif ("create" or "make" in ch) and ("empty" in ch) ("file" in ch):
print("File name?")
with r.Microphone() as source:
print("listening....")
audio = r.listen(source)
print("done..")
z = r.recognize_google(audio)
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=11&z={}".format(z))
os.system("hadoop fs -touchz {} /".format(z))
elif ("go back" or "show previous" in ch):
#wb.open("ip:80/cgi-bin/backend.py?x=hadoop&y=12")
continue
else:
print("ERROR: Couldn't find anything")
#AWS frontend
elif ("aws" or "AWS" or "A W S" in record):
print("""
01. Configure AWS
02. Create a Key-Pair
03. Create a Security Group
04. Launching an instance
05. Creating EBS
06. Attaching EBS
07. Creating S3 Bucket
08. | |
<reponame>afourmy/e-napalm
from ast import literal_eval
from atexit import register
from contextlib import contextmanager
from flask_login import current_user
from importlib.util import module_from_spec, spec_from_file_location
from json import loads
from logging import error, info, warning
from operator import attrgetter
from os import getenv, getpid
from pathlib import Path
from re import search
from sqlalchemy import (
Boolean,
Column,
create_engine,
event,
ForeignKey,
Float,
inspect,
Integer,
PickleType,
String,
Table,
Text,
)
from sqlalchemy.dialects.mysql.base import MSMediumBlob
from sqlalchemy.exc import InvalidRequestError, OperationalError
from sqlalchemy.ext.associationproxy import ASSOCIATION_PROXY
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
from sqlalchemy.ext.mutable import MutableDict, MutableList
from sqlalchemy.orm import aliased, configure_mappers, scoped_session, sessionmaker
from sqlalchemy.orm.collections import InstrumentedList
from sqlalchemy.types import JSON
from time import sleep
from traceback import format_exc
from uuid import getnode
from eNMS.variables import vs
class Database:
    def __init__(self):
        """Set up the engine, session, declarative base and converters.

        Settings from `vs.database` are copied onto the instance first, so
        attributes such as `self.engine`, `self.columns` and
        `self.transactions` initially hold raw configuration dictionaries.
        """
        for setting in vs.database.items():
            setattr(self, *setting)
        self.database_url = getenv("DATABASE_URL", "sqlite:///database.db")
        # The dialect is the URL scheme: "sqlite", "postgresql", "mysql", ...
        self.dialect = self.database_url.split(":")[0]
        # Dedicated exception type raised on access-control violations.
        self.rbac_error = type("RbacError", (Exception,), {})
        self.configure_columns()
        # NOTE: at this point self.engine is the settings *dict* copied from
        # vs.database above; it is deliberately replaced here by the actual
        # SQLAlchemy engine built from those settings.
        self.engine = create_engine(
            self.database_url,
            **self.engine["common"],
            **self.engine.get(self.dialect, {}),
        )
        self.session = scoped_session(sessionmaker(autoflush=False, bind=self.engine))
        self.base = declarative_base(metaclass=self.create_metabase())
        self.configure_associations()
        self.configure_events()
        # Converters used to coerce string form values back to Python types.
        self.field_conversion = {
            "bool": bool,
            "dict": self.dict_conversion,
            "float": float,
            "int": int,
            "integer": int,
            "json": loads,
            "list": str,
            "str": str,
            "date": str,
        }
        # Flatten retry settings into attributes, e.g. retry_fetch_number.
        for retry_type, values in self.transactions["retry"].items():
            for parameter, number in values.items():
                setattr(self, f"retry_{retry_type}_{parameter}", number)
        # self.cleanup is presumably defined outside this excerpt — runs at
        # interpreter exit (TODO confirm).
        register(self.cleanup)
    def _initialize(self, env):
        """One-time application bootstrap: create tables and seed data.

        Returns True when this is the first initialization (no admin user
        exists yet), None when running from the CLI.
        """
        self.register_custom_models()
        try:
            self.base.metadata.create_all(bind=self.engine)
        except OperationalError:
            # Most likely another process created the tables concurrently.
            info(f"Bypassing metadata creation for process {getpid()}")
        configure_mappers()
        self.configure_model_events(env)
        if env.detect_cli():
            return
        # First run is detected by the absence of the admin user.
        first_init = not self.fetch("user", allow_none=True, name="admin")
        if first_init:
            admin_user = vs.models["user"](name="admin", is_admin=True)
            self.session.add(admin_user)
            self.session.commit()
            if not admin_user.password:
                admin_user.update(password="<PASSWORD>")
            # Register this server instance in the database.
            self.factory(
                "server",
                **{
                    "name": vs.server,
                    "description": vs.server,
                    "mac_address": str(getnode()),
                    "ip_address": vs.server_ip,
                    "status": "Up",
                },
            )
            # Seed the notification-banner parameters from the settings.
            parameters = self.factory(
                "parameters",
                **{
                    f"banner_{property}": vs.settings["notification_banner"][property]
                    for property in ("active", "deactivate_on_restart", "properties")
                },
            )
            self.session.commit()
        # Runs interrupted by the previous shutdown are marked as aborted.
        for run in self.fetch(
            "run", all_matches=True, allow_none=True, status="Running"
        ):
            run.status = "Aborted (RELOAD)"
            run.service.status = "Idle"
        parameters = self.fetch("parameters")
        if parameters.banner_deactivate_on_restart:
            parameters.banner_active = False
        self.session.commit()
        return first_init
def create_metabase(self):
class SubDeclarativeMeta(DeclarativeMeta):
def __init__(cls, *args): # noqa: N805
DeclarativeMeta.__init__(cls, *args)
if hasattr(cls, "database_init") and "database_init" in cls.__dict__:
cls.database_init()
self.set_custom_properties(cls)
return SubDeclarativeMeta
@staticmethod
def dict_conversion(input):
try:
return literal_eval(input)
except Exception:
return loads(input)
    def configure_columns(self):
        """Define dialect-aware column factories (Dict, List, string types)."""
        class CustomPickleType(PickleType):
            # Required for SQLAlchemy statement caching with custom types.
            cache_ok = True
            if self.dialect.startswith(("mariadb", "mysql")):
                # MySQL/MariaDB need MEDIUMBLOB to store large pickles.
                impl = MSMediumBlob
        self.Dict = MutableDict.as_mutable(CustomPickleType)
        self.List = MutableList.as_mutable(CustomPickleType)
        if self.dialect == "postgresql":
            # PostgreSQL TEXT is unbounded; other dialects get a length.
            self.LargeString = Text
        else:
            self.LargeString = Text(self.columns["length"]["large_string_length"])
        self.SmallString = String(self.columns["length"]["small_string_length"])
        self.TinyString = String(self.columns["length"]["tiny_string_length"])
        # Empty-value defaults injected per column type by init_column.
        default_ctypes = {
            self.Dict: {},
            self.List: [],
            self.LargeString: "",
            self.SmallString: "",
            self.TinyString: "",
            Text: "",
        }
        def init_column(column_type, *args, **kwargs):
            # Column wrapper that supplies the default empty value unless
            # the caller set one explicitly.
            if "default" not in kwargs and column_type in default_ctypes:
                kwargs["default"] = default_ctypes[column_type]
            return Column(column_type, *args, **kwargs)
        self.Column = init_column
    def configure_events(self):
        """Register engine- and mapper-level SQLAlchemy event handlers."""
        if self.dialect == "sqlite":
            @event.listens_for(self.engine, "connect")
            def do_begin(connection, _):
                # SQLite has no native REGEXP operator: provide a Python
                # implementation so regexp filters work on this dialect.
                def regexp(pattern, value):
                    return search(pattern, str(value)) is not None
                connection.create_function("regexp", 2, regexp)
        @event.listens_for(self.base, "mapper_configured", propagate=True)
        def model_inspection(mapper, model):
            # Populate vs.model_properties / vs.relationships / vs.models
            # from the mapped columns once SQLAlchemy configures each model.
            name = model.__tablename__
            for col in inspect(model).columns:
                if not col.info.get("model_properties", True):
                    continue
                if col.type == PickleType:
                    # Pickled columns are "list" or "dict" depending on the
                    # declared default value.
                    is_list = isinstance(col.default.arg, list)
                    property_type = "list" if is_list else "dict"
                else:
                    property_type = {
                        Boolean: "bool",
                        Integer: "int",
                        Float: "float",
                        JSON: "dict",
                    }.get(type(col.type), "str")
                vs.model_properties[name][col.key] = property_type
            # Association proxies are exposed as plain string properties.
            for descriptor in inspect(model).all_orm_descriptors:
                if descriptor.extension_type is ASSOCIATION_PROXY:
                    property = (
                        descriptor.info.get("name")
                        or f"{descriptor.target_collection}_{descriptor.value_attr}"
                    )
                    vs.model_properties[name][property] = "str"
            # Subclasses inherit the properties of their parent type.
            if hasattr(model, "parent_type"):
                vs.model_properties[name].update(vs.model_properties[model.parent_type])
            if "service" in name and name != "service":
                vs.model_properties[name].update(vs.model_properties["service"])
            vs.models.update({name: model, name.lower(): model})
            vs.model_properties[name].update(model.model_properties)
            for relation in mapper.relationships:
                if getattr(relation.mapper.class_, "private", False):
                    continue
                property = str(relation).split(".")[1]
                vs.relationships[name][property] = {
                    "model": relation.mapper.class_.__tablename__,
                    "list": relation.uselist,
                }
    def configure_model_events(self, env):
        """Attach logging and vault-synchronization events to all models."""
        @event.listens_for(self.base, "after_insert", propagate=True)
        def log_instance_creation(mapper, connection, target):
            # Runs are too frequent/noisy to log individually.
            if hasattr(target, "name") and target.type != "run":
                env.log("info", f"CREATION: {target.type} '{target.name}'")
        @event.listens_for(self.base, "before_delete", propagate=True)
        def log_instance_deletion(mapper, connection, target):
            name = getattr(target, "name", str(target))
            env.log("info", f"DELETION: {target.type} '{name}'")
        @event.listens_for(self.base, "before_update", propagate=True)
        def log_instance_update(mapper, connection, target):
            # Build a human-readable changelog of the modified attributes.
            state, changelog = inspect(target), []
            for attr in state.attrs:
                hist = state.get_history(attr.key, True)
                # Skip private/unlogged attributes and untouched fields.
                if (
                    getattr(target, "private", False)
                    or not getattr(target, "log_changes", True)
                    or not getattr(state.class_, attr.key).info.get("log_change", True)
                    or attr.key in vs.private_properties_set
                    or not hist.has_changes()
                ):
                    continue
                change = f"{attr.key}: "
                property_type = type(getattr(target, attr.key))
                if property_type in (InstrumentedList, MutableList):
                    if property_type == MutableList:
                        # MutableList history holds the full before/after
                        # lists: diff them to find the actual changes.
                        added = [x for x in hist.added[0] if x not in hist.deleted[0]]
                        deleted = [x for x in hist.deleted[0] if x not in hist.added[0]]
                    else:
                        added, deleted = hist.added, hist.deleted
                    if deleted:
                        change += f"DELETED: {deleted}"
                    if added:
                        change += f"{' / ' if deleted else ''}ADDED: {added}"
                else:
                    change += (
                        f"'{hist.deleted[0] if hist.deleted else None}' => "
                        f"'{hist.added[0] if hist.added else None}'"
                    )
                changelog.append(change)
            if changelog:
                name, changes = (
                    getattr(target, "name", target.id),
                    " | ".join(changelog),
                )
                env.log("info", f"UPDATE: {target.type} '{name}': ({changes})")
        # Let models register their own additional events.
        for model in vs.models.values():
            if "configure_events" in vars(model):
                model.configure_events()
        if env.use_vault:
            # Keep vault secrets in sync when an instance is renamed.
            for model in vs.private_properties:
                @event.listens_for(vs.models[model].name, "set", propagate=True)
                def vault_update(target, new_name, old_name, *_):
                    if new_name == old_name:
                        return
                    for property in vs.private_properties[target.class_type]:
                        path = f"secret/data/{target.type}"
                        data = env.vault_client.read(f"{path}/{old_name}/{property}")
                        if not data:
                            return
                        env.vault_client.write(
                            f"{path}/{new_name}/{property}",
                            data={property: data["data"]["data"][property]},
                        )
                    env.vault_client.delete(f"{path}/{old_name}")
def configure_associations(self):
for name, association in self.relationships["associations"].items():
model1, model2 = association["model1"], association["model2"]
setattr(
self,
f"{name}_table",
Table(
f"{name}_association",
self.base.metadata,
Column(
model1["column"],
Integer,
ForeignKey(
f"{model1['foreign_key']}.id", **model1.get("kwargs", {})
),
primary_key=True,
),
Column(
model2["column"],
Integer,
ForeignKey(
f"{model2['foreign_key']}.id", **model2.get("kwargs", {})
),
primary_key=True,
),
),
)
    def query(self, model, rbac="read", username=None, properties=None):
        """Build a session query for `model`, applying RBAC restrictions.

        properties: optional list of column names to select instead of
        full model instances.  Raises rbac_error when the model is
        admin-only for the requested rbac mode.
        """
        if properties:
            entity = [getattr(vs.models[model], property) for property in properties]
        else:
            entity = [vs.models[model]]
        query = self.session.query(*entity)
        # The user model is exempt from RBAC filtering here.
        if rbac and model != "user":
            user = current_user or self.fetch("user", name=username or "admin")
            if user.is_authenticated and not user.is_admin:
                if model in vs.rbac["advanced"]["admin_models"].get(rbac, []):
                    raise self.rbac_error
                # Reads may bypass RBAC entirely when configured (pools
                # excepted).
                if (
                    rbac == "read"
                    and vs.rbac["advanced"]["deactivate_rbac_on_read"]
                    and model != "pool"
                ):
                    return query
                query = vs.models[model].rbac_filter(query, rbac, user)
        return query
def fetch(
self,
instance_type,
allow_none=False,
all_matches=False,
rbac="read",
username=None,
**kwargs,
):
query = self.query(instance_type, rbac, username=username).filter(
*(
getattr(vs.models[instance_type], key) == value
for key, value in kwargs.items()
)
)
for index in range(self.retry_fetch_number):
try:
result = query.all() if all_matches else query.first()
break
except Exception as exc:
self.session.rollback()
if index == self.retry_fetch_number - 1:
error(f"Fetch n°{index} failed ({format_exc()})")
raise exc
else:
warning(f"Fetch n°{index} failed ({str(exc)})")
sleep(self.retry_fetch_time * (index + 1))
if result or allow_none:
return result
else:
raise self.rbac_error(
f"There is no {instance_type} in the database "
f"with the following characteristics: {kwargs}"
)
def delete(self, model, **kwargs):
instance = self.fetch(model, **{"rbac": "edit", **kwargs})
return self.delete_instance(instance)
def fetch_all(self, model, **kwargs):
return self.fetch(model, allow_none=True, all_matches=True, **kwargs)
def objectify(self, model, object_list, **kwargs):
return [self.fetch(model, id=object_id, **kwargs) for object_id in object_list]
def delete_instance(self, instance):
try:
instance.delete()
except Exception as exc:
return {"alert": f"Unable to delete {instance.name} ({exc})."}
serialized_instance = instance.serialized
self.session.delete(instance)
return serialized_instance
def delete_all(self, *models):
for model in models:
for instance in self.fetch_all(model):
self.delete_instance(instance)
self.session.commit()
def export(self, model, private_properties=False):
return [
instance.to_dict(export=True, private_properties=private_properties)
for instance in self.fetch_all(model)
]
    def factory(self, _class, commit=False, no_fetch=False, rbac="edit", **kwargs):
        """Create or update an instance of `_class` from keyword values.

        When an `id` is given (or a `name` matches an existing instance,
        unless no_fetch), that instance is updated; otherwise a new one is
        created.  With commit=True the transaction is retried on failure.
        """
        def transaction(_class, **kwargs):
            # Names containing slashes or quotes would break URLs/queries.
            characters = set(kwargs.get("name", "") + kwargs.get("scoped_name", ""))
            if set("/\\'" + '"') & characters:
                raise Exception("Names cannot contain a slash or a quote.")
            instance, instance_id = None, kwargs.pop("id", 0)
            if instance_id:
                instance = self.fetch(_class, id=instance_id, rbac=rbac)
            elif "name" in kwargs and not no_fetch:
                instance = self.fetch(
                    _class, allow_none=True, name=kwargs["name"], rbac=rbac
                )
            if instance and not kwargs.get("must_be_new"):
                instance.update(**kwargs)
            else:
                instance = vs.models[_class](rbac=rbac, **kwargs)
                self.session.add(instance)
            return instance
        if not commit:
            instance = transaction(_class, **kwargs)
        else:
            # Retry with linear backoff; re-raise after the last attempt.
            for index in range(self.retry_commit_number):
                try:
                    instance = transaction(_class, **kwargs)
                    self.session.commit()
                    break
                except Exception as exc:
                    self.session.rollback()
                    if index == self.retry_commit_number - 1:
                        error(f"Commit n°{index} failed ({format_exc()})")
                        raise exc
                    else:
                        warning(f"Commit n°{index} failed ({str(exc)})")
                        sleep(self.retry_commit_time * (index + 1))
        return instance
def get_credential(
self, username, name=None, device=None, credential_type="any", optional=False
):
pool_alias = aliased(vs.models["pool"])
query = (
self.session.query(vs.models["credential"])
.join(vs.models["pool"], vs.models["credential"].user_pools)
.join(vs.models["user"], vs.models["pool"].users)
)
if device:
query = query.join(pool_alias, vs.models["credential"].device_pools).join(
vs.models["device"], pool_alias.devices
)
query = query.filter(vs.models["user"].name == username)
if name:
query = query.filter(vs.models["credential"].name == name)
if device:
query = query.filter(vs.models["device"].name == device.name)
if credential_type != "any":
query = query.filter(vs.models["credential"].role == credential_type)
credentials = max(query.all(), key=attrgetter("priority"), default=None)
if not credentials and not optional:
raise Exception(f"No matching credentials found for DEVICE '{device.name}'")
return credentials
def register_custom_models(self):
for model in ("device", "link", "service"):
paths = [vs.path / "eNMS" / "models" / f"{model}s"]
load_examples = vs.settings["app"].get("startup_migration") == "examples"
if vs.settings["paths"][f"custom_{model}s"]:
paths.append(Path(vs.settings["paths"][f"custom_{model}s"]))
for path in paths:
for file in path.glob("**/*.py"):
if "init" in str(file):
continue
if not load_examples and "examples" in str(file):
continue
info(f"Loading {model}: {file}")
spec = spec_from_file_location(file.stem, str(file))
try:
spec.loader.exec_module(module_from_spec(spec))
except InvalidRequestError:
error(f"Error loading {model} '{file}'\n{format_exc()}")
@contextmanager
def session_scope(self):
try:
| |
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
subDetail: Third tier of goods and services.
"""
__name__ = 'ExplanationOfBenefit_Detail'
def __init__(self, dict_values=None):
self.sequence = None
# type: int
self.type = None
# reference to CodeableConcept
self.revenue = None
# reference to CodeableConcept
self.category = None
# reference to CodeableConcept
self.service = None
# reference to CodeableConcept
self.modifier = None
# type: list
# reference to CodeableConcept
self.programCode = None
# type: list
# reference to CodeableConcept
self.quantity = None
# reference to Quantity
self.unitPrice = None
# reference to Money
self.factor = None
# type: int
self.net = None
# reference to Money
self.udi = None
# type: list
# reference to Reference: identifier
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ExplanationOfBenefit_Adjudication
self.subDetail = None
# type: list
# reference to ExplanationOfBenefit_SubDetail
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'ExplanationOfBenefit_SubDetail',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'subDetail'},
{'parent_entity': 'ExplanationOfBenefit_Adjudication',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'adjudication'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'service'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'unitPrice'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'net'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'programCode'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'type'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'udi'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'revenue'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'quantity'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'category'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'ExplanationOfBenefit_Detail',
'child_variable': 'modifier'},
]
class ExplanationOfBenefit_SubDetail(fhirbase):
    """
    This resource provides: the claim details; adjudication details from
    the processing of a Claim; and optionally account balance information,
    for informing the subscriber of the benefits provided.

    Args:
        sequence: A service line number.
        type: The type of product or service.
        revenue: The type of reveneu or cost center providing the product
            and/or service.
        category: Health Care Service Type Codes to identify the
            classification of service or benefits.
        service: A code to indicate the Professional Service or Product
            supplied (eg. CTP, HCPCS,USCLS,ICD10, NCPDP,DIN,ACHI,CCI).
        modifier: Item typification or modifiers codes, eg for Oral whether
            the treatment is cosmetic or associated with TMJ, or for medical
            whether the treatment was outside the clinic or out of office hours.
        programCode: For programs which require reson codes for the inclusion,
            covering, of this billed item under the program or sub-program.
        quantity: The number of repetitions of a service or product.
        unitPrice: The fee for an addittional service or product or charge.
        factor: A real number that represents a multiplier used in determining
            the overall value of services delivered and/or goods received. The
            concept of a Factor allows for a discount or surcharge multiplier to
            be applied to a monetary amount.
        net: The quantity times the unit price for an addittional service or
            product or charge. For example, the formula: unit Quantity * unit
            Price (Cost per Point) * factor Number * points = net Amount.
            Quantity, factor and points are assumed to be 1 if not supplied.
        udi: List of Unique Device Identifiers associated with this line item.
        noteNumber: A list of note references to the notes provided below.
        adjudication: The adjudications results.
    """

    __name__ = 'ExplanationOfBenefit_SubDetail'

    def __init__(self, dict_values=None):
        """Initialize every field to None; populate from dict_values via
        set_attributes (inherited from fhirbase) when provided."""
        for field in (
            'sequence',       # int: service line number
            'type',           # CodeableConcept
            'revenue',        # CodeableConcept
            'category',       # CodeableConcept
            'service',        # CodeableConcept
            'modifier',       # list of CodeableConcept
            'programCode',    # list of CodeableConcept
            'quantity',       # Quantity
            'unitPrice',      # Money
            'factor',         # numeric multiplier
            'net',            # Money
            'udi',            # list of Reference (by identifier)
            'noteNumber',     # list of note references
            'adjudication',   # list of ExplanationOfBenefit_Adjudication
            'object_id',      # unique identifier for object class
        ):
            setattr(self, field, None)
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Return the parent/child reference wiring for this entity."""
        child = 'ExplanationOfBenefit_SubDetail'
        references = (
            ('Money', 'object_id', 'net'),
            ('CodeableConcept', 'object_id', 'programCode'),
            ('Money', 'object_id', 'unitPrice'),
            ('CodeableConcept', 'object_id', 'modifier'),
            ('Reference', 'identifier', 'udi'),
            ('CodeableConcept', 'object_id', 'service'),
            ('ExplanationOfBenefit_Adjudication', 'object_id', 'adjudication'),
            ('CodeableConcept', 'object_id', 'revenue'),
            ('Quantity', 'object_id', 'quantity'),
            ('CodeableConcept', 'object_id', 'type'),
            ('CodeableConcept', 'object_id', 'category'),
        )
        return [
            {'parent_entity': parent,
             'parent_variable': variable,
             'child_entity': child,
             'child_variable': attribute}
            for parent, variable, attribute in references
        ]
class ExplanationOfBenefit_AddItem(fhirbase):
    """
    This resource provides: the claim details; adjudication details from
    the processing of a Claim; and optionally account balance information,
    for informing the subscriber of the benefits provided.

    Args:
        sequenceLinkId: List of input service items which this service line is
            intended to replace.
        revenue: The type of reveneu or cost center providing the product
            and/or service.
        category: Health Care Service Type Codes to identify the
            classification of service or benefits.
        service: If this is an actual service or product line, ie. not a
            Group, then use code to indicate the Professional Service or Product
            supplied (eg. CTP, HCPCS,USCLS,ICD10, NCPDP,DIN,ACHI,CCI). If a
            grouping item then use a group code to indicate the type of thing
            being grouped eg. 'glasses' or 'compound'.
        modifier: Item typification or modifiers codes, eg for Oral whether
            the treatment is cosmetic or associated with TMJ, or for medical
            whether the treatment was outside the clinic or out of office hours.
        fee: The fee charged for the professional service or product.
        noteNumber: A list of note references to the notes provided below.
        adjudication: The adjudications results.
        detail: The second tier service adjudications for payor added
            services.
    """

    __name__ = 'ExplanationOfBenefit_AddItem'

    def __init__(self, dict_values=None):
        """Initialize every field to None; populate from dict_values via
        set_attributes (inherited from fhirbase) when provided."""
        for field in (
            'sequenceLinkId',  # list of replaced service line ids
            'revenue',         # CodeableConcept
            'category',        # CodeableConcept
            'service',         # CodeableConcept
            'modifier',        # list of CodeableConcept
            'fee',             # Money
            'noteNumber',      # list of note references
            'adjudication',    # list of ExplanationOfBenefit_Adjudication
            'detail',          # list of ExplanationOfBenefit_Detail1
            'object_id',       # unique identifier for object class
        ):
            setattr(self, field, None)
        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        """Return the parent/child reference wiring for this entity."""
        child = 'ExplanationOfBenefit_AddItem'
        references = (
            ('CodeableConcept', 'object_id', 'category'),
            ('CodeableConcept', 'object_id', 'service'),
            ('CodeableConcept', 'object_id', 'modifier'),
            ('Money', 'object_id', 'fee'),
            ('CodeableConcept', 'object_id', 'revenue'),
            ('ExplanationOfBenefit_Adjudication', 'object_id', 'adjudication'),
            ('ExplanationOfBenefit_Detail1', 'object_id', 'detail'),
        )
        return [
            {'parent_entity': parent,
             'parent_variable': variable,
             'child_entity': child,
             'child_variable': attribute}
            for parent, variable, attribute in references
        ]
class ExplanationOfBenefit_Detail1(fhirbase):
"""
This resource provides: the claim details; adjudication details from
the processing of a Claim; and optionally account balance information,
for informing the subscriber of the benefits provided.
Args:
revenue: The type of reveneu or cost center providing the product
and/or service.
category: Health Care Service Type Codes to identify the
classification of service or benefits.
service: A code to indicate the Professional Service or Product
supplied (eg. CTP, HCPCS,USCLS,ICD10, NCPDP,DIN,ACHI,CCI).
modifier: Item typification or modifiers codes, eg for Oral whether
the treatment is cosmetic or associated with TMJ, or for medical
whether the treatment was outside the clinic or out of office hours.
fee: The fee charged for the professional service or product.
noteNumber: A list of note references to the notes provided below.
adjudication: The adjudications results.
"""
__name__ = 'ExplanationOfBenefit_Detail1'
def __init__(self, dict_values=None):
self.revenue = None
# reference to CodeableConcept
self.category = None
# reference to CodeableConcept
self.service = None
# reference to CodeableConcept
self.modifier = None
# type: list
# reference to CodeableConcept
self.fee = None
# reference to Money
self.noteNumber = None
# type: list
self.adjudication = None
# type: list
# reference to ExplanationOfBenefit_Adjudication
| |
#!/usr/bin/env python3
"""Generate a random GURPS Dungeon Fantasy character."""
import argparse
from collections import Counter
import copy
from enum import Enum, auto
import os
import random
import re
from typing import Dict, List, Set, Tuple
import typing
import xml.etree.ElementTree as et
class TraitType(Enum):
    """Category of a character trait; used to group the printed sheet."""
    PRIMARY_ATTRIBUTE = auto()
    SECONDARY_ATTRIBUTE = auto()
    ADVANTAGE = auto()
    DISADVANTAGE = auto()
    FEATURE = auto()
    SKILL = auto()
    SPELL = auto()
# Short aliases for the trait categories, used throughout the trait tables.
PA = TraitType.PRIMARY_ATTRIBUTE
SA = TraitType.SECONDARY_ATTRIBUTE
AD = TraitType.ADVANTAGE
DI = TraitType.DISADVANTAGE
FE = TraitType.FEATURE
SK = TraitType.SKILL
SP = TraitType.SPELL
def list_levels(
    name: str,
    cost: int,
    trait_type: TraitType,
    num_levels: int,
    min_level: int = 1,
) -> List[Tuple[str, int, TraitType]]:
    """Build one (name, cost, trait_type) tuple per level.

    name should have a %d in it for the level number; cost is per level.
    """
    return [
        (name % level, cost * level, trait_type)
        for level in range(min_level, min_level + num_levels)
    ]
def list_self_control_levels(
    name: str, base_cost: int
) -> List[Tuple[str, int, TraitType]]:
    """Return the four self-control variants of a disadvantage.

    name is just the base name; base_cost is for a self-control number
    of 12.  Rarer self-control rolls scale the cost by the usual GURPS
    multipliers (x0.5, x1, x1.5, x2), truncated toward zero.
    """
    return [
        ("%s (%d)" % (name, roll), int(multiplier * base_cost), DI)
        for roll, multiplier in ((15, 0.5), (12, 1), (9, 1.5), (6, 2))
    ]
def list_self_control_levels2(
    name1: str, base_cost1: int, name2: str, base_cost2: int
) -> List[Tuple[str, int, TraitType]]:
    """Self-control level tuples for two mutually-exclusive disadvantages."""
    variants = list_self_control_levels(name1, base_cost1)
    variants += list_self_control_levels(name2, base_cost2)
    return variants
def pick_from_list(
    lst: List[List[Tuple[str, int, TraitType]]], points: int
) -> List[Tuple[str, int, TraitType]]:
    """Pick traits totaling exactly `points` from the list.

    Chosen traits are removed from lst; on a dead end the whole pick is
    restarted from a pristine copy of the input.
    """
    backup = copy.deepcopy(lst)
    chosen: List[Tuple[str, int, TraitType]] = []
    remaining = points
    while lst and remaining != 0:
        bucket = random.choice(lst)
        lst.remove(bucket)
        while bucket:
            candidate = random.choice(bucket)
            _, cost, _ = candidate
            if abs(cost) > abs(remaining):
                # Too expensive for the remaining budget: discard it.
                bucket.remove(candidate)
                continue
            chosen.append(candidate)
            remaining -= cost
            break
    if remaining != 0:
        # The picks couldn't hit the target exactly: retry from scratch.
        return pick_from_list(backup, points)
    return chosen
def pick_from_list_enforcing_prereqs(
    lst: List[List[Tuple[str, int, TraitType]]],
    points: int,
    original_traits: List[Tuple[str, int, TraitType]],
) -> List[Tuple[str, int, TraitType]]:
    """Pick traits totaling exactly points from the list, choosing only
    traits whose prerequisites are satisfied.

    Return a list of tuples (trait name, cost, trait_type).
    Chosen traits are removed from lst.
    """
    original_lst = copy.deepcopy(lst)
    traits: List[Tuple[str, int, TraitType]] = []
    points_left = points
    while lst and points_left != 0:
        lst2 = random.choice(lst)
        lst.remove(lst2)
        while lst2:
            tup = random.choice(lst2)
            trait, cost, trait_type = tup
            if abs(cost) <= abs(points_left) and prereq_satisfied(
                trait, original_traits + traits
            ):
                traits.append((trait, cost, trait_type))
                points_left -= cost
                break
            else:
                lst2.remove(tup)
    if points_left != 0:
        # If we made a pick that couldn't get the points right, retry.
        # Bug fix: retry through *this* function so prerequisites keep
        # being enforced — the original fell back to pick_from_list,
        # which could return traits with unmet prerequisites.
        return pick_from_list_enforcing_prereqs(
            original_lst, points, original_traits
        )
    return traits
def next_skill_cost(cost: int) -> int:
    """Return the next higher skill cost after cost."""
    # GURPS skill point progression: 0 -> 1 -> 2 -> 4, then +4 per step.
    return {0: 1, 1: 2, 2: 4}.get(cost, cost + 4)
def pick_or_improve_skills_from_list(
    skills: Set[str],
    points: int,
    traits: List[Tuple[str, int, TraitType]],
    min_cost: int = 1,
) -> None:
    """Spend up to `points` on skills, modifying `traits` in place.

    A skill already in traits is brought up to the next level when enough
    points remain; otherwise the skill is added at the min_cost level.

    Bug fixes vs. the original:
    * a skill whose next step was unaffordable was re-picked forever
      (infinite loop, since nothing changed state); it is now dropped
      from the candidate list instead;
    * a newly added skill reused whatever `trait_type` the enumerate
      loop leaked from a previous iteration — wrong type, and a NameError
      when traits was empty; new skills are now always typed as SK;
    * adding a new skill no longer overspends past the point budget.
    """
    points_left = points
    candidates = list(skills)
    while candidates and points_left > 0:
        skill_name = random.choice(candidates)
        for ii, (existing, cost, trait_type) in enumerate(traits):
            if existing == skill_name:
                upgraded = next_skill_cost(cost)
                difference = upgraded - cost
                if difference <= points_left:
                    traits[ii] = (existing, upgraded, trait_type)
                    points_left -= difference
                else:
                    # Can't afford the next level: stop considering it.
                    candidates.remove(skill_name)
                break
        else:
            if min_cost <= points_left:
                traits.append((skill_name, min_cost, SK))
                points_left -= min_cost
            else:
                candidates.remove(skill_name)
def fix_language_talent(traits: List[Tuple[str, int, TraitType]]) -> None:
    """If traits includes Language Talent and any Language, raise the
    language's proficiency (Broken -> Accented -> Native) or, when it is
    already Native, reduce its cost by one point per channel."""
    all_names = {tup[0] for tup in traits}
    if "Language Talent" not in all_names:
        return
    for ii, (trait_name, cost, trait_type) in enumerate(traits):
        # Spoken and written proficiency are upgraded independently.
        for channel in ("Spoken", "Written"):
            if f"{channel}: Broken" in trait_name:
                trait_name = trait_name.replace(
                    f"{channel}: Broken", f"{channel}: Accented"
                )
            elif f"{channel}: Accented" in trait_name:
                trait_name = trait_name.replace(
                    f"{channel}: Accented", f"{channel}: Native"
                )
            elif f"{channel}: Native" in trait_name:
                cost -= 1
        traits[ii] = (trait_name, cost, trait_type)
def print_traits(traits: List[Tuple[str, int, TraitType]]) -> None:
    """Print a formatted character sheet and the total point cost."""
    total_cost = 0

    def show(name: str, cost: int) -> None:
        nonlocal total_cost
        total_cost += cost
        print("%s [%d]" % (name, cost))

    # Attributes keep their original (fixed) order.
    print("\nPrimary Attributes")
    for name, cost, trait_type in traits:
        if trait_type == PA:
            show(name, cost)
    print("\nSecondary Attributes")
    for name, cost, trait_type in traits:
        if trait_type == SA:
            show(name, cost)
    # Everything else is printed in sorted order.
    traits = sorted(traits)
    for header, wanted in (
        ("\nAdvantages", AD),
        ("\nDisadvantages", DI),
        ("\nSkills", SK),
    ):
        print(header)
        for name, cost, trait_type in traits:
            if trait_type == wanted:
                show(name, cost)
    # The Spells header only appears when the character has spells.
    printed_spells_header = False
    for name, cost, trait_type in traits:
        if trait_type == SP:
            if not printed_spells_header:
                print("\nSpells")
                printed_spells_header = True
            show(name, cost)
    print("\ntotal points: %d" % total_cost)
def generate_barbarian() -> List[Tuple[str, int, TraitType]]:
traits = [
("ST 17", 63, PA),
("DX 13", 60, PA),
("IQ 10", 0, PA),
("HT 13", 30, PA),
("HP 22", 9, SA),
("Will 10", 0, SA),
("Per 12", 10, SA),
("FP 13", 0, SA),
("Basic Speed 6.0", -10, SA),
("Basic Move 7", 0, SA),
("High Pain Threshold", 10, AD),
("Outdoorsman 4", 40, AD),
("Gigantism", 0, FE),
("Social Stigma (Minority Group)", -10, DI),
("Camouflage", 1, SK),
("Navigation (Land)", 2, SK),
("Tracking", 1, SK),
("Brawling", 1, SK),
("Stealth", 2, SK),
("Wrestling", 2, SK),
("Naturalist", 1, SK),
("Swimming", 1, SK),
("Hiking", 1, SK),
("Running", 1, SK),
("Fishing", 1, SK),
("Animal Handling (any)", 2, SK),
("Disguise (Animals)", 2, SK),
("Weather Sense", 2, SK),
("Intimidation", 2, SK),
]
ads1 = [
list_levels("ST +%d", 9, PA, 3),
list_levels("HT +%d", 10, PA, 3),
list_levels("Per +%d", 5, SA, 6),
[("Absolute Direction", 5, AD)],
list_levels("Acute Hearing %d", 2, AD, 5),
list_levels("Acute Taste and Smell %d", 2, AD, 5),
list_levels("Acute Touch %d", 2, AD, 5),
list_levels("Acute Vision %d", 2, AD, 5),
[("Alcohol Tolerance", 1, AD)],
[("Animal Empathy", 5, AD)],
list_levels("Animal Friend %d", 5, AD, 4),
[("Combat Reflexes", 15, AD)],
[("Fit", 5, AD), ("Very Fit", 15, AD)],
list_levels("Hard to Kill %d", 2, AD, 5),
list_levels("Hard to Subdue %d", 2, AD, 5),
list_levels("Lifting ST %d", 3, AD, 3),
[("Luck", 15, AD), ("Extraordinary Luck", 30, AD)],
list_levels("Magic Resistance %d", 2, AD, 5),
[("Rapid Healing", 5, AD), ("Very Rapid Healing", 15, AD)],
[("Recovery", 10, AD)],
[("Resistant to Disease 3", 3, AD), ("Resistant to Disease 8", 5, AD)],
[("Resistant to Poison 3", 5, AD)],
list_levels("Signature Gear %d", 1, AD, 10),
[("Striking ST 1", 5, SA), ("Striking ST 2", 9, SA)],
list_levels("Temperature Tolerance %d", 1, AD, 2),
[("Weapon Bond", 1, AD)],
]
traits.extend(pick_from_list(ads1, 30))
disads1 = [
[("Easy to Read", -10, DI)],
list_self_control_levels("Gullibility", -10),
[("Language: Spoken (Native) / Written (None)", -3, DI)],
list_levels("Low TL %d", -5, DI, 2),
[("Odious Personal Habit (Unrefined manners)", -5, DI)],
list_self_control_levels("Phobia (Machinery)", -5),
[("Wealth (Struggling)", -10, DI)],
]
traits.extend(pick_from_list(disads1, -10))
disads2 = [
[("Appearance: Unattractive", -4, DI), ("Appearance: Ugly", -8, DI)],
list_self_control_levels("Bad Temper", -10),
list_self_control_levels("Berserk", -10),
list_self_control_levels("Bloodlust", -10),
list_self_control_levels2(
"Compulsive Carousing", -5, "Phobia (Crowds)", -15
),
list_self_control_levels("Gluttony", -5),
list_levels("Ham-Fisted %d", -5, DI, 2),
[("Horrible Hangovers", -1, DI)],
list_self_control_levels("Impulsiveness", -10),
list_self_control_levels("Overconfidence", -5),
[("Sense of Duty (Adventuring companions)", -5, DI)],
]
disads2.extend(disads1)
traits.extend(pick_from_list(disads2, -20))
skills1 = [
[("Survival (Arctic)", 1, SK)],
[("Survival (Desert)", 1, SK)],
[("Survival (Island/Beach)", 1, SK)],
[("Survival (Jungle)", 1, SK)],
[("Survival (Mountain)", 1, SK)],
| |
False
context = self.retrieve_context(keys[1])
if context is None:
return False
if len(keys) == 3:
if keys[2] not in ('max-trades', 'mode'):
return False
if context.trade_quantity_type == context.TRADE_QUANTITY_MANAGED:
# in this mode it must be managed by its handler
return False
if keys[2] == 'max-trades':
try:
v = int(value)
if 0 <= v <= 999:
context.max_trades = v
return True
else:
return False
except ValueError:
return False
elif keys[2] == 'mode':
if value not in context.MODE:
return False
context.mode = context.MODE[value]
return True
elif len(keys) == 4:
if keys[2] in ('stop-loss', 'dynamic-stop-loss', 'take-profit', 'dynamic-take-profit'):
if keys[3] not in ('distance', 'orientation', 'depth', 'multi', 'timeout-distance'):
return False
# retrieve the related target
ex_entry_exit = None
if keys[2] == 'stop-loss':
if not hasattr(context, 'stop_loss'):
return False
ex_entry_exit = context.stop_loss
if keys[2] == 'dynamic-stop-loss':
if not hasattr(context, 'dynamic_stop_loss'):
return False
ex_entry_exit = context.dynamic_stop_loss
if keys[2] == 'take-profit':
if not hasattr(context, 'take_profit'):
return False
ex_entry_exit = context.take_profit
if keys[2] == 'dynamic-take-profit':
if not hasattr(context, 'dynamic_take_profit'):
return False
ex_entry_exit = context.dynamic_take_profit
if not ex_entry_exit:
return False
# update value
if keys[3] == 'distance':
try:
if type(value) is str and value.endswith('%'):
v = float(value[:-1])
if v <= -100:
return False
elif type(value) is str and value.endswith('pip'):
v = float(value[:-3])
else:
v = float(value)
except ValueError:
return False
if not hasattr(ex_entry_exit, 'modify_distance'):
return False
ex_entry_exit.modify_distance(self, value)
return True
elif keys[3] == 'timeout-distance':
try:
if type(value) is str and value.endswith('%'):
v = float(value[:-1])
if v <= -100:
return False
elif type(value) is str and value.endswith('pip'):
v = float(value[:-3])
else:
v = float(value)
except ValueError:
return False
if not hasattr(ex_entry_exit, 'modify_timeout_distance'):
return False
ex_entry_exit.modify_timeout_distance(self, value)
return True
elif keys[3] == "multi":
if value not in (0, 1):
return False
if not hasattr(ex_entry_exit, 'multi'):
return False
ex_entry_exit.multi = True if value else False
return True
elif keys[3] == "depth":
try:
v = int(value)
except ValueError:
return False
if not hasattr(ex_entry_exit, 'depth'):
return False
ex_entry_exit.depth = v
return True
elif keys[3] == "orientation":
if value not in ('up', 'upper', 'high', 'higher', 'dn', 'down', 'low', 'lower', 'both'):
return False
if not hasattr(ex_entry_exit, 'modify_orientation'):
return False
ex_entry_exit.modify_orientation(value)
return True
elif keys[2] == 'trade-quantity':
if keys[3] not in ('type', 'quantity', 'step'):
return False
if context.trade_quantity_type == context.TRADE_QUANTITY_MANAGED:
# this mode is globally managed and cannot be locally modified
return False
if keys[3] == 'type':
# other choices use keys[4]
if value not in ('normal', 'reinvest-max-last'):
return False
try:
quantity = float(value)
except ValueError:
return False
if quantity < 0:
return False
return context.modify_trade_quantity_type(self.instrument, value, quantity)
elif keys[3] == 'quantity':
try:
quantity = float(value)
except ValueError:
return False
return context.modify_trade_quantity(quantity)
elif keys[3] == 'step':
try:
step = float(value)
except ValueError:
return False
return context.modify_trade_step(step)
else:
return False
elif len(keys) == 4:
if keys[2] == 'trade-quantity':
if keys[3] not in ('type',):
return False
if context.trade_quantity_type == context.TRADE_QUANTITY_MANAGED:
# this mode is globally managed and cannot be locally modified
return False
if keys[3] == 'type':
# other choices use keys[3]
if keys[4] not in ('specific', 'increment-step'):
return False
try:
quantity = float(value)
except ValueError:
return False
if quantity < 0:
return False
return context.modify_trade_quantity_type(self.instrument, keys[4], quantity)
elif keys[0] == 'max-trades':
try:
v = int(value)
if 0 <= v <= 999:
self.max_trades = v
return True
else:
return False
except ValueError:
return False
return False
#
# processing
#
@property
def activity(self) -> bool:
    """
    Strategy trader Local state.

    True when execution of the automated orders is enabled
    (toggled through set_activity).
    """
    return self._activity
def set_activity(self, status: bool):
    """
    Enable/disable execution of the automated orders.

    No-op when the state is unchanged; otherwise updates the local flag
    and streams the new value when a global streamer is attached.
    """
    if self._activity == status:
        return
    self._activity = status
    streamer = self._global_streamer
    if streamer:
        streamer.member('activity').update(self._activity)
@property
def affinity(self) -> int:
    """
    Strategy trader affinity rate.

    Read-only accessor; changes go through the matching setter, which
    also streams the update.
    """
    return self._affinity
@affinity.setter
def affinity(self, affinity: int):
    """
    Set strategy trader affinity rate.

    Only acts on an actual change; streams the new value when a global
    streamer is attached.
    """
    if affinity == self._affinity:
        return
    self._affinity = affinity
    streamer = self._global_streamer
    if streamer:
        streamer.member('affinity').update(self._affinity)
@property
def max_trades(self):
    """Current max-trades setting (int); changes stream via the setter."""
    return self._max_trades
@max_trades.setter
def max_trades(self, max_trades):
    """Set the max-trades value and stream it when it actually changes."""
    if max_trades == self._max_trades:
        return
    self._max_trades = max_trades
    streamer = self._global_streamer
    if streamer:
        streamer.member('max-trades').update(self._max_trades)
def restart(self):
    """
    Needed on a reconnection to reset some states.

    Flags the initialization, check, preprocessing and bootstrapping
    stages as pending (value 1) under the trader mutex.
    """
    with self._mutex:
        self._initialized = 1
        self._checked = 1
        self._preprocessing = 1
        self._bootstrapping = 1
def recheck(self):
    """
    Query for recheck any trades of the trader.

    Sets the pending-check flag (1); it is consumed by check_trades.
    """
    with self._mutex:
        self._checked = 1
#
# pre-processing
#
def preprocess_load_cache(self, from_date: datetime, to_date: datetime):
    """
    Override this method to load the cached data before performing preprocess.

    Default implementation is a no-op.

    @param from_date Start of the period whose cached data should be loaded.
    @param to_date End of the period whose cached data should be loaded.
    """
    pass
def preprocess(self, trade: TickType):
    """
    Override this method to preprocess trade per trade each most recent data than the cache.

    Default implementation is a no-op.

    @param trade A single tick/trade entry, more recent than the cached range.
    """
    pass
def preprocess_store_cache(self, from_date: datetime, to_date: datetime):
    """
    Override this method to store in cache the preprocessed data.

    Default implementation is a no-op.

    @param from_date Start of the period to persist into the cache.
    @param to_date End of the period to persist into the cache.
    """
    pass
#
# processing
#
def prepare(self):
    """
    Prepare before entering live or backtest data stream.
    Prepare indicators, signals states and flags.
    It is called before bootstrap iterations and before process iterations.

    Default implementation is a no-op.
    """
    pass
def bootstrap(self, timestamp: float):
    """
    Override this method to do all the initial strategy work using the preloaded OHLCs history.
    No trade must be done here, but signal pre-state could be computed, so that
    strategies with pre-trigger signals do not miss upcoming signal validations.

    Default implementation is a no-op.

    @param timestamp Current processed timestamp within the history replay.
    """
    pass
def process(self, timestamp: float):
    """
    Override this method to do here all the strategy work.
    You must call the update_trades method during the process in way to manage the trades.

    Default implementation is a no-op.

    @param timestamp Current timestamp (or in backtest the processed time in past).
    """
    pass
def check_trades(self, timestamp: float):
    """
    Recheck actives or pending trades. Useful after a reconnection.

    Runs only when a recheck is pending (flag == 1), marks itself
    in-progress (2) while working and done (0) when finished.
    Skipped entirely in paper-mode.
    """
    with self._mutex:
        if self._checked != 1:
            # no recheck requested
            return
        # mark in-progress
        self._checked = 2
    trader = self.strategy.trader()
    if not trader.paper_mode:
        # real account only: refresh each trade from its orders/position info
        with self._trade_mutex:
            for trade in self._trades:
                try:
                    trade.check(trader, self.instrument)
                    # deliberately no automatic repair here: freeing quantity
                    # for one trade could consume what another trade needs
                    time.sleep(1.0)  # throttle, do not saturate the API
                except Exception as e:
                    error_logger.error(repr(e))
                    traceback_logger.error(traceback.format_exc())
    with self._mutex:
        # done
        self._checked = 0
def terminate(self):
    """
    Delete any non realized trades (new or open) or remaining closed but not closing.

    Trades that cannot be cleanly removed (e.g. an order-cancel API error)
    are kept so they can be persisted and retried later.
    """
    trader = self.strategy.trader()
    kept = []
    changed = False
    with self._mutex, self._trade_mutex:
        for trade in self._trades:
            removable = trade.can_delete() or trade.is_closed() or not trade.is_active()
            if removable and trade.remove(trader, self.instrument):
                # cleaned up successfully: drop the trade
                changed = True
            else:
                # keep for persistence: still active, or cancel failed
                kept.append(trade)
        if changed:
            self._trades = kept
#
# persistence
#
def save(self):
    """
    Trader and trades persistence (might occurs only for live mode on real accounts).

    Stores one database row per trade (with its operations), then a single
    row for the trader-level data, regions and alerts.

    @note Must be called only after terminate.
    """
    trader = self.strategy.trader()
    with self._mutex:
        with self._trade_mutex:
            for trade in self._trades:
                # one row per trade, including its operations dump
                Database.inst().store_user_trade((
                    trader.name, trader.account.name, self.instrument.market_id,
                    self.strategy.identifier, trade.id, trade.trade_type,
                    trade.dumps(),
                    [operation.dumps() for operation in trade.operations]))
        # trader-level data, regions and alerts
        trader_data = {
            'affinity': self._affinity,
            # @todo context data or into a distinct column
        }
        Database.inst().store_user_trader((
            trader.name, trader.account.name, self.instrument.market_id,
            self.strategy.identifier, self.activity, trader_data,
            [region.dumps() for region in self._regions],
            [alert.dumps() for alert in self._alerts]))
def dumps(self) -> dict:
"""
Trader state, context and trades persistence.
"""
trader = self.strategy.trader()
trades_data = []
contexts_data = [] # @todo
with self._mutex:
with self._trade_mutex:
for trade in self._trades:
t_data = trade.dumps()
t_data['operations'] = [operation.dumps() for operation in trade.operations]
trades_data.append(t_data)
# dumps of trader data, regions and alerts
trader_data = {
'activity': self._activity,
'affinity': self._affinity,
'next-trade-id': self._next_trade_id,
'next-alert-id': self._next_alert_id,
'next-region-id': self._next_region_id,
'contexts': contexts_data
}
regions_data = [region.dumps() for region in self._regions]
alerts_data = [alert.dumps() for alert in self._alerts]
return {
'trader-name': trader.name,
'account-name': trader.account.name,
'strategy': self.strategy.identifier,
| |
m
except:
raise
else:
dumpkeys = []
for _,v in porttype.items():
dumpkeys.extend(v.get('portkeys'))
for m in self._dumpkeys(dumpkeys):
yield m
def updatephysicalport(self, name, vhost='', systemid='%', bridge='%', **args):
    """Update a single physical port; yields until the updated info is set.

    Convenience wrapper that delegates to updatephysicalports with a
    one-element list. Raises ValueError when name is empty.
    """
    if not name:
        raise ValueError("must speclial physicalport name")
    port = {'name': name, 'vhost': vhost, 'systemid': systemid, 'bridge': bridge}
    # extra keyword args are merged last so they may override the selectors
    port.update(args)
    for m in self.updatephysicalports([port]):
        yield m
def updatephysicalports(self, ports):
    """update multi physicalport info that ids, return updated infos

    ports: [{'name': 'eth0', 'vhost': '', 'systemid': '%', 'bridge': '%', ...}, ...]

    Raises ValueError for a missing name, an attempt to change the
    physical network, a duplicated port key, or an unknown port.
    Retries the transaction once on an update conflict.
    """
    self._reqid += 1
    reqid = ('viperflow', self._reqid)
    max_try = 1  # retry budget on optimistic-concurrency conflict
    fphysicalportkeys = set()
    newports = []
    typeport = dict()  # physical network type -> {'ports': [...], 'updater': ..., 'portkeys': [...]}
    for port in ports:
        port = copy.deepcopy(port)  # never mutate the caller's dicts
        if 'name' not in port:
            raise ValueError("must speclial physicalport name")
        if 'physicalnetwork' in port:
            raise ValueError("physicalnetwork can not be change")
        port.setdefault("vhost", "")
        port.setdefault("systemid", "%")
        port.setdefault("bridge", "%")
        portkey = PhysicalPort.default_key(port.get('vhost'),
                                           port.get('systemid'), port.get('bridge'), port.get('name'))
        if portkey not in fphysicalportkeys:
            fphysicalportkeys.add(portkey)
        else:
            raise ValueError("key repeat " + portkey)
        newports.append(port)
    fphysicalportkeys = list(fphysicalportkeys)
    # fetch the current port objects; references are watched under reqid
    for m in callAPI(self.app_routine, 'objectdb', 'mget',
                     {'keys': fphysicalportkeys, 'requestid': reqid}):
        yield m
    fphysicalportvalues = self.app_routine.retvalue
    if None in fphysicalportvalues:
        # release the watch references before failing
        with watch_context(fphysicalportkeys, fphysicalportvalues, reqid, self.app_routine):
            pass
        raise ValueError(" physical ports is not existed " +
                         fphysicalportkeys[fphysicalportvalues.index(None)])
    physicalportdict = dict(zip(fphysicalportkeys, fphysicalportvalues))
    try:
        while True:
            # group the ports by the type of their physical network
            for port in newports:
                portobj = physicalportdict[PhysicalPort.default_key(port['vhost'], port['systemid'],
                                                                    port['bridge'], port['name'])]
                porttype = portobj.physicalnetwork.type
                if porttype not in typeport:
                    typeport.setdefault(porttype, {"ports": [port]})
                else:
                    typeport.get(porttype).get("ports").append(port)
            # obtain a per-type updater callback from the network driver
            for k, v in typeport.items():
                for m in callAPI(self.app_routine, 'public', 'updatephysicalports',
                                 {"phynettype": k, 'ports': v.get('ports')}, timeout=1):
                    yield m
                updater = self.app_routine.retvalue
                portkeys = list(set([PhysicalPort.default_key(p.get('vhost'), p.get('systemid'),
                                                              p.get('bridge'), p.get('name'))
                                     for p in v.get('ports')]))
                v["updater"] = updater
                v["portkeys"] = portkeys
            keys = []
            typesortvalues = []
            for _, v in typeport.items():
                keys.extend(v.get('portkeys'))
                typesortvalues.extend(physicalportdict[key] for key in v.get('portkeys'))

            def update(keys, values):
                # transact updater: slice keys/values per network type and
                # delegate to each driver updater, concatenating the results
                start = 0
                index = 0
                retkeys = []
                retvalues = []
                for k, v in typeport.items():
                    typekeys = keys[start:start + len(v.get("portkeys"))]
                    typevalues = values[start:start + len(v.get("portkeys"))]
                    # conflict if any port moved to another physical network
                    # since the initial mget
                    if [n.physicalnetwork.getkey() if n is not None else None
                            for n in typevalues] != \
                            [n.physicalnetwork.getkey()
                             for n in typesortvalues[index:index + len(v.get('portkeys'))]]:
                        raise UpdateConflictException
                    rettypekeys, rettypevalues = v.get('updater')(typekeys, typevalues)
                    retkeys.extend(rettypekeys)
                    retvalues.extend(rettypevalues)
                    start = start + len(v.get('portkeys'))
                    index = index + len(v.get('portkeys'))
                # BUG FIX: this previously returned the untouched (keys, values),
                # discarding every driver updater's result so the transaction
                # wrote back the unmodified objects. Return the collected
                # results, as the sibling delete/create methods do.
                return retkeys, retvalues
            try:
                for m in callAPI(self.app_routine, 'objectdb', 'transact',
                                 {"keys": keys, "updater": update}):
                    yield m
            except UpdateConflictException:
                max_try -= 1
                if max_try < 0:
                    raise
                logger.info(" cause UpdateConflict Exception try once")
                continue
            else:
                break
    except:
        raise
    else:
        # dump the updated ports while the watch still holds the references
        for m in self._dumpkeys(keys):
            yield m
    finally:
        # always release the mget watch references
        with watch_context(fphysicalportkeys, fphysicalportvalues, reqid, self.app_routine):
            pass
def deletephysicalport(self, name, vhost='', systemid='%', bridge='%'):
    """Delete a single physical port; yields until {'status': 'OK'} is set.

    Convenience wrapper delegating to deletephysicalports with a
    one-element list. Raises ValueError when name is empty.
    """
    if not name:
        raise ValueError("must speclial physicalport name")
    selector = {'name': name, 'vhost': vhost, 'systemid': systemid, 'bridge': bridge}
    for m in self.deletephysicalports([selector]):
        yield m
def deletephysicalports(self, ports):
    "delete physicalports that ids, return status OK"
    # ports: list of dicts, each at least {'name': ...} plus optional
    # 'vhost'/'systemid'/'bridge' selectors (defaulted below).
    self._reqid += 1
    reqid = ('viperflow', self._reqid)
    # retry budget on optimistic-concurrency conflict (one retry)
    max_try = 1
    typeport = dict()          # physical network type -> {'ports': [...], ...}
    fphysicalportkeys = set()  # de-duplicated full port keys
    newports = []
    for port in ports:
        port = copy.deepcopy(port)  # never mutate the caller's dicts
        if 'name' not in port:
            raise ValueError("must speclial physicalport name")
        port.setdefault('vhost', "")
        port.setdefault('systemid', "%")
        port.setdefault('bridge', "%")
        portkey = PhysicalPort.default_key(port.get('vhost'), port.get("systemid"),
                                           port.get("bridge"), port.get("name"))
        if portkey not in fphysicalportkeys:
            fphysicalportkeys.add(portkey)
        else:
            raise ValueError("key repeat " + portkey)
        newports.append(port)
    fphysicalportkeys = list(fphysicalportkeys)
    # fetch the current port objects; references are watched under reqid
    for m in callAPI(self.app_routine, 'objectdb', 'mget',
                     {'keys': fphysicalportkeys, 'requestid': reqid}):
        yield m
    fphysicalportvalues = self.app_routine.retvalue
    if None in fphysicalportvalues:
        # release the watch references before reporting the missing key
        with watch_context(fphysicalportkeys, fphysicalportvalues, reqid, self.app_routine):
            pass
        raise ValueError(" physical ports is not existed " +
                         fphysicalportkeys[fphysicalportvalues.index(None)])
    physicalportdict = dict(zip(fphysicalportkeys, fphysicalportvalues))
    try:
        while True:
            # group the ports by their physical network type, remembering the
            # physical network id for building map keys below
            # NOTE(review): on a conflict retry ('continue' below) this loop
            # appends the same ports into typeport again -- confirm intended.
            for port in newports:
                portobj = physicalportdict[PhysicalPort.default_key(port['vhost'], port['systemid'],
                                                                    port['bridge'], port['name'])]
                porttype = portobj.physicalnetwork.type
                phynetid = portobj.physicalnetwork.id
                port["phynetid"] = phynetid
                if porttype not in typeport:
                    typeport.setdefault(porttype, {"ports": [port]})
                else:
                    typeport.get(porttype).get("ports").append(port)
            # ask each physical-network-type driver for its delete updater
            for k, v in typeport.items():
                try:
                    for m in callAPI(self.app_routine, "public", "deletephysicalports",
                                     {"phynettype": k, "ports": v.get("ports")}, timeout=1):
                        yield m
                except:
                    # release the watch references before propagating
                    with watch_context(fphysicalportkeys, fphysicalportvalues, reqid, self.app_routine):
                        pass
                    raise
                updater = self.app_routine.retvalue
                portkeys = [PhysicalPort.default_key(p.get('vhost'), p.get('systemid'),
                                                     p.get('bridge'), p.get('name'))
                            for p in v.get('ports')]
                # NOTE(review): phynetkeys is computed but never used afterwards
                phynetkeys = list(set([PhysicalNetwork.default_key(p.get("phynetid"))
                                       for p in v.get('ports')]))
                phynetmapkeys = list(set([PhysicalNetworkMap.default_key(p.get("phynetid"))
                                          for p in v.get('ports')]))
                v["updater"] = updater
                v["portkeys"] = portkeys
                v["phynetmapkeys"] = phynetmapkeys
            # transaction keys: the global port set first, then per-type
            # port keys followed by their physical network map keys
            keys = [PhysicalPortSet.default_key()]
            typesortvalues = []
            for _, v in typeport.items():
                keys.extend(v.get("portkeys"))
                keys.extend(v.get("phynetmapkeys"))
                typesortvalues.extend(physicalportdict[key] for key in v.get('portkeys'))

            def update(keys, values):
                # transact updater: dispatch slices of keys/values to each
                # type-specific updater, threading the shared port set object
                # through every call
                start = 1
                index = 0
                portset = values[0]
                retkeys = [keys[0]]
                retvalues = [None]
                for k, v in typeport.items():
                    typekeylen = len(v['portkeys']) + len(v['phynetmapkeys'])
                    sortkeylen = len(v['portkeys'])
                    # conflict if any port changed physical network since mget
                    if [n.physicalnetwork.getkey() if n is not None else None
                            for n in values[start:start + sortkeylen]] != \
                            [n.physicalnetwork.getkey()
                             for n in typesortvalues[index:index + sortkeylen]]:
                        raise UpdateConflictException
                    try:
                        typeretkeys, typeretvalues = v['updater'](keys[0:1] + keys[start:start + typekeylen],
                                                                  [portset] + values[start:start + typekeylen])
                    except:
                        raise
                    else:
                        retkeys.extend(typeretkeys[1:])
                        retvalues.extend(typeretvalues[1:])
                        portset = typeretvalues[0]
                        start = start + typekeylen
                        index = index + sortkeylen
                retvalues[0] = portset
                return retkeys, retvalues
            try:
                for m in callAPI(self.app_routine, "objectdb", "transact",
                                 {"keys": keys, "updater": update}):
                    yield m
            except UpdateConflictException:
                max_try -= 1
                if max_try < 0:
                    raise
                else:
                    logger.info(" cause UpdateConflict Exception try once")
                    continue
            except:
                raise
            else:
                break
    except:
        raise
    else:
        self.app_routine.retvalue = {"status": 'OK'}
    finally:
        # always release the mget watch references
        with watch_context(fphysicalportkeys, fphysicalportvalues, reqid, self.app_routine):
            pass
def listphysicalports(self, name=None, physicalnetwork=None, vhost='',
                      systemid='%', bridge='%', **args):
    "list physicalports info"
    # Extra keyword args are matched against attributes of each port object.
    def set_walker(key, set, walk, save):
        # Walk every weak reference in the port set and save the keys of
        # ports matching the physicalnetwork / extra-attribute filters.
        if set is None:
            return
        for weakobj in set.dataset():
            phyportkey = weakobj.getkey()
            try:
                phyport = walk(phyportkey)
            except:
                # NOTE(review): if walk() raises, phyport stays unbound and
                # the code below raises NameError -- likely should `continue`.
                pass
            if not physicalnetwork:
                if all(getattr(phyport, k, None) == v for k, v in args.items()):
                    save(phyportkey)
            else:
                try:
                    phynet = walk(phyport.physicalnetwork.getkey())
                except:
                    pass
                else:
                    # keep only ports attached to the requested physical network
                    if phynet.id == physicalnetwork:
                        if all(getattr(phyport, k, None) == v for k, v in args.items()):
                            save(phyportkey)

    def walker_func(set_func):
        # Adapt set_walker to the objectdb walk() callback signature.
        def walker(key, obj, walk, save):
            if obj is None:
                return
            set_walker(key, set_func(obj), walk, save)
        return walker
    if not name:
        # get all physical port
        phyportsetkey = PhysicalPortSet.default_key()
        # an unique id used to unwatch
        self._reqid += 1
        reqid = ('viperflow', self._reqid)
        for m in callAPI(self.app_routine, 'objectdb', 'walk', {'keys': [phyportsetkey],
                         'walkerdict': {phyportsetkey: walker_func(lambda x: x.set)},
                         'requestid': reqid}):
            yield m
        keys, values = self.app_routine.retvalue
        # dump will get reference
        with watch_context(keys, values, reqid, self.app_routine):
            self.app_routine.retvalue = [dump(r) for r in values]
    else:
        # single-port lookup by its full key
        phyportkey = PhysicalPort.default_key(vhost, systemid, bridge, name)
        for m in self._getkeys([phyportkey]):
            yield m
        retobj = self.app_routine.retvalue
        if len(retobj) == 0 or retobj[0] is None:
            self.app_routine.retvalue = []
        else:
            # NOTE(review): retobj is a list; getattr on the list itself only
            # matches when no extra args are given -- retobj[0] was probably
            # intended here (and in dump() below). Confirm before changing.
            if all(getattr(retobj, k, None) == v for k, v in args.items()):
                self.app_routine.retvalue = dump(retobj)
            else:
                self.app_routine.retvalue = []
def createlogicalnetwork(self, physicalnetwork, id=None, **kwargs):
    """Create one logical network; yields until the created info is set.

    A random UUID is generated when no id is given. Extra keyword args
    are merged into the network description and may override the
    defaults. Delegates to createlogicalnetworks.
    """
    if not id:
        id = str(uuid1())
    network = {'physicalnetwork': physicalnetwork, 'id': id}
    network.update(kwargs)
    for m in self.createlogicalnetworks([network]):
        yield m
def createlogicalnetworks(self, networks):
    "create logicalnetworks info,return creared infos"
    # networks [{'physicalnetwork':'id','id':'id' ...},{'physicalnetwork':'id',...}]
    # Unlike the physical-port methods there is no conflict/retry loop here:
    # the transaction is attempted once.
    idset = set()
    phynetkeys = []
    newnetworks = []
    for network in networks:
        network = copy.deepcopy(network)  # never mutate the caller's dicts
        if 'physicalnetwork' not in network:
            raise ValueError("create logicalnet must special physicalnetwork id")
        if 'id' in network:
            # explicit ids must be unique within this request
            if network['id'] not in idset:
                idset.add(network['id'])
            else:
                raise ValueError("key repeat " + network['id'])
        else:
            network.setdefault('id', str(uuid1()))
        phynetkeys.append(PhysicalNetwork.default_key(network.get('physicalnetwork')))
        newnetworks.append(network)
    # verify every referenced physical network exists
    phynetkeys = list(set(phynetkeys))
    for m in self._getkeys(phynetkeys):
        yield m
    phynetvalues = self.app_routine.retvalue
    if None in phynetvalues:
        raise ValueError("physicalnetwork key not existed " +
                         PhysicalNetwork._getIndices(phynetkeys[phynetvalues.index(None)])[1][0])
    phynetdict = dict(zip(phynetkeys, phynetvalues))
    # group the new networks by the type of their physical network
    typenetwork = dict()
    for network in newnetworks:
        phynetobj = phynetdict[PhysicalNetwork.default_key(network.get('physicalnetwork'))]
        phynettype = phynetobj.type
        if phynettype not in typenetwork:
            typenetwork.setdefault(phynettype, {'networks': [network]})
        else:
            typenetwork[phynettype]['networks'].append(network)
    # obtain a per-type updater callback from the network driver
    for k, v in typenetwork.items():
        try:
            for m in callAPI(self.app_routine, 'public', 'createlogicalnetworks',
                             {'phynettype': k, 'networks': v.get('networks')}, timeout=1):
                yield m
        except:
            raise
        updater = self.app_routine.retvalue
        lgnetkey = [LogicalNetwork.default_key(n.get('id'))
                    for n in v.get('networks')]
        lgnetmapkey = [LogicalNetworkMap.default_key(n.get('id'))
                       for n in v.get('networks')]
        phynetkey = list(set([PhysicalNetwork.default_key(n.get('physicalnetwork'))
                              for n in v.get('networks')]))
        #
        # if we use map default key , to set , it will be disorder with
        # phynetkey, so we create map key use set(phynetkey)
        #
        phynetmapkey = [PhysicalNetworkMap.default_key(PhysicalNetwork._getIndices(n)[1][0])
                        for n in phynetkey]
        v['lgnetkeys'] = lgnetkey
        v['lgnetmapkeys'] = lgnetmapkey
        #
        # will have more logicalnetwork create on one phynet,
        # so we should reduce phynetkey , phynetmapkey
        #
        v['phynetkeys'] = phynetkey
        v['phynetmapkeys'] = phynetmapkey
        v['updater'] = updater
    # transaction keys: the global logical network set first, then per-type
    # logical network / map / physical network / physical map keys
    keys = [LogicalNetworkSet.default_key()]
    for _, v in typenetwork.items():
        keys.extend(v.get('lgnetkeys'))
        keys.extend(v.get('lgnetmapkeys'))
        keys.extend(v.get('phynetkeys'))
        keys.extend(v.get('phynetmapkeys'))

    def updater(keys, values):
        # transact updater: dispatch slices of keys/values to each type's
        # driver updater, threading the shared LogicalNetworkSet through
        # every call
        retkeys = [keys[0]]
        retvalues = [None]
        lgnetset = values[0]
        start = 1
        for k, v in typenetwork.items():
            typekeylen = len(v['lgnetkeys']) + len(v['lgnetmapkeys']) + \
                         len(v['phynetkeys']) + len(v['phynetmapkeys'])
            try:
                typeretkeys, typeretvalues = v['updater'](keys[0:1] + keys[start:start + typekeylen],
                                                          [lgnetset] + values[start:start + typekeylen])
            except:
                raise
            else:
                retkeys.extend(typeretkeys[1:])
                retvalues.extend(typeretvalues[1:])
                lgnetset = typeretvalues[0]
                start = start + typekeylen
        retvalues[0] = lgnetset
        return retkeys, retvalues
    try:
        for m in callAPI(self.app_routine, 'objectdb', 'transact',
                         {'keys': keys, 'updater': updater}):
            yield m
    except:
        raise
    # dump the created logical networks as plain dicts
    dumpkeys = []
    for _, v in typenetwork.items():
        dumpkeys.extend(v.get('lgnetkeys'))
    for m in self._dumpkeys(dumpkeys):
        yield m
def updatelogicalnetwork(self, id, **kwargs):
    """Update one logical network; yields until the updated info is set.

    Changing the physical network of an existing logical network is not
    supported. Raises ValueError when id is empty.
    """
    if not id:
        raise ValueError("must special logicalnetwork id")
    network = {'id': id}
    network.update(kwargs)
    for m in self.updatelogicalnetworks([network]):
        yield m
def updatelogicalnetworks(self,networks):
"update logicalnetworks info that ids, return updated infos"
#networks [{'id':id,....},{'id':id,....}]
self._reqid += 1
reqid = ('viperflow',self._reqid)
maxtry = 1
flgnetworkkeys = set()
newnetworks = []
for network in networks:
network = copy.deepcopy(network)
key = LogicalNetwork.default_key(network['id'])
if key not in flgnetworkkeys:
flgnetworkkeys.add(key)
else:
raise ValueError("key repeate "+key)
newnetworks.append(network)
flgnetworkkeys = list(flgnetworkkeys)
for m in callAPI(self.app_routine,'objectdb','mget',{'keys':flgnetworkkeys,'requestid':reqid}):
yield m
flgnetworkvalues = self.app_routine.retvalue
if None in flgnetworkvalues:
raise ValueError ("logical net id " + LogicalNetwork._getIndices(flgnetworkkeys[flgnetworkvalues.index(None)])[1][0] + " not existed")
lgnetworkdict = dict(zip(flgnetworkkeys,flgnetworkvalues))
try:
| |
`pandas.DataFrame`
## Example
```python
>>> fred = FRED(api_key='<KEY>')
>>> fred.category_related_tags(category_id=125, tag_names=['services', 'quarterly']).head()
group_id notes created popularity series_count
name
discontinued gen 2012-02-27 16:18:19+00:00 67 4
nsa seas Not Seasonally Adjusted 2012-02-27 16:18:19+00:00 100 6
sa seas Seasonally Adjusted 2012-02-27 16:18:19+00:00 88 6
goods gen 2012-02-27 16:18:19+00:00 68 8
balance gen 2012-02-27 16:18:19+00:00 47 12
```
"""
allowed_orders = [
enums.OrderBy.series_count,
enums.OrderBy.popularity,
enums.OrderBy.created,
enums.OrderBy.name,
enums.OrderBy.group_id
]
if order_by not in allowed_orders:
raise ValueError('Variable order_by ({}) is not one of the values: {}'.format(order_by, ', '.join(map(str, allowed_orders))))
if realtime_start is not None and realtime_start < date(1776, 7, 4):
raise ValueError('Variable realtime_start ("{}") is before min date 1776-07-04.'.format(realtime_start))
if realtime_start is not None and realtime_end is not None and realtime_start > realtime_end:
raise ValueError('The date set by variable realtime_start ("{}") can not be after the date set by variable realtime_end ("{}").'.format(realtime_start, realtime_end))
df = pd.DataFrame(
self._client.get(
'/fred/category/related_tags',
'tags',
limit=1000,
category_id=category_id,
realtime_start=realtime_start,
realtime_end=realtime_end,
tag_names=tag_names,
exclude_tag_names=exclude_tag_names,
tag_group_id=tag_group_id,
search_text=search_text,
order_by=order_by,
sort_order=sort_order
)
)
if not df.empty:
df.created = pd.to_datetime(df.created + '00', utc=True, format='%Y-%m-%d %H:%M:%S%z')
df = df.astype(dtype={
'name': 'string',
'notes': 'string',
'group_id': 'category'
}).set_index('name')
return df
"""
Releases
https://fred.stlouisfed.org/releases
"""
def releases(
    self,
    realtime_start: date = None,
    realtime_end: date = None,
    order_by: enums.OrderBy = enums.OrderBy.release_id,
    sort_order: enums.SortOrder = enums.SortOrder.asc
) -> pd.DataFrame:
    """
    Get all releases of economic data.

    https://fred.stlouisfed.org/docs/api/fred/releases.html

    ## Parameters
    `realtime_start`
    The start of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
    `realtime_end`
    The end of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
    `order_by`
    Order results by values of the specified attribute.
    `sort_order`
    Sort results in ascending or descending order for attribute values specified by order_by.

    ## API Request (HTTPS GET)
    https://api.stlouisfed.org/fred/releases?api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json

    ## Returns
    `pandas.DataFrame` indexed by release `id`, with `realtime_start` /
    `realtime_end` parsed as datetimes and `name`/`link`/`notes` as
    strings, `press_release` as bool.

    ## Example
    ```python
    >>> fred = FRED(api_key='<KEY>')
    >>> fred.releases().head()
        realtime_start realtime_end                                               name  press_release                                         link                                              notes
    id
    9       2022-02-05   2022-02-05  Advance Monthly Sales for Retail and Food Serv...           True                http://www.census.gov/retail/  The U.S. Census Bureau conducts the Advance Mo...
    10      2022-02-05   2022-02-05                               Consumer Price Index           True                      http://www.bls.gov/cpi/                                               <NA>
    ```
    """
    allowed_orders = (
        enums.OrderBy.release_id,
        enums.OrderBy.name,
        enums.OrderBy.press_release,
        enums.OrderBy.realtime_start,
        enums.OrderBy.realtime_end,
    )
    # validate the ordering attribute and the real-time window
    if order_by not in allowed_orders:
        raise ValueError('Variable order_by ({}) is not one of the values: {}'.format(order_by, ', '.join(map(str, allowed_orders))))
    if realtime_start is not None and realtime_start < date(1776, 7, 4):
        raise ValueError('Variable realtime_start ("{}") is before min date 1776-07-04.'.format(realtime_start))
    if realtime_start is not None and realtime_end is not None and realtime_start > realtime_end:
        raise ValueError('The date set by variable realtime_start ("{}") can not be after the date set by variable realtime_end ("{}").'.format(realtime_start, realtime_end))
    records = self._client.get(
        '/fred/releases',
        'releases',
        limit=1000,
        realtime_start=realtime_start,
        realtime_end=realtime_end,
        order_by=order_by,
        sort_order=sort_order
    )
    df = pd.DataFrame(records)
    if df.empty:
        return df
    for column in ('realtime_start', 'realtime_end'):
        df[column] = pd.to_datetime(df[column], format='%Y-%m-%d')
    return df.astype(dtype={
        'name': 'string',
        'link': 'string',
        'notes': 'string',
        'press_release': 'bool'
    }).set_index('id')
def releases_dates(
self,
realtime_start: date = None,
realtime_end: date = None,
order_by: enums.OrderBy = enums.OrderBy.release_id,
sort_order: enums.SortOrder = enums.SortOrder.desc,
include_release_dates_with_no_data: bool = False
) -> pd.DataFrame:
"""
## Parameters
`realtime_start`
The start of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`realtime_end`
The end of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`order_by`
Order results by values of the specified attribute.
`sort_order`
Sort results is ascending or descending order for attribute values specified by order_by.
`include_release_dates_with_no_data`
Determines whether release dates with no data available are returned.
The defalut value 'false' excludes release dates that do not have data.
In particular, this excludes future release dates which may be available in the FRED release calendar or the ALFRED release calendar.
If include_release_dates_with_no_data is set to true, the XML tag release_date has an extra attribute release_last_updated that can be compared to the release date to determine if data has been updated.
## Description
https://fred.stlouisfed.org/docs/api/fred/releases_dates.html
Get release dates for all releases of economic data.
Note that release dates are published by data sources and do not necessarily represent when data will be available on the FRED or ALFRED websites.
## API Request (HTTPS GET)
https://api.stlouisfed.org/fred/releases/dates?api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json
## API Response
```json
{
"realtime_start": "2013-01-01",
"realtime_end": "9999-12-31",
"order_by": "release_date",
"sort_order": "desc",
"count": 1129,
"offset": 0,
"limit": 1000,
"release_dates": [
{
"release_id": 9,
"release_name": "Advance Monthly Sales for Retail and Food Services",
"date": "2013-08-13"
},
...
]
}
```
## Returns
`pandas.DataFrame`
## Example
```python
>>> fred = FRED(api_key='<KEY>')
>>> fred.releases_dates(realtime_start=date.today() - timedelta(days=1)).head()
release_name date
release_id
502 Euro Short Term Rate 2022-02-04
492 SONIA Interest Rate Benchmark 2022-02-04
484 Key ECB Interest Rates 2022-02-04
483 SOFR Averages and Index Data 2022-02-04
469 State Unemployment Insurance Weekly Claims Report 2022-02-04
```
"""
allowed_orders = [
enums.OrderBy.release_date,
enums.OrderBy.release_id,
enums.OrderBy.release_name,
]
if order_by not in allowed_orders:
raise ValueError('Variable order_by ({}) is not one of the values: {}'.format(order_by, ', '.join(map(str, allowed_orders))))
if realtime_start is not None and realtime_start < date(1776, 7, 4):
raise ValueError('Variable realtime_start ("{}") is before min date 1776-07-04.'.format(realtime_start))
if realtime_start is not None and realtime_end is not None and realtime_start > realtime_end:
raise ValueError('The date set by variable realtime_start ("{}") can not be after the date set by variable realtime_end ("{}").'.format(realtime_start, realtime_end))
df = pd.DataFrame(
self._client.get(
'/fred/releases/dates',
'release_dates',
limit=1000,
realtime_start=realtime_start,
realtime_end=realtime_end,
order_by=order_by,
sort_order=sort_order,
include_release_dates_with_no_data=include_release_dates_with_no_data
)
)
if not df.empty:
df.date = pd.to_datetime(df.date, format='%Y-%m-%d')
df = df.astype(dtype={
'release_name': 'string'
}).set_index('release_id')
return df
def release(self, release_id: int, realtime_start: date = None, realtime_end: date = None) -> models.Release:
"""
## Parameters
`release_id`
The id for a release.
`realtime_start`
The start of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`realtime_end`
The end of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
## Description
https://fred.stlouisfed.org/docs/api/fred/release.html
Get a release of economic data.
## API Request (HTTPS GET)
https://api.stlouisfed.org/fred/release?release_id=53&api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json
## API Response
```json
{
"realtime_start": "2013-08-14",
"realtime_end": "2013-08-14",
"releases": [
{
"id": 53,
"realtime_start": "2013-08-14",
"realtime_end": "2013-08-14",
"name": "Gross Domestic Product",
"press_release": true,
"link": "http://www.bea.gov/national/index.htm"
}
]
};
```
## Returns
`pystlouisfed.models.Release`
## Example
```python
>>> fred = FRED(api_key='<KEY>')
>>> fred.release(release_id=53)
Release(id=53, realtime_start=datetime.date(2022, 1, 14), realtime_end=datetime.date(2022, 1, 14), name='Gross Domestic Product', press_release=True, link='https://www.bea.gov/data/gdp/gross-domestic-product')
```
"""
if int(release_id) <= 0:
raise ValueError('Variable release_id is not 0 or a positive integer.')
if realtime_start is not None and realtime_start < date(1776, 7, 4):
raise ValueError('Variable realtime_start ("{}") is before min date 1776-07-04.'.format(realtime_start))
if realtime_start is not None and realtime_end is not None and realtime_start > realtime_end:
raise ValueError('The date set by variable realtime_start ("{}") can not be after the date set by variable realtime_end ("{}").'.format(realtime_start, realtime_end))
data = self._client.get(
'/fred/release',
'releases',
release_id=release_id,
realtime_start=realtime_start,
realtime_end=realtime_end,
)
return models.Release(**data[0])
def release_dates(
self,
release_id: int,
realtime_start: date = date(1776, 7, 4),
realtime_end: date = date(9999, 12, 31),
sort_order: enums.SortOrder = enums.SortOrder.asc,
include_release_dates_with_no_data: bool = False
) -> pd.DataFrame:
"""
## Parameters
`release_id`
The id for a release.
`realtime_start`
The start of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`realtime_end`
The end of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`order_by`
Order results by values of the specified attribute.
`sort_order`
Sort results is ascending or descending order for attribute values specified by order_by.
## Description
https://fred.stlouisfed.org/docs/api/fred/release_dates.html
Get release dates for a release of economic data.
## API Request (HTTPS GET)
https://api.stlouisfed.org/fred/release/dates?release_id=82&api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json
## API Response
```json
{
"realtime_start": "1776-07-04",
"realtime_end": "9999-12-31",
"order_by": "release_date",
"sort_order": "asc",
"count": 17,
"offset": 0,
"limit": 10000,
"release_dates": [
{
"release_id": 82,
"date": "1997-02-10"
},
...
]
}
```
## Returns
`pandas.DataFrame`
## Example
```python
>>> fred = FRED(api_key='<KEY>')
>>> fred.release_dates(release_id=82).head()
release_id date
0 82 1997-02-10
1 82 1998-02-10
2 82 1999-02-04
3 82 2000-02-10
4 82 2001-01-16
```
"""
if | |
import discord
import aiohttp
import asyncio
import json
import bs4
from bs4 import BeautifulSoup
from __main__ import send_cmd_help
from .utils import chat_formatting as chat
from discord.ext import commands
# D&D Beyond search URL template: first slot is the category, second the query.
# NOTE(review): host 'www.dnd.beyond.com' looks like a typo for 'www.dndbeyond.com'
# -- confirm before relying on image search.
IMAGE_SEARCH = 'http://www.dnd.beyond.com/{}?filter-search={}'
# Rough field layout of each dnd5eapi.co resource type, as ({field: type}, ...) tuples.
# NOTE(review): `schema` is never referenced in the visible code of this module;
# it appears to serve only as documentation of the API shapes -- confirm before removing.
schema={
    'spells':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'level':'int'},
        {'casting_time':'string'},
        {'range':'string'},
        {'components':'list'},
        {'duration':'string'},
        {'school':'dict'},
        {'desc':'list'},
        {'higher_level':'list'},
        {'material':'string'},
        {'ritual':'string'},
        {'concentration':'string'},
        {'classes':'listdict'},
        {'subclasses':'listdict'},
        {'page':'string'},),
    'equipment':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'cost':'dict'},
        {'damage':'dict'},
        {'weight':'int'},
        {'properties':'listdict'},
        {'desc':'list'},
        {'subtype':''},
        {'type':'dict'}, # monster type is string
        {'equipment_category':'string'},
        {'gear_category':'string'},
        {'armor_category':'string'},
        {'armor_class':'dict'},
        {'str_minimum':'int'},
        {'stealth_disadvantage':'string'}, #really boolean
        {'weapon_category':'string'},
        {'weapon_range':'string'},
        {'category_range':'string'},),
    'classes':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'hit_die':'int'},
        {'proficiency_choices':'listdict'},
        {'proficiencies':'listdict'},
        {'starting_equipment':'dict'},
        {'saving_throws':'listdict'},
        {'class_levels':'dict'},
        {'subclasses':'listdict'},
        {'features':'listdict'},
        {'spellcasting':'dict'},
        {'url':'string'},),
    'subclasses':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'class':'dict'},
        {'subclass_flavor':'string'},
        {'desc':'string'},
        {'features':'listdict'},),
    'monsters':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'size':'string'},
        {'type':'string'},
        {'subtype':'string'},
        {'allignment':'string'}, # NOTE(review): 'allignment' (sic) matches upstream spelling?
        {'strength':'int'},
        {'dexterity':'int'},
        {'constitution':'int'},
        {'intelligence':'int'},
        {'wisdom':'int'},
        {'charisma':'int'},
        {'challenge_rating':'int'},
        {'armor_class':'int'},
        {'hit_points':'int'},
        {'hit_dice':'string'},
        {'speed':'string'},
        {'dexterity_save':'int'},
        {'constitution_save':'int'},
        {'wisdom_save':'int'},
        {'charisma_save':'int'},
        {'perception':'int'},
        {'stealth':'int'},
        {'damage_vulnerabilities':'string'},
        {'damage_resistances':'string'},
        {'damage_immunities':'string'},
        {'condition_immunities':'string'},
        {'senses':'string'},
        {'languages':'string'},
        {'special_abilities':'listdict'},
        {'actions':'listdict'},
        {'legendary_actions':'listdict'},
        ),
    'features':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'level':'int'},
        {'desc':'list'},
        {'class':'dict'},
        ),
    'skills':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'desc':'list'},
        {'ability_score':'dict'},
        {'url':'string'},),
    'proficiencies':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'classes':'listdict'},
        {'races':'listdict'},
        {'url':'string'},),
    'languages': (
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'type':'string'},
        {'typical_speakers':'list'},
        {'script':'string'},
        {'url':'string'},),
    'spellcasting':(
        {'id':'string'},
        {'index':'int'},
        {'spellcasting_ability':'dict'},
        {'info':'listdict'},
        {'url':'string'},
        {'class':'dict'},),
    'startingequipment':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'starting_equipment':'listdict'},
        {'choices_to_make':'int'},
        {'choice_1':'listdict'},
        {'choice_2':'listdict'},
        {'url':'string'},
        {'class':'dict'},),
    'levels':(
        {'id':'string'},
        {'index':'int'},
        {'level':'int'},
        {'ability_score_bonuses':'int'},
        {'prof_bonus':'int'},
        {'feature_choices':'listdict'},
        {'features':'listdict'},
        {'spellcasting':'object'},
        {'class_specific':'object'},
        {'class':'dict'},
        {'url':'string'},),
    'races':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'speed':'int'},
        {'ability_bonuses':'list'},
        {'allignment':'string'},
        {'age':'string'},
        {'size':'medium'}, # NOTE(review): value looks like data ('medium'), not a type name
        {'size_description':'string'},
        {'starting_proficiencies':'listdict'},
        {'languages':'listdict'},
        {'language_desc':'string'},
        {'traits':'listdict'},
        {'subraces':'listdict'},
        {'url':'string'},),
    'subraces':(
        {'id':'string'},
        {'index':'int'},
        {'name':'string'},
        {'race':'dict'},
        {'desc':'string'},
        {'ability_bonuses':'list'},
        {'starting_proficiencies':'listdct'}, # NOTE(review): 'listdct' (sic)
        {'languages':'listdict'},
        {'racial_traits':'listdict'},),
    }
# Fallback embed colour for categories without an entry in COLORS.
DEFAULTCOLOR=discord.Color.default()
# Embed accent colour per API category; keys are lowercase category slugs.
COLORS = {
    'spells' : discord.Color.purple(),
    'equipment': discord.Color.blue(),
    # 'starting-equipment':discord.Color.blue(),
    # 'spellcasting':discord.Color.purple(),
    'monsters' : discord.Color.red(),
    'classes' : discord.Color.orange(),
    # 'subclasses':discord.Color.(0xf29214),
    'features': discord.Color.orange(),
    # 'levels':discord.Color.(0xf29214),
    'races': discord.Color.orange(),
    # 'subraces':discord.Color.discord.Color.(0xf29214),
    # 'traits':discord.Color.(0xf29214),
    # 'ability-scores': discord.Color.(0xf29214),
    # 'skills' : discord.Color.(0xf29214),
    # 'proficiencies' : discord.Color.(0xf29214),
    # 'languages': discord.Color.(0xf29214),
}
# Root URL of the dnd5eapi.co REST API.
BASEURL = 'http://dnd5eapi.co/api/'
# Prompt shown when the user presses OK on a list menu; the slot is the category name.
SELECTION = 'Enter selection for more {}information.'
class DND:
'''D&D Lookup Stuff'''
    def __init__(self, bot):
        # Keep a reference to the bot so commands can call its messaging/reaction API.
        self.bot = bot
@commands.group(pass_context=True)
async def dnd(self, ctx):
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
return
@dnd.command(name='spells', pass_context=True)
async def lookup_spells(self, ctx, *, search=None):
'''Lookup Spells'''
CATEGORY = 'Spells'
await self._process_category(ctx, search, CATEGORY)
@dnd.command(name='features', pass_context=True)
async def lookup_features(self, ctx, *, search=None):
'''Lookup Features'''
CATEGORY = 'Features'
await self._process_category(ctx, search, CATEGORY)
@dnd.command(name='classes', pass_context=True)
async def lookup_classes(self, ctx, *, search=None):
'''Lookup classes'''
CATEGORY = 'classes'
await self._process_category(ctx, search, CATEGORY)
@dnd.command(name='monsters', pass_context=True)
async def lookup_monsters(self, ctx, *, search=None):
'''Lookup Monsters'''
CATEGORY = 'Monsters'
await self._process_category(ctx, search, CATEGORY)
@dnd.command(name='equipment', pass_context=True)
async def lookup_equipment(self, ctx, *, search=None):
'''Lookup equipment'''
CATEGORY = 'equipment'
await self._process_category(ctx, search, CATEGORY)
async def _process_category(self, ctx, search=None, CATEGORY=None):
if search is None:
url = '{}{}'.format(BASEURL, CATEGORY)
print(url)
menu_pages = await self._present_list(url, CATEGORY.lower())
if menu_pages is not None:
# await self.bot.say('Press ⏺ to select:')
await self.pages_menu(ctx=ctx, embed_list=menu_pages, category=CATEGORY, message=None, page=0, timeout=30, choice=True)
else:
print('error - no menu pages')
elif search.isnumeric():
url = '{}{}/{}'.format(BASEURL,CATEGORY.lower(),search)
print(url)
# await self.bot.say('{} search: <{}>'.format(CATEGORY, url))
await self._process_item(ctx=ctx,url=url,category=CATEGORY)
# except:
else:
if ' ' in search:
search = search.replace(' ', '+')
search = search.replace(' ','+')
url = '{}{}/?name={}'.format(BASEURL, CATEGORY.lower(), search)
print(url)
json_file = await _get_file(url)
await self.bot.say('{} search: <{}>'.format(CATEGORY, json_file['results'][0]['url']))
async def pages_menu(self, ctx, embed_list: list, category: str='', message: discord.Message=None, page=0, timeout: int=30, choice=False):
"""menu control logic for this taken from
https://github.com/Lunar-Dust/Dusty-Cogs/blob/master/menu/menu.py"""
print('list len = {}'.format(len(embed_list)))
length = len(embed_list)
em = embed_list[page]
if not message:
message = await self.bot.say(embed=em)
if length > 5:
await self.bot.add_reaction(message, '\N{BLACK LEFT-POINTING DOUBLE TRIANGLE}')
await self.bot.add_reaction(message, '\N{BLACK LEFT-POINTING TRIANGLE}')
if choice is True:
await self.bot.add_reaction(message,'\N{}')
await self.bot.add_reaction(message, '\N{CROSS MARK}')
await self.bot.add_reaction(message, '\N{BLACK RIGHT-POINTING TRIANGLE}')
if length > 5:
await self.bot.add_reaction(message, '\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE}')
else:
message = await self.bot.edit_message(message, embed=em)
await asyncio.sleep(1)
react = await self.bot.wait_for_reaction(message=message, timeout=timeout,emoji=['\N{BLACK RIGHT-POINTING TRIANGLE}',
'\N{BLACK LEFT-POINTING TRIANGLE}',
'\N{CROSS MARK}',
'\N{BLACK LEFT-POINTING DOUBLE TRIANGLE}',
'\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE}',
'\N{SQUARED OK}'])
# if react.reaction.me == self.bot.user:
# react = await self.bot.wait_for_reaction(message=message, timeout=timeout,emoji=['\N{BLACK RIGHT-POINTING TRIANGLE}', '\N{BLACK LEFT-POINTING TRIANGLE}', '\N{CROSS MARK}', '\N{BLACK LEFT-POINTING DOUBLE TRIANGLE}', '\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE}','\N{ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS}'])
if react is None:
try:
try:
await self.bot.clear_reactions(message)
except:
await self.bot.remove_reaction(message,'\N{BLACK LEFT-POINTING DOUBLE TRIANGLE}', self.bot.user) #rewind
await self.bot.remove_reaction(message, '\N{BLACK LEFT-POINTING TRIANGLE}', self.bot.user) #previous_page
await self.bot.remove_reaction(message, '\N{CROSS MARK}', self.bot.user) # Cancel
await self.bot.remove_reaction(message,'\N{SQUARED OK}',self.bot.user) #choose
await self.bot.remove_reaction(message, '\N{BLACK RIGHT-POINTING TRIANGLE}', self.bot.user) #next_page
await self.bot.remove_reaction(message,'\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE}', self.bot.user) # fast_forward
except:
pass
return None
elif react is not None:
# react = react.reaction.emoji
if react.reaction.emoji == '\N{BLACK RIGHT-POINTING TRIANGLE}': #next_page
next_page = (page + 1) % len(embed_list)
# await self.bot.remove_reaction(message, '▶', react.reaction.message.author)
return await self.pages_menu(ctx, embed_list, message=message, page=next_page, timeout=timeout)
elif react.reaction.emoji == '\N{BLACK LEFT-POINTING TRIANGLE}': #previous_page
next_page = (page - 1) % len(embed_list)
# await self.bot.remove_reaction(message, '⬅', react.reaction.message.author)
return await self.pages_menu(ctx, embed_list, message=message, page=next_page, timeout=timeout)
elif react.reaction.emoji == '\N{BLACK LEFT-POINTING DOUBLE TRIANGLE}': #rewind
next_page = (page - 5) % len(embed_list)
# await self.bot.remove_reaction(message, '⏪', react.reaction.message.author)
return await self.pages_menu(ctx, embed_list, message=message, page=next_page, timeout=timeout)
elif react.reaction.emoji == '\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE}': # fast_forward
next_page = (page + 5) % len(embed_list)
# await self.bot.remove_reaction(message, '⬅', react.reaction.message.author)
return await self.pages_menu(ctx, embed_list, message=message, page=next_page, timeout=timeout)
elif react.reaction.emoji == '\N{SQUARED OK}': #choose
if choice is True:
# await self.bot.remove_reaction(message, '⏩', react.reaction.message.author)
prompt = await self.bot.say(SELECTION.format(category+' '))
answer = await self.bot.wait_for_message(timeout=10, author=ctx.message.author)
if answer is not None:
await self.bot.delete_message(prompt)
prompt = await self.bot.say('Process choice : {}'.format(answer.content.lower().strip()))
url = '{}{}/{}'.format(BASEURL,category,answer.content.lower().strip())
await self._process_item(ctx, url=url, category=category)
await self.bot.delete_message(prompt)
else:
pass
else:
try:
return await self.bot.delete_message(message)
except:
pass
async def _process_item(self, ctx=None, url=None, category=None):
print('process_item')
if category.lower() in COLORS:
COLOR = COLORS[category.lower()]
else:
COLOR = discord.Color.default()
json_file = await _get_file(url)
if 'count' in json_file:
await self._process_category(ctx=ctx, url=url, category=category)
else:
keys = json_file.keys()
messages = []
if 'count' in json_file: # Present list
menu_pages = await _present_list(self, url, CATEGORY)
if menu_pages is not None:
await self.pages_menu(ctx, menu_pages, CATEGORY, message=None, page=0, timeout=30)
else:
print('menu_pages is None')
# elif category.lower() in COLORS: #process endpoint
# category=category.lower()
img_available = ['monsters', 'equipment',]
embeds = []
em = discord.Embed(color=COLOR,title=json_file['name'])
if category in img_available:
name = json_file['name']
if category == 'equipment':
gettype = json_file['equipment_category']
else:
gettype = json_file['type']
try:
em.set_image(url=await self.image_search(category.lower(),name.lower(),gettype))
except:
print('cannot find image')
##
said = await self.bot.say(embed=em)
messages.append(said)
# for key in keys:
# if key not in {'_id','index','name','desc','actions','legendary_actions', 'higher_level'}:
# key2 = key.replace('_',' ').title()
# if json_file[key] is not None or json_file[key] != '':
# if isinstance(json_file[key],list):
# try:
# em.add_field(name=key2,value='\n'.join(j['name'] for j in json_file[key]))
# except:
# em.add_field(name=key2,value='\n'.join(j for j in json_file[key]))
# elif isinstance(json_file[key],tuple):
# try:
# em.add_field(name=key2,value='\n'.join(j['name'] for j in json_file[key]))
# except:
# em.add_field(name=key2,value='\n'.join(j for j in json_file[key]))
# elif isinstance(json_file[key],dict):
# em.add_field(name=key2,value=json_file[key]['name'])
# elif isinstance(json_file[key],str):
# em.add_field(name=key2,value=json_file[key])
# elif isinstance(json_file[key],int):
# em.add_field(name=key2,value=json_file[key])
# else:
# em.add_field(name=key2,value='something else detected')
# embeds.append(em)
listkeys = ('desc')
dictkeys = ('cost', 'damage', 'range' '2h_damage','armor_class')
for key in ('desc', 'actions','legendary_actions', 'higher_level'):
if key in keys:
if isinstance(json_file[key], list):
desc_pages = chat.pagify('\n'.join(json_file[key]), delims=['\n\n'], escape=True, shorten_by=8, page_length=1980)
embed_list = []
i = 0
for page in desc_pages:
if i == 0:
embeds.append(discord.Embed(color=COLORS[category],title=key.replace('_',' ').title(),description=page))
else:
em = discord.Embed(color=COLORS[category],title='',description=page)
embeds.append(em)
i+=1
elif isinstance(json_file[key],dict):
desc_pages = chat.pagify('\n'.join(json_file[key]['desc']), delims=['\n\n'], escape=True, shorten_by=8, page_length=1000)
embed_list = []
i = 0
for page in desc_pages:
if i == 0:
em = discord.Embed(color=COLORS[category],title=key.replace('_',' ').title(),description='')
keys2 = json_file[key].keys()
for k in keys2:
if k != 'desc':
em.add_field(name=k.replace('_',' ').title(),value=json_file[key][k2])
embeds.append(em)
else:
em = discord.Embed(color=COLORS[category],title='',description=page)
embeds.append(em)
i+=1
for em in embeds:
said = await self.bot.say(embed=em)
messages.append(said)
last = len(messages)-1
await self.bot.add_reaction(messages[last], "❌")
react = await self.bot.wait_for_reaction(message=messages[last], user=ctx.message.author, timeout=30, emoji=["❌"])
if react == '❌':
try:
return await self.bot.delete_message(message)
except:
pass
async def _present_list(self, url, category):
'''count = number of list items
results = list of (name, url)'''
print(url)
json_file = await _get_file(url)
length = int(json_file['count'])-1
if json_file is not None:
results = json_file['results']
package = []
for r in results:
name = r['name']
link = r['url'].rsplit('/',1)[1]
package.append('{} {}'.format(link, name))
pages = chat.pagify('\n'.join(package), delims=['\n'], escape=True, shorten_by=8, page_length=350)
menu_pages = []
for page in pages:
em=discord.Embed(color=COLORS[category.lower()], title=category.title(), description=chat.box(page))
em.add_field(name='To select',value='Press OK')
em.set_footer(text='From dnd5eapi.co',icon_url='http://www.dnd5eapi.co/public/favicon.ico')
menu_pages.append(em)
print(len(menu_pages))
return menu_pages
else:
print('json_file returned empty')
return None
async def _get_file(url):
    """GET *url* and return the decoded JSON payload (None when the body decodes to null)."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            print('_get_file(' + url + ')')
            payload = await response.json()
            if payload is not None:
                return payload
async def image_search(self,category,name,gettype):
plus_name = name.replace(' ','+')
url = IMAGE_SEARCH.format(category,plus_name)
try:
async with aiohttp.get(url) as response:
| |
'''
alyE5071B
Created on Jun, 2015
@author: bmates1
'''
import gpib_instrument
import numpy as np
import pickle
import pylab
import math
import time
class alyE5071B(gpib_instrument.Gpib_Instrument):
'''
The network analyzer Agilent E5071B GPIB communication class
'''
    def __init__(self, pad, board_number = 0, name = '', sad = 0, timeout = 13, send_eoi = 1, eos_mode = 0):
        '''Constructor The PAD (Primary GPIB Address) is the only required parameter '''
        super(alyE5071B, self).__init__(board_number, name, pad, sad, timeout, send_eoi, eos_mode)
        # GPIB identity string of the instrument
        # NOTE(review): "E8071B" disagrees with model_number 'E5071B' below and with the
        # class name -- looks like a typo; confirm against the instrument's *IDN? reply.
        self.id_string = "HEWLETT PACKARD,E8071B"
        self.manufacturer = 'Agilent'
        self.model_number = 'E5071B'
        self.description = 'Network Analyzer'
        self.allowedpts = [3,11,21,26,51,101,201,401,801,1601] # allowed number of x-axis bins for VNA
        self.voltage = None  # not read anywhere in the visible code
        self.twait = 1.0  # default settling wait (s); not read in the visible code
# Helper functions ---------------------------------------------------------------------------------
def savePickle(self,data,filename):
''' save python data 'd' to a picklefile with filename 'filename.pkl' '''
filename=filename+'.pkl'
print 'writing data to file: ',filename
f=open(filename,'w')
pickle.dump(data,f)
f.close()
def s21mag(self,a_real,a_imag):
return 20*np.log10(np.sqrt(a_real**2+a_imag**2))
def makeFrequencyArray(self,fi,numpts,span):
''' return the frequency array given start frequency fi,
the number of points (numpts), and the frequency span
'''
return fi+np.arange(numpts)*span/float(numpts-1)
def makeTimeArray(self,numpts,sweeptime):
return np.arange(numpts)*sweeptime/float(numpts-1)
    def findClosestNpts(self,N):
        ''' Number of points per window can only take discrete values self.allowedpts. Find
        closest number greater than N within self.allowedpts
        '''
        # Hardware cap: the instrument supports at most 1601 points per sweep.
        if N>1601:
            print 'Warning Npts max = 1601. returning N=1601.'
            Npts = 1601
        else:
            XX = np.array(self.allowedpts) - N  # signed distance to each allowed value
            dex = np.argmin(np.abs(XX))  # index of the nearest allowed value
            if XX[dex] < 0:
                # Nearest allowed value is below N: round up to the next entry so the
                # returned count is never smaller than requested.
                Npts = self.allowedpts[dex+1]
            else:
                Npts = self.allowedpts[dex]
        return Npts
# Get functions -----------------------------------------------------------------------------------
def getFrequencyArray(self):
''' returns the current frequency array (in MHz) by asking the VNA for the sweep parameters '''
return self.makeFrequencyArray(fi=self.getStartFrequency(),numpts=self.getPoints(),span=self.getSpan())/1.e6
    def getPower(self):
        '''Query the source power in dBm; the raw reply is echoed for debugging.'''
        ret_str = self.ask(':SOUR1:POW?')
        print ret_str
        return float(ret_str)
def getSpan(self):
'gets the frequency span in Hz'
F=self.askFloat(':SENS1:FREQ:SPAN?')
return F
    def getMarkerVal(self):
        '''Read marker 1 and return [power_dB, freq_Hz] -- note the power comes first.'''
        f = self.askFloat(':CALC1:MARK1:X?')
        dB = self.askFloat(':CALC1:MARK1:Y?')
        return [dB,f]
def getCWfreq(self):
'gets the center frequency (in MHz) in continuous wave mode'
F=self.askFloat(':SENS1:FREQ?')/1.e6
return F
def getPoints(self):
'get the number of points in the trace'
N=self.askFloat(':SENS1:SWE:POIN?')
return int(N)
def getNumPoints(self):
'get the number of points in the trace'
N=self.askFloat(':SENS1:SWE:POIN?')
return int(N)
def getAverages(self):
'get the number of traces being averaged'
N=self.askFloat(':SENS1:AVER:COUN?')
return int(N)
def getStartFrequency(self):
return float(self.ask(':SENS1:FREQ:STAR?'))
def getCenterFrequency(self):
return float(self.ask(':SENS1:FREQ:CENT?'))
    def getFrequencySweepParameters(self):
        '''Stub: not implemented yet. Announces itself and always returns False.'''
        print 'To be written!!'
        return False
def getSweepTime(self):
return float(self.ask(':SENS1:SWE:TIME?'))
def getSweepTimeArray(self):
N = self.getPoints()
st = self.getSweepTime()
return self.makeTimeArray(N,st)
# def getMeasurement(self,D=100000):
# self.write('SING')
# return self.getComplexTrace(D)
    def getTrace(self):
        ''' transfer the current trace to the computer using the ascii protocol
            output: N x 2 numpy array, 0th column is real, 1st column is imaginary

            Triggers a single averaged acquisition (software trigger), waits for
            it to finish, then reads the complex S-data back in ASCII form.
        '''
        Npts = self.getPoints()
        # Estimated acquisition time: one sweep per average, plus 1% margin.
        twait = self.getAverages() * self.getSweepTime() * 1.01
        self.write(':ABOR')  # Abort running measurements
        # Configure single-shot averaging operation
        self.write(':INIT1:CONT OFF')  # Suspend continuous operation
        self.write(':SENS1:AVER ON')  # Enable averaging (even if averfact = 1)
        self.write(':TRIG:AVER ON')  # Lets the trigger start the average
        self.write(':INIT1:CONT ON')  # Restores continuous operation
        self.write(':TRIG:SOUR BUS')  # Waits for software trigger
        self.write(':TRIG:SING')  # Sends software trigger
        # Take the data
        time.sleep(twait)  # Pause for approximate duration of meaurement
        self.ask('*OPC?')  # Wait until measurement is complete
        # Capture the data
        self.write(':FORM:DATA ASC')  # Set data to ASCII mode
        self.write(':CALC1:DATA:SDAT?')  # Ask for the acquired trace
        res = self.rawread(Npts*40)  # Read trace data (40 bytes per frequency bin)
        # Process the data: the reply is a flat comma-separated list
        # real0,imag0,real1,imag1,...
        valuestrings = res.split(',')  # Split the data into frequency bins
        N = len(valuestrings)/2  # NOTE: relies on Python 2 integer division
        if N != Npts:  # Check for the right number of frequency bins
            print 'WARNING: number of output points does not match the current number of frequency bins!'
        result = np.zeros((N,2))
        for n in range(N):  # For each frequency bin
            # XX = res[n].split(',') # split the S-parameter into real and imaginary
            result[n,0]=float(valuestrings[2*n])
            result[n,1]=float(valuestrings[2*n + 1])
        # res = res.split('\n')[:-1] # Split the data into frequency bins
        # N = len(res)
        # if N != Npts: # Check for the right number of frequency bins
        # print 'WARNING: number of output points does not match the current number of frequency bins!'
        # result = np.zeros((N,2))
        # for n in range(N): # For each frequency bin
        # XX = res[n].split(',') # split the S-parameter into real and imaginary
        # result[n,0]=float(XX[0])
        # result[n,1]=float(XX[1])
        return result
def getS21(self):
data = self.getTrace()
data_r = data[:,0]
data_i = data[:,1]
s21 = data_r + 1j*data_i
return s21
# def getComplexTrace(self,D=100000,savedata=False,filename='foo'):
# 'sets xfer format to float'
# 'D should be appropriately sized for the number of points in the trace'
# print 'getComplexTrace is deprecated. Consider using getTrace.'
# self.write('FORM4')
# self.write('OUTPDATA')
# res = self.rawread(D)
# split_array = res.split('\x00')
# raw_data_array = split_array[0].split('\n')
# print len(raw_data_array)
# raw_data_array.pop #clip inevitable garbage element
# 'list with form r,i \n'
# N = len(raw_data_array)
# result = np.zeros((N-1,1),'complex')
# for n in range(N-1):
# 'breaks into nx2 matrix with r,i'
# result[n]=float(raw_data_array[n].split(',')[0])+1j*float(raw_data_array[n].split(',')[1])
# if savedata:
# f = open(filename+'.pkl','w')
# pylab.plot(np.real(result))
# pickle.dump(result,f)
# return result
# def getTraceBinary(self):
# ''' grab the data in the trace. Should be twice as fast as getTrace. '''
#
# # FORM3 is 64-bit floating point.
# # 8 bytes per data point. There are two numbers per frequency bin (real and imag)
# # thus a full 1601 bin trace has 1601 x 2 x 8 byte = 3216 bytes
# # header is 4 bytes
# print 'UNFINISHED!'
# self.write('FORM3')
# self.write('OUTPDATA')
# Set functions --------------------------------------------------------------------
def setCWfreq(self,F):
'set the frequency for CW mode'
self.write(':SENS1:FREQ %.3fMHz'%(F))
def setCenter(self,center):
s = 'SENS:FREQ:CENT %f' % float(center)
self.write(s)
def setLinearFrequencySweep(self):
self.write(':SENS1:SWE:TYPE LIN')
def setPowerSwitch(self,P):
if P=='on' or P==1 or P=='ON':
power=' ON'
else:
power=' OFF'
self.write('OUTP'+power)
def setPower(self,P):
self.write(':SOUR1:POW %.2f'%(P))
    def setIFbandwidth(self,ifbw):
        ''' set the intermediate frequency bandwidth (in Hz) '''
        # The instrument only accepts these discrete IF bandwidth values.
        allowedvals = [10,30,100,300,1000,3000,3700,6000]
        if ifbw in allowedvals:
            self.write(':SENS1:BWID %d'%ifbw)
        else:
            # Invalid request: warn and leave the instrument setting unchanged.
            print 'The IF bandwidth can only take the following discrete values:',allowedvals
def setIFBW(self,ifbw):
self.setIFbandwidth(ifbw)
    def setupFrequencySweep(self,fi=100,ff=200,power=-45,mtype='S21',displaytype='MLOG',numpts=1601,averfact=1,ifbw=1000):
        ''' set instrument for a frequency sweep

        fi/ff: start/stop frequency in MHz; power in dBm; mtype e.g. 'S21';
        displaytype e.g. 'MLOG'; numpts must be one of self.allowedpts;
        averfact: averaging count; ifbw: IF bandwidth in Hz.
        The SCPI commands below are issued in a deliberate order
        (measurement parameter is defined and selected before its display
        format is set); do not reorder casually.
        '''
        self.setIFbandwidth(ifbw)
        self.write(':SENS1:SWE:TIME:AUTO ON')
        self.write('SENS1:OFFS OFF')
        self.setLinearFrequencySweep()
        #self.write('CHAN1;AUTO;AUXCOFF;')
        self.write(':CALC1:PAR1:DEF '+mtype)
        self.write(':CALC1:PAR1:SEL')
        self.write(':CALC1:SEL:FORM '+displaytype)
        self.write(':SENS1:FREQ:STAR %.3fE6'%(fi))
        self.write(':SENS1:FREQ:STOP %.3fE6'%(ff))
        self.setPower(power)
        self.setPoints(numpts)
        self.setAverages(averfact)
        self.setPowerSwitch('ON')
# def setupTDtrace(self,fc,numpts=1601,ifbw=1000,power=-45):
# ''' set up instrument to take a time domain trace, monitoring a single tone '''
# self.setCWfreq(fc)
# self.setPoints(numpts)
# self.setIFbandwidth(ifbw)
# self.setPower(power)
# self.write('AUTO')
def setPoints(self,N):
    'sets the number of points in a trace'
    self.write(':SENS1:SWE:POIN %d'%(N))
    # self.allowedpts is defined elsewhere on this class; warn (Python 2
    # print) when N is not one of the supported values and report what the
    # instrument actually accepted -- presumably it coerces invalid values
    if N not in self.allowedpts:
        print 'WARNING: ', N, ' points not allowed. N must be in ', self.allowedpts
        print 'Number of points set to: ', self.getPoints()
def setNumPoints(self,N):
    """Shorthand alias for setPoints (number of trace points)."""
    self.setPoints(N)
def setSpan(self,span):
    """Set the frequency span (Hz)."""
    command = ':SENS1:FREQ:SPAN %.3f' % span
    self.write(command)
def setAverages(self,averfact):
    """Set the number of traces to average."""
    command = ':SENS1:AVER:COUN %d' % averfact
    self.write(command)
def setMarkerOn(self,N):
    """Enable marker number N."""
    command = ':CALC1:MARK%d ON' % N
    self.write(command)
def setMarkerMax(self):
    """Move marker 1 to the maximum point on the trace."""
    for command in (':CALC1:MARK1:FUNC:TYPE MAX', ':CALC1:MARK1:FUNC:EXEC'):
        self.write(command)
def setMarkerMin(self):
    """Move marker 1 to the minimum point on the trace."""
    for command in (':CALC1:MARK1:FUNC:TYPE MIN', ':CALC1:MARK1:FUNC:EXEC'):
        self.write(command)
def setMarkerCenter(self):
    """Set the sweep center frequency to marker 1's position."""
    self.write(':CALC1:MARK1:SET CENT')
# higher level data acquisition functions --------------------------------------------------------------
def alySnapShot(self,fi=100,ff=200,power=-45,mtype='S21',displaytype='MLOG',numpts=1601,averfact=1,showplot=False,savedata=False,filename='foo'):
    ''' measure a frequency sweep in one frame of the network analyzer (1601 pts max)
    input:
        fi = start frequency (MHz)
        ff = stop frequency (MHz)
        power = (dBm) microwave power
        mtype = 'S21' or 'S11'
        displaytype = 'LOGM', 'PHAS', etc. What will be displayed on the instrument
        numpts = number of points in scan (max = 1601)
        averfact = averaging factor
        showplot = plot magnitude vs frequency with pylab
        savedata/filename = pickle the result via self.savePickle
    output: numpts x 3 array
        1st column: frequency (MHz)
        2nd column: real part of response
        3rd column: imaginary part of response
    '''
    # program the sweep, then fetch the complex trace and frequency axis
    self.setupFrequencySweep(fi,ff, power, mtype, displaytype, numpts, averfact)
    #print 'taking snapshot'
    d = self.getTrace()
    N,M = np.shape(d)
    f = self.getFrequencyArray()
    # repack into a single N x 3 array: [frequency, real, imag]
    dd = np.zeros((N,3))
    dd[:,0]=f
    dd[:,1]=d[:,0]
    dd[:,2]=d[:,1]
    if showplot:
        print 'plotting the data'
        # s21mag presumably converts (re, im) to magnitude in dB -- defined elsewhere
        pylab.plot(dd[:,0],self.s21mag(dd[:,1],dd[:,2]))
        pylab.xlabel('Frequency (MHz)')
        pylab.ylabel('Response (dB)')
    if savedata:
        self.savePickle(dd, filename)
    return dd
def measureSurvey(self,fi=500,ff=1000,fres=1,power=-45,mtype='S21',displaytype='MLOG',averfact=10,showplot=False,savedata=False,filename='foo'):
''' take a frequency survey
input: fi, start frequency (MHz)
ff, stop frequency (MHz)
fres, frequency resolution (MHz), note this is an upper limit (not exact) because Npoints can only take discrete values
power, (dBm)
mtype, measurement type (S21 or S11, etc)
displaytype, what is shown | |
from subprocess import Popen, PIPE, signal
import os
import re
from modules.webrequests import GetCPUHardwareData, UpdateSupportedDevices
from modules.deviceCacher import GetCachedDeviceData, CacheDevice
from modules.deviceInfoFormat import PrintError
# Module-level cache of CPU hardware data fetched from the web; populated
# lazily by SetCPUHardwareData() the first time CPU info is looked up.
CPUHardwareData = {}
def SetCPUHardwareData():
    """Populate the module-level CPUHardwareData cache from the web service."""
    global CPUHardwareData
    CPUHardwareData = GetCPUHardwareData()
class Amazon:
    """Lookup table for Amazon devices.

    Amazon devices are not present in the Google Play Store
    'supported devices' list (supported-devices.csv), so a separate
    mapping is kept just for them.
    """

    # model_code -> market_name
    devices = {
        'KFOT': 'Kindle Fire (2012)',
        'KFJWI': 'Kindle Fire HD 8.9" (2012)',
        'KFTT': 'Kindle Fire HD 7" (2012)',
        'KFTBWI': 'Kindle Fire HD 10 (2015)',
        'KFTHWI': 'Kindle Fire HDX 7" (2013)',
        'KFAPWI': 'Kindle Fire HDX 8.9" (2013)',
        'KFFOWI': 'Fire (2015)',
        'KFMEWI': 'Fire HD 8 (2015)',
        'KFSAWA': 'Fire HDX 8.9 (2014)',
        'SD4930UR': 'Fire Phone',
    }
class DeviceData:
    # Class-level defaults; instances overwrite these via the private
    # setter methods below. All values are kept as strings throughout.
    __serial = ""          # adb serial number (as printed by `adb devices`)
    # General
    __manuf = ""           # manufacturer, e.g. "Samsung"
    __model_code = ""      # model code, e.g. "GT-I880"
    __market_name = ""     # human-readable name, e.g. "Galaxy S8"
    __os_ver = ""          # Android version string, e.g. "6.0.1"
    __fingerprint = ""     # ro.build.fingerprint value
    __battery_level = ""   # battery charge percentage
    __battery_temp = ""    # battery temperature, one decimal place
    # GPU info
    __gpu_renderer = ""       # e.g. "Adreno (TM) 540"
    __gpu_manufacturer = ""   # e.g. "Qualcomm"
    __gpu_gles = ""           # e.g. "OpenGL ES 3.2"
    # CPU info
    __cpu_abi = ""         # ro.product.cpu.abi value
    __cpu_soc = ""         # SoC family (from CPUHardwareData lookup)
    __cpu_hardware = ""    # detailed CPU description (from CPUHardwareData lookup)
def __ExecuteCommand(self, command):
proc = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = proc.communicate(input=None, timeout=None)
if "Permission denied" in err.decode("utf-8"):
raise Exception("Permission denied", err.decode("utf-8"))
elif "Failure" in err.decode("utf-8"): # in case error is printed in err (most of the time it is printed in output too)
return err.decode("utf-8").rstrip()
try:
return output.decode("utf-8").rstrip()
except UnicodeDecodeError:
try:
return output.decode("windows-1258").rstrip()
except UnicodeDecodeError:
return output.decode("ibm866").rstrip()
def __ExecuteCommandInterruptible(self, command):
proc = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE)
try:
proc.communicate(input=None, timeout=None)
except KeyboardInterrupt:
proc.terminate()
def __SetGPU(self):
    """Parse GPU vendor/renderer/GLES version from `dumpsys SurfaceFlinger`.

    Expected line: "GLES: Qualcomm, Adreno (TM) 540, OpenGL ES 3.2";
    "GLES: " is group 1, the rest belongs to group 2.
    FIX: previously a device whose dumpsys output had no matching GLES line
    crashed with AttributeError on matchObj=None; now the empty-string
    class defaults are left in place instead.
    """
    data = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "dumpsys", "SurfaceFlinger"])
    matchObj = re.search(r"(GLES: )(.+ [\w+-]+ .\..)", data, flags=0)
    if matchObj is None:
        return
    elements = matchObj.group(2).split(", ")
    self.__gpu_manufacturer = elements[0]
    self.__gpu_renderer = elements[1]
    self.__gpu_gles = elements[2]
def __SetCPUHardware(self):
    """
    Resolve the SoC and CPU description for this device.

    Combines `ro.product.board` (getprop) with the Hardware line of
    /proc/cpuinfo -- preferring the cpuinfo value when it contains the
    board string -- then looks the result up in the downloaded
    CPUHardwareData table.
    """
    getprop = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "getprop", "ro.product.board"])
    procCpuInfo = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "cat", "/proc/cpuinfo"])
    matchObj = re.search(r"Hardware.+ ([\w\-]+)", procCpuInfo, flags=0)
    hardwareLine = ""
    if matchObj:
        hardwareLine = matchObj.group(1)
    # hardwareLine = cpuinfo
    # getprop = board
    realBoard = ""
    if getprop in hardwareLine:
        realBoard = hardwareLine
    else:
        realBoard = getprop
    # if CPU Data is not set yet - download it
    if len(CPUHardwareData) == 0:
        SetCPUHardwareData()
    # NOTE(review): keys of CPUHardwareData are assumed to be lowercase
    # board names with "SoC"/"CPU" sub-keys -- confirm against webrequests.
    if realBoard.lower() in CPUHardwareData:
        self.__cpu_soc = CPUHardwareData[realBoard.lower()]["SoC"]
        self.__cpu_hardware = CPUHardwareData[realBoard.lower()]["CPU"]
    else:
        self.__cpu_soc = "NOT FOUND"
        self.__cpu_hardware = "NOT FOUND"
def __SetCPU(self):
    """Fill in CPU fields: SoC/hardware via __SetCPUHardware, then the ABI."""
    self.__SetCPUHardware()
    abi = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "getprop", "ro.product.cpu.abi"])
    self.__cpu_abi = abi
def __SetDeviceNames(self):
    """Resolve manufacturer, model code and market name for the device.

    Amazon devices are resolved from the local Amazon.devices table and
    return early (they are not in the CSV); everything else goes through
    __SetMarketName, with one retry after refreshing the CSV.
    FIX: the retry previously passed tProductDevice for both arguments;
    it now passes (tProductName, tProductDevice) like the first attempt.
    """
    self.__manuf = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "getprop", "ro.product.manufacturer"]).title()
    self.__model_code = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "getprop", "ro.product.model"])
    if self.__manuf.lower() == 'amazon':
        try:
            self.__market_name = Amazon.devices[self.__model_code]
            return # Early return for amazon devices since they are not in csv
        except KeyError:
            self.__market_name = "UNKNOWN"
            return
    tProductName = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "getprop", "ro.product.name"])
    tProductDevice = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "getprop", "ro.product.device"])
    self.__SetMarketName(tProductName, tProductDevice)
    if(self.__market_name == "-"):
        # If failed to set the name, update CSV file and check if it exists by then
        print("Certain device name was not found, updating supported devices list")
        UpdateSupportedDevices(None)
        self.__SetMarketName(tProductName, tProductDevice)
def __SetMarketName(self, tProductName, tProductDevice):
    """
    Look up the device's market name in supported_devices.csv.

    CSV columns (assumed): 0=manufacturer, 1=market name, 2=device,
    3=model. Downloads the CSV first when it is missing. Sets
    self.__market_name on an exact model match, or to the last fuzzy
    match found while leaving the manufacturer's section of the file.
    """
    supportedDevicesPth = "../res/supported_devices.csv"
    scriptPath = os.path.dirname(__file__)
    targetPath = os.path.join(scriptPath, supportedDevicesPth)
    # becomes True once a row of this manufacturer has been seen
    checkDeviceRange = False
    try:
        f = open(targetPath, mode='r', buffering=-1, encoding="UTF-16")
    except FileNotFoundError:
        # CSV not downloaded yet -- fetch it, then retry the open
        from modules.webrequests import UpdateSupportedDevices
        UpdateSupportedDevices("")
        f = open(targetPath, mode='r', buffering=-1, encoding="UTF-16")
    for line in f:
        # Cache the line split since we use it a few times
        splitLine = line.split(",")
        # NOTE(review): tempName resets on every line, so fuzzy matches from
        # earlier lines are discarded -- confirm this is intentional.
        tempName = '-'
        if self.__manuf.lower() in splitLine[0].lower():
            checkDeviceRange = True
        # exact model-code match wins immediately
        if self.__model_code.lower() == splitLine[3].strip().lower():
            if(splitLine[1]):
                self.__market_name = splitLine[1]
            break
        # fuzzy candidates: partial model, product name, exact device
        if self.__model_code.lower() in splitLine[3].lower():
            tempName = splitLine[1]
        if tProductName.lower() in line.lower():
            tempName = splitLine[1]
        if tProductDevice.lower() == splitLine[2].lower():
            tempName = splitLine[1]
        # leaving the manufacturer's section: accept this line's fuzzy
        # match (may still be '-')
        if checkDeviceRange and not self.__manuf.lower() in splitLine[0].lower():
            self.__market_name = tempName
            break
    f.close()
def __SetDataFromDictionary(self, dictData):
self.__serial = dictData["serial"]
self.__manuf = dictData["manufa"]
self.__model_code = dictData["model_code"]
self.__market_name = dictData["market_name"]
self.__os_ver = dictData["os"]
self.__fingerprint = dictData["fingerprint"]
self.__gpu_renderer = dictData["gpu_renderer"]
self.__gpu_manufacturer = dictData["gpu_manufa"]
self.__gpu_gles = dictData["gpu_gles"]
self.__cpu_abi = dictData["cpu_abi"]
self.__cpu_soc = dictData["cpu_soc"]
self.__cpu_hardware = dictData["cpu_hardware"]
def __CheckDynamicFields(self, isIndepthInfo):
    """
    Re-read the fields that can change between runs -- Android OS version
    and build fingerprint -- and refresh the device cache when either differs.
    """
    os_ver = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "getprop", "ro.build.version.release"])
    fingerprint = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "getprop", "ro.build.fingerprint"])
    stale = (self.__os_ver != os_ver) or (self.__fingerprint != fingerprint)
    self.__os_ver = os_ver
    self.__fingerprint = fingerprint
    if stale:
        CacheDevice(self.GetFullDeviceData(), isIndepthInfo)
def __SetBatteryData(self):
    """Read battery level (%) and temperature from `dumpsys battery`."""
    batterydump = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "dumpsys", "battery"])
    # NOTE(review): re.search returns None if dumpsys lacks these fields,
    # which would raise AttributeError below -- confirm all supported
    # devices report 'level' and 'temperature'.
    matchObj = re.search(r"level: (\d+)", batterydump, flags=0)
    self.__battery_level = matchObj.group(1)
    matchObj = re.search(r"temperature: (\d+)", batterydump, flags=0)
    # raw value is scaled by 0.1 (presumably tenths of a degree C)
    # whole codebase works with strings, so after multiplying, I am converting value back to string
    batteryTemp = round(float(matchObj.group(1)) * 0.1, 1)
    self.__battery_temp = str(batteryTemp)
def __LoadTestData(self, testingData):
    """Populate naming fields from a test dictionary (unit-test path, no adb)."""
    self.__manuf = testingData["manufacturer"]
    self.__model_code = testingData["model"]
    self.__SetMarketName(testingData["name"], testingData["device"])
def __init__(self, deviceID, isIndepthInfo, isForTests=False, testDictData={}):
    """
    Constructor requires deviceID and a boolean if it should go for indepth information
    If isIndepthInfo is true - adbepy will run several shell commands and gather more information about the device itself
    isForTests/testDictData: unit-test path that skips adb entirely.
    NOTE(review): testDictData={} is a mutable default argument; it is only
    read here, but a None default would be safer.
    """
    if isForTests:
        self.__LoadTestData(testDictData)
        return
    # Reuse cached data when it is at least as detailed as requested.
    cachedDeviceData = GetCachedDeviceData(deviceID)
    if cachedDeviceData is not None:
        if cachedDeviceData["isFullData"] == True or isIndepthInfo == cachedDeviceData["isFullData"]:
            self.__SetDataFromDictionary(cachedDeviceData["deviceData"])
            # battery state is always volatile -- refresh it every time
            self.__SetBatteryData()
            self.__CheckDynamicFields(isIndepthInfo)
            return
    # If device is not cached or cached version != isIndepthInfo
    self.__serial = deviceID
    self.__SetDeviceNames()
    self.__os_ver = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "getprop", "ro.build.version.release"])
    if isIndepthInfo == True:
        self.__SetBatteryData()
        self.__SetGPU()
        self.__SetCPU()
    self.__fingerprint = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "getprop", "ro.build.fingerprint"])
    CacheDevice(self.GetFullDeviceData(), isIndepthInfo)
# public methods
def GetFullDeviceData(self):
"""
Returns dictionary with device's values. Dictionary keys:\n
serial - serial device number (ID, same as adb devices prints)
manufa - manufacturer (ex. Samsung)
model_code - ex. GT-I880
market_name - readable name (ex. Galaxy S8)
os - OS version (prints only number: 6.0.1)
fingerprint - device fingerprint
gpu_renderer - GPU Renderer, ex. Adreno 420
gpu_manufa - GPU Manufacturer, ex. Qualcomm
gpu_gles - GPU Gles version, ex. GLES 3.0
cpu_abi - CPU ABI version ex. ARMv7-A
cpu_soc - SOC family, ex. Snapdragon 880 MSM8996
cpu_hardware - Detailed hardware info (4x FancyKortex A53 @ 5GHz HP and same for LP)
battery_level- Battery level in percentage
battery_temp - Battery temperature
"""
return {
"serial": self.__serial,
"manufa": self.__manuf,
"model_code": self.__model_code,
"market_name": self.__market_name,
"os": self.__os_ver,
"fingerprint": self.__fingerprint,
"gpu_renderer": self.__gpu_renderer,
"gpu_manufa": self.__gpu_manufacturer,
"gpu_gles": self.__gpu_gles,
"cpu_abi": self.__cpu_abi,
"cpu_soc": self.__cpu_soc,
"cpu_hardware": self.__cpu_hardware,
"battery_level": self.__battery_level,
"battery_temp": self.__battery_temp
}
def InstallApk(self, apkPath):
"""
Installs an apk from apkPath on a device.
If installation fails, message with an error is printed and function returns "False"
otherwise, if installation succeeds - return "True"
"""
isInstalled = True
res = self.__ExecuteCommand(["adb", "-s", self.__serial, "install", "-r", apkPath])
matchObj = re.search(r"Failure ([*[A-Z_0-9]+])", res, flags=0)
if matchObj is not None:
# If an error occured - print it to the user
message = "Failed installing app on %s. Reason: %s" % (self.GetPrintableDeviceName(), matchObj.group(1))
if "NO_MATCHING_ABIS" in matchObj.group(1):
message += "\n\t* Make sure you are not building x86 for ARM architecture or vice versa."
PrintError(message)
isInstalled = False
return isInstalled
def LaunchApk(self, bundleID, mainActivity):
    """Start the activity bundleID/mainActivity on the device via `am start`."""
    component = bundleID + "/" + mainActivity
    self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "am", "start", "-n", component])
def GetThirdPartyApps(self):
    """Return the list of third-party package names installed on the device."""
    raw = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "pm", "list", "packages", "-3"])
    entries = raw.split("package:")
    # entries[0] is the text before the first "package:" marker -- drop it
    return [entry.rstrip() for entry in entries[1:]]
def TurnOff(self):
    """Power the device off; return a human-readable status string."""
    cachedName = self.GetPrintableDeviceName()
    output = self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "reboot", "-p"])
    # a failed power-off echoes the reboot usage/error text back
    if "reboot" in output:
        return "Failed to turn off " + cachedName
    return cachedName + " turned off"
def RemoveApp(self, appBundleID):
    """Uninstall the application identified by appBundleID from the device."""
    command = ["adb", "-s", self.__serial, "uninstall", appBundleID]
    self.__ExecuteCommand(command)
def GetDeviceID(self):
    """Return the adb serial number of this device."""
    return self.__serial
def GetDeviceSerial(self):
    """Alias for GetDeviceID (adb serial number)."""
    return self.GetDeviceID()
def GetDeviceName(self):
    """Return [manufacturer, market name, model code] as a list."""
    return [self.__manuf, self.__market_name, self.__model_code]
def GetPrintableDeviceName(self):
    """Return 'Manufacturer MarketName (model_code)' for display."""
    return self.__manuf + " " + self.__market_name + " (" + self.__model_code + ")"
def __PathExists(self, saveLocation):
if not os.path.exists(saveLocation):
os.makedirs(saveLocation)
print("Created folder " + os.path.abspath(saveLocation))
def TakeScreenshot(self, screenshotName, saveLocation):
self.__PathExists(saveLocation)
screenshotName = screenshotName + "_" + self.__serial + ".png"
self.__ExecuteCommand(["adb", "-s", self.__serial, "shell", "screencap", "-p", "/sdcard/screen.png"])
self.__ExecuteCommand(["adb", | |
import copy
import ipaddress
import json
import logging
from tests.common.errors import RunAnsibleModuleFail
from tests.common.devices.sonic import SonicHost
from tests.common.devices.sonic_asic import SonicAsic
from tests.common.helpers.assertions import pytest_assert
from tests.common.helpers.constants import DEFAULT_ASIC_ID, DEFAULT_NAMESPACE
logger = logging.getLogger(__name__)
class MultiAsicSonicHost(object):
    """ This class represents a Multi-asic SonicHost It has two attributes:
    sonic_host: a SonicHost instance. This object is for interacting with the SONiC host through pytest_ansible.
    asics: a list of SonicAsic instances.
    The 'duthost' fixture will return an instance of a MultiAsicSonicHost.
    So, even a single asic pizza box is represented as a MultiAsicSonicHost with 1 SonicAsic.
    """

    # Services that run once on the host itself (not replicated per asic).
    _DEFAULT_SERVICES = ["pmon", "snmp", "lldp", "database"]
def __init__(self, ansible_adhoc, hostname):
    """ Initializing a MultiAsicSonicHost.
    Args:
        ansible_adhoc : The pytest-ansible fixture
        hostname: Name of the host in the ansible inventory
    """
    self.sonichost = SonicHost(ansible_adhoc, hostname)
    # One SonicAsic per asic reported by the platform facts.
    self.asics = [SonicAsic(self.sonichost, asic_index) for asic_index in range(self.sonichost.facts["num_asic"])]
    # Get the frontend and backend asics in a multiAsic device.
    self.frontend_asics = []
    self.backend_asics = []
    if self.sonichost.is_multi_asic:
        for asic in self.asics:
            if asic.is_it_frontend():
                self.frontend_asics.append(asic)
            elif asic.is_it_backend():
                self.backend_asics.append(asic)
    # Register the combined host + per-asic critical-service list.
    self.critical_services_tracking_list()
def __repr__(self):
    # self.hostname resolves through __getattr__, delegating to SonicHost.
    return '<MultiAsicSonicHost> {}'.format(self.hostname)
def critical_services_tracking_list(self):
    """Collect the critical services of the DUT and register them on the host.

    The tracked services are the host-level defaults plus the services
    replicated on every asic; the combined list is handed to
    SonicHost.reset_critical_services_tracking_list. Nothing is returned.
    """
    services = list(self._DEFAULT_SERVICES)
    for asic in self.asics:
        services.extend(asic.get_critical_services())
    self.sonichost.reset_critical_services_tracking_list(services)
def get_default_critical_services_list(self):
    """Return the host-level default service list (pmon, snmp, lldp, database)."""
    return self._DEFAULT_SERVICES
def _run_on_asics(self, *module_args, **complex_args):
    """ Run an asible module on asics based on 'asic_index' keyword in complex_args
    Args:
        module_args: other ansible module args passed from the caller
        complex_args: other ansible keyword args
    Raises:
        ValueError: if asic_index is specified and it is neither an int or string 'all'.
        ValueError: if asic_index is specified and is an int, but greater than number of asics in the SonicHost
    Returns:
        if asic_index is not specified, then we return the output of the ansible module on global namespace (using SonicHost)
        else
            if asic_index is an int, the output of the ansible module on that asic namespace
                - for single asic SonicHost this would still be the same as the ansible module on the global namespace
            else if asic_index is string 'all', then a list of ansible module output for all the asics on the SonicHost
                - for single asic, this would be a list of size 1.
    """
    # self.multi_asic_attr was stashed by __getattr__ just before this
    # bound method was returned to the caller.
    if "asic_index" not in complex_args:
        # Default ASIC/namespace
        return getattr(self.sonichost, self.multi_asic_attr)(*module_args, **complex_args)
    else:
        # Deep-copy so popping 'asic_index' does not mutate the caller's kwargs.
        asic_complex_args = copy.deepcopy(complex_args)
        asic_index = asic_complex_args.pop("asic_index")
        if type(asic_index) == int:
            # Specific ASIC/namespace
            if self.sonichost.facts['num_asic'] == 1:
                if asic_index != 0:
                    raise ValueError("Trying to run module '{}' against asic_index '{}' on a single asic dut '{}'".format(self.multi_asic_attr, asic_index, self.sonichost.hostname))
            return getattr(self.asics[asic_index], self.multi_asic_attr)(*module_args, **asic_complex_args)
        elif type(asic_index) == str and asic_index.lower() == "all":
            # All ASICs/namespace
            return [getattr(asic, self.multi_asic_attr)(*module_args, **asic_complex_args) for asic in self.asics]
        else:
            raise ValueError("Argument 'asic_index' must be an int or string 'all'.")
def get_frontend_asic_ids(self):
    """Asic indices of all frontend asics ([DEFAULT_ASIC_ID] on single-asic duts)."""
    single_asic = self.sonichost.facts['num_asic'] == 1
    return [DEFAULT_ASIC_ID] if single_asic else [asic.asic_index for asic in self.frontend_asics]
def get_frontend_asic_namespace_list(self):
    """Namespaces of all frontend asics ([DEFAULT_NAMESPACE] on single-asic duts)."""
    single_asic = self.sonichost.facts['num_asic'] == 1
    return [DEFAULT_NAMESPACE] if single_asic else [asic.namespace for asic in self.frontend_asics]
def get_sonic_host_and_frontend_asic_instance(self):
    """The SonicHost plus every frontend SonicAsic (host only on single-asic duts)."""
    if self.sonichost.facts['num_asic'] == 1:
        return [self.sonichost]
    return [self.sonichost] + list(self.frontend_asics)
def get_backend_asic_ids(self):
    """Asic indices of all backend asics ([DEFAULT_ASIC_ID] on single-asic duts)."""
    single_asic = self.sonichost.facts['num_asic'] == 1
    return [DEFAULT_ASIC_ID] if single_asic else [asic.asic_index for asic in self.backend_asics]
def get_backend_asic_namespace_list(self):
    """Namespaces of all backend asics ([DEFAULT_NAMESPACE] on single-asic duts)."""
    single_asic = self.sonichost.facts['num_asic'] == 1
    return [DEFAULT_NAMESPACE] if single_asic else [asic.namespace for asic in self.backend_asics]
def asic_instance(self, asic_index):
    """Return the SonicAsic at asic_index; the first asic when asic_index is None."""
    return self.asics[0] if asic_index is None else self.asics[asic_index]
def asic_instance_from_namespace(self, namespace=DEFAULT_NAMESPACE):
    """SonicAsic matching `namespace`; first asic for an empty/default namespace; None if no match."""
    if not namespace:
        return self.asics[0]
    return next((asic for asic in self.asics if asic.namespace == namespace), None)
def get_asic_ids(self):
    """Indices of every asic ([DEFAULT_ASIC_ID] on single-asic duts)."""
    single_asic = self.sonichost.facts['num_asic'] == 1
    return [DEFAULT_ASIC_ID] if single_asic else [asic.asic_index for asic in self.asics]
def get_asic_namespace_list(self):
    """Namespaces of every asic ([DEFAULT_NAMESPACE] on single-asic duts)."""
    single_asic = self.sonichost.facts['num_asic'] == 1
    return [DEFAULT_NAMESPACE] if single_asic else [asic.namespace for asic in self.asics]
def get_asic_id_from_namespace(self, namespace):
    """Map a namespace to its asic index (DEFAULT_ASIC_ID for single-asic duts or the default namespace).

    Raises:
        ValueError: when the namespace does not belong to any asic.
    """
    if self.sonichost.facts['num_asic'] == 1 or namespace == DEFAULT_NAMESPACE:
        return DEFAULT_ASIC_ID
    found = next((asic.asic_index for asic in self.asics if asic.namespace == namespace), None)
    if found is None:
        raise ValueError("Invalid namespace '{}' passed as input".format(namespace))
    return found
def get_namespace_from_asic_id(self, asic_id):
    """Map an asic index to its namespace (DEFAULT_NAMESPACE for single-asic duts or DEFAULT_ASIC_ID).

    Raises:
        ValueError: when the asic_id does not belong to any asic.
    """
    if self.sonichost.facts['num_asic'] == 1 or asic_id == DEFAULT_ASIC_ID:
        return DEFAULT_NAMESPACE
    for candidate in self.asics:
        if candidate.asic_index == asic_id:
            return candidate.namespace
    raise ValueError("Invalid asic_id '{}' passed as input".format(asic_id))
def get_vtysh_cmd_for_namespace(self, cmd, namespace):
    """Rewrite a vtysh command to target the asic owning `namespace` (no-op for the default)."""
    asic_id = self.get_asic_id_from_namespace(namespace)
    if asic_id == DEFAULT_ASIC_ID:
        return cmd
    return cmd.replace('vtysh', 'vtysh -n {}'.format(asic_id))
def get_linux_ip_cmd_for_namespace(self, cmd, namespace):
    """Rewrite an `ip ...` command to run inside `namespace` (no-op when namespace is falsy).

    NOTE(review): str.replace rewrites every occurrence of the substring
    'ip' in cmd, not just the leading command word -- confirm callers only
    pass commands where 'ip' appears once.
    """
    if not namespace:
        return cmd
    return cmd.replace('ip', 'ip -n {}'.format(namespace))
def get_route(self, prefix, namespace=DEFAULT_NAMESPACE):
    """Return the parsed JSON of 'show bgp ipv4/ipv6 <prefix>' from vtysh.

    Args:
        prefix: IPv4 or IPv6 prefix string to look up.
        namespace: asic namespace; the default queries the global instance.
    """
    asic_id = self.get_asic_id_from_namespace(namespace)
    if asic_id == DEFAULT_ASIC_ID:
        ns_prefix = ''
    else:
        ns_prefix = '-n ' + str(asic_id)
    # FIX: the bare name `unicode` does not exist on Python 3 (NameError).
    # u''-formatting yields unicode on py2 (which the ipaddress backport
    # requires) and str on py3.
    version = ipaddress.ip_network(u'{}'.format(prefix)).version
    cmd = 'show bgp ipv4' if version == 4 else 'show bgp ipv6'
    return json.loads(self.shell('vtysh {} -c "{} {} json"'.format(ns_prefix, cmd, prefix))['stdout'])
def __getattr__(self, attr):
    """ To support calling an ansible module on a MultiAsicSonicHost.
    Args:
        attr: attribute to get
    Returns:
        if attr doesn't start with '_' and is a method of SonicAsic, attr will be ansible module that has dependency on ASIC,
        return the output of the ansible module on asics requested - using _run_on_asics method.
        else
            return the attribute from SonicHost.
    """
    sonic_asic_attr = getattr(SonicAsic, attr, None)
    if not attr.startswith("_") and sonic_asic_attr and callable(sonic_asic_attr):
        # Stash the requested method name; _run_on_asics dispatches on it.
        # NOTE(review): storing dispatch state on self assumes lookups are
        # not interleaved from multiple threads -- confirm usage.
        self.multi_asic_attr = attr
        return self._run_on_asics
    else:
        return getattr(self.sonichost, attr)  # For backward compatibility
def get_asic_or_sonic_host(self, asic_id):
    """Return the SonicHost for DEFAULT_ASIC_ID, else the SonicAsic at asic_id."""
    return self.sonichost if asic_id == DEFAULT_ASIC_ID else self.asics[asic_id]
def get_asic_or_sonic_host_from_namespace(self, namespace=DEFAULT_NAMESPACE):
    """SonicHost for an empty/default namespace, else the matching SonicAsic (None if absent)."""
    if not namespace:
        return self.sonichost
    return next((asic for asic in self.asics if asic.namespace == namespace), None)
def start_service(self, service):
    """Start `service`: on the host for default services, on every asic otherwise."""
    if service not in self._DEFAULT_SERVICES:
        for asic in self.asics:
            asic.start_service(service)
        return
    return self.sonichost.start_service(service, service)
def stop_service(self, service):
    """Stop `service`: on the host for default services, on every asic otherwise."""
    if service not in self._DEFAULT_SERVICES:
        for asic in self.asics:
            asic.stop_service(service)
        return
    return self.sonichost.stop_service(service, service)
def restart_service(self, service):
    """Restart `service`: on the host for default services, on every asic otherwise."""
    if service not in self._DEFAULT_SERVICES:
        for asic in self.asics:
            asic.restart_service(service)
        return
    return self.sonichost.restart_service(service, service)
def delete_container(self, service):
    """Delete the `service` container: on the host for default services, per asic otherwise."""
    if service not in self._DEFAULT_SERVICES:
        for asic in self.asics:
            asic.delete_container(service)
        return
    return self.sonichost.delete_container(service)
def is_container_running(self, service):
    """True when `service` runs on the host (default services) or on any asic."""
    if service in self._DEFAULT_SERVICES:
        return self.sonichost.is_container_running(service)
    # any() short-circuits on the first running asic, like the original loop
    return any(asic.is_container_running(service) for asic in self.asics)
def is_bgp_state_idle(self):
    """Delegate to SonicHost.is_bgp_state_idle (host-level BGP state check)."""
    return self.sonichost.is_bgp_state_idle()
def is_service_running(self, service_name, docker_name=None):
    """True when the service is running on the host (default services) or on all asics."""
    docker_name = service_name if docker_name is None else docker_name
    if docker_name in self._DEFAULT_SERVICES:
        return self.sonichost.is_service_running(service_name, docker_name)
    # all() short-circuits on the first stopped asic, like the original loop
    return all(asic.is_service_running(service_name, docker_name) for asic in self.asics)
def get_asic_index_for_portchannel(self, portchannel):
    """Asic index owning `portchannel`, or None when no asic has it."""
    return next((asic.asic_index for asic in self.asics if asic.portchannel_on_asic(portchannel)), None)
def get_port_asic_instance(self, port):
    """
    Return the SonicAsic instance that owns `port`.

    Args:
        port: Port ID
    Returns:
        The owning SonicAsic; fails the test via pytest_assert when no
        asic has the port.
    """
    for candidate in self.asics:
        if candidate.port_exists(port):
            return candidate
    pytest_assert(False, "ASIC instance not found for port {}".format(port))
def get_queue_oid_asic_instance(self, queue_oid):
    """
    Return the SonicAsic instance that has `queue_oid` saved.

    Queue OIDs are recorded per asic when first requested for a port and
    queue (see get_queue_oid). Fails the test via pytest_assert when the
    OID is unknown.
    """
    for candidate in self.asics:
        if queue_oid in candidate.queue_oid:
            return candidate
    pytest_assert(False, "ASIC instance not found for queue OID {}".format(queue_oid))
def get_queue_oid(self, port, queue_num):
    """
    Queue OID for (port, queue_num).

    The OID is recorded on the owning asic so get_queue_oid_asic_instance
    can locate that asic later.
    """
    owning_asic = self.get_port_asic_instance(port)
    return owning_asic.get_queue_oid(port, queue_num)
def has_config_subcommand(self, command):
    """
    Check whether a config/show subcommand exists on the device.

    The caller must ensure `command` has no unintended side effects.

    Args:
        command (str): the command to be checked, beginning with 'config' or 'show'
    Returns:
        (bool) True when the command exists, False otherwise
    """
    try:
        self.shell(command)
    except RunAnsibleModuleFail as e:
        # 'No such command' in stderr means the subcommand is missing
        return 'No such command' not in e.results['stderr']
    # successful execution => the command exists
    return True
def disable_syslog_rate_limit(self, feature):
"""
Disable Rate limit for a given service
"""
services = [feature]
if (feature in self.sonichost.DEFAULT_ASIC_SERVICES):
services = [asic.get_docker_name(feature) for asic in self.asics]
for docker in services:
cmd_disable_rate_limit = (
r"docker exec | |
#!/usr/bin/env python
"""
Parser for FCS 2.0, 3.0, 3.1 files. Python 2/3 compatible.
`
Distributed under the MIT License.
Useful documentation for dtypes in numpy
http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.byteswap.html?highlight=byteswap#numpy.ndarray.byteswap # noqa
http://docs.scipy.org/doc/numpy/user/basics.types.html
http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
"""
from __future__ import division
import contextlib
import logging
from io import BytesIO
import string
import sys
import warnings
import re
import numpy
import pandas as pd
import six
logger = logging.getLogger(__name__)
def fromfile(file, dtype, count, *args, **kwargs):
    """Wrapper around numpy.fromfile that also supports any file-like object.

    `dtype` is a comma-separated list of field specs like '<f4'; each field
    is read as raw bytes, padded up to a power-of-two width when needed, and
    viewed back as the requested dtypes.

    FIX: the padding path called np.insert/np.zeros although numpy is
    imported as `numpy` here -- that raised NameError for any field whose
    byte width was not a power of two.
    NOTE(review): the insert offset ignores padding already added for
    earlier fields -- confirm for multi-field dtypes needing padding.
    """
    dtypes = dtype.split(',')
    # raw byte width of each field, e.g. '<f4' -> 4
    field_width = [int(dt[2:]) for dt in dtypes]
    total_width = sum(field_width)
    raw_dtype = ",".join(['u1'] * total_width)
    try:
        ret = numpy.fromfile(file, dtype=raw_dtype, count=count, *args, **kwargs)
    except (TypeError, IOError):
        # numpy.fromfile needs a real file; fall back to reading the buffer
        ret = numpy.frombuffer(file.read(count * total_width),
                               dtype=raw_dtype,
                               count=count,
                               *args,
                               **kwargs)
    ret = ret.view('u1').reshape((count, total_width))
    ret_dtypes = []
    for field, dt in enumerate(dtypes):
        dtype_type = dt[1]
        dtype_endian = dt[0]
        num_bytes = int(dt[2:])
        # pad each field up to the next power-of-two width so it can be
        # viewed as a native numpy scalar type
        while num_bytes & (num_bytes - 1) != 0:
            ret = numpy.insert(ret, sum(field_width[0:field]), numpy.zeros(count), axis=1)
            num_bytes = num_bytes + 1
        ret_dtypes.append(dtype_endian + dtype_type + str(num_bytes))
    return ret.view(','.join(ret_dtypes)).ravel()
class ParserFeatureNotImplementedError(Exception):
    """Raise when encountering fcs files for which parsing hasn't been implemented."""
class FCSParser(object):
def __init__(self, path=None, read_data=True, channel_naming='$PnS', data_set=0, encoding='utf-8'):
    """Parse FCS files.
    Compatible with most FCS 2.0, 3.0, 3.1 files.
    self.annotation: a dictionary holding the parsed content of the TEXT segment
        In addition, a key called __header__ has been added to this dictionary
        It specifies the information parsed from the FCS file HEADER segment.
        (This won't be necessary for most users.)
    self.data holds the parsed DATA segment
    self.analysis holds the ANALYSIS segment as read from the file.
    After the data segment is read:
        self.channel_names holds the chosen names of the channels
        self.channel_names_alternate holds the alternate names of the channels
    Args:
        path : str
            Path of .fcs file
        read_data : bool
            If True, reads the data immediately.
            Otherwise, use read_data method to read in the data from the fcs file.
        channel_naming: '$PnS' | '$PnN'
            Determines which meta data field is used for naming the channels.
            The default should be $PnS (even though it is not guaranteed to be unique)
            $PnN stands for the short name (guaranteed to be unique).
                Will look like 'FL1-H'
            $PnS stands for the actual name (not guaranteed to be unique).
                Will look like 'FSC-H' (Forward scatter)
            The chosen field will be used to population self.channels.
            If the chosen field does not exist in the fcs file.
            The program attempts to use the alternative field by default.
            Note: These names are not flipped in the implementation.
            It looks like they were swapped for some reason in the official FCS specification.
        data_set: int
            Index of retrieved data set in the fcs file.
            This value specifies the data set being retrieved from an fcs file with
            multiple data sets.
        encoding: str
            Specify encoding of the text section of the fcs data
    """
    self._encoding = encoding
    self._data = None
    self._channel_naming = channel_naming
    # channel names from $PnS / $PnN keywords, filled in by read_text
    self.channel_names_s = []
    self.channel_names_n = []
    # Attributes parsed from fcs file
    self._data_start = -1   # byte offset where the DATA segment begins
    self._data_end = -1     # byte offset where the DATA segment ends
    self.channel_numbers = []
    self._analysis = None
    self._file_size = 0
    if channel_naming not in ('$PnN', '$PnS'):
        raise ValueError(u'channel_naming must be either "$PnN" or "$PnS"')
    self.annotation = {}
    self.path = path
    # when constructed without a path, use load_file()/from_data() instead
    if path:
        with open(path, 'rb') as f:
            self.load_file(f, data_set=data_set, read_data=read_data)
def load_file(self, file_handle, data_set=0, read_data=True):
    """Load the requested parts of the file into memory.

    Walks the chain of data sets via the $NEXTDATA keyword until the
    requested data_set index is reached, then optionally reads the DATA
    segment.
    """
    # determine total file size (used by read_header for sanity checks)
    file_handle.seek(0, 2)
    self._file_size = file_handle.tell()
    file_handle.seek(0)
    data_segments = 0
    # seek the correct data set in fcs
    nextdata_offset = 0
    while data_segments <= data_set:
        self.read_header(file_handle, nextdata_offset)
        self.read_text(file_handle)
        if '$NEXTDATA' in self.annotation:
            data_segments += 1
            # NOTE(review): assumes read_text stored $NEXTDATA as a number
            # usable for seek() -- confirm the TEXT parser converts it.
            nextdata_offset = self.annotation['$NEXTDATA']
            file_handle.seek(nextdata_offset)
            if nextdata_offset == 0 and data_segments < data_set:
                warnings.warn("File does not contain the number of data sets.")
                break
        else:
            if data_segments != 0:
                warnings.warn('File does not contain $NEXTDATA information.')
            break
    if read_data:
        self.read_data(file_handle)
@classmethod
def from_data(cls, data):
    """Load an FCS file from a bytes-like object.
    Args:
        data: buffer containing contents of an FCS file.
    Returns:
        FCSParser instance with data loaded
    """
    parser = cls()
    with contextlib.closing(BytesIO(data)) as handle:
        parser.load_file(handle)
    return parser
def read_header(self, file_handle, nextdata_offset=0):
"""Read the header of the FCS file.
The header specifies where the annotation, data and analysis are located inside the binary
file.
Args:
file_handle: buffer containing FCS file.
nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA
"""
header = {'FCS format': file_handle.read(6)}
file_handle.read(4) # 4 space characters after the FCS format
for field in ('text start', 'text end', 'data start', 'data end', 'analysis start',
'analysis end'):
s = file_handle.read(8)
try:
field_value = int(s)
except ValueError:
field_value = 0
header[field] = field_value + nextdata_offset
# In some .fcs files, 'text end' and 'data start' are equal, e.g.,
# http://flowrepository.org/experiments/2241/download_ziped_files
# and this would lead to a mistake when run @_extract_text_dict
# We should avoid this situation.
if header['text end'] == header['data start']:
header['text end'] = header['text end'] - 1
# Checking that the location of the TEXT segment is specified
for k in ('text start', 'text end'):
if header[k] == 0:
raise ValueError(u'The FCS file "{}" seems corrupted. (Parser cannot locate '
u'information about the "{}" segment.)'.format(self.path, k))
elif header[k] > self._file_size:
raise ValueError(u'The FCS file "{}" is corrupted. "{}" segment '
u'is larger than file size'.format(self.path, k))
else:
# All OK
pass
self._data_start = header['data start']
self._data_end = header['data start']
if header['analysis end'] - header['analysis start'] != 0:
warnings.warn(u'There appears to be some information in the ANALYSIS segment of file '
u'{0}. However, it might not be read correctly.'.format(self.path))
self.annotation['__header__'] = header
def _extract_text_dict(self, raw_text):
"""Parse the TEXT segment of the FCS file into a python dictionary."""
delimiter = raw_text[0]
if raw_text[-1] != delimiter:
raw_text = raw_text.strip()
if raw_text[-1] != delimiter:
msg = (u'The first two characters were:\n {}. The last two characters were: {}\n'
u'Parser expects the same delimiter character in beginning '
u'and end of TEXT segment. This file may be parsed incorrectly!'.format(raw_text[:2], raw_text[-2:]))
warnings.warn(msg)
raw_text = raw_text[1:]
else:
raw_text = raw_text[1:-1]
else:
raw_text = raw_text[1:-1]
# 1:-1 above removes the first and last characters which are reserved for the delimiter.
# The delimiter is escaped by being repeated (two consecutive delimiters). This code splits
# on the escaped delimiter first, so there is no need for extra logic to distinguish
# actual delimiters from escaped delimiters.
nested_split_list = [x.split(delimiter) for x in raw_text.split(delimiter * 2)]
# Flatten the nested list to a list of elements (alternating keys and values)
raw_text_elements = nested_split_list[0]
for partial_element_list in nested_split_list[1:]:
# Rejoin two parts of an element that was split by an escaped delimiter (the end and
# start of two successive sub-lists in nested_split_list)
raw_text_elements[-1] += (delimiter + partial_element_list[0])
raw_text_elements.extend(partial_element_list[1:])
keys, values = raw_text_elements[0::2], raw_text_elements[1::2]
return dict(zip(keys, values))
def read_text(self, file_handle):
"""Parse the TEXT segment of the FCS file.
The TEXT segment contains meta data associated with the FCS file.
Converting all meta keywords to lower case.
"""
header = self.annotation['__header__'] # For convenience
#####
# Read in the TEXT segment of the FCS file
# There are some differences in how the
file_handle.seek(header['text start'], 0)
raw_text = file_handle.read(header['text end'] - header['text start'] + 1)
try:
raw_text = raw_text.decode(self._encoding)
except UnicodeDecodeError as e:
# Catching the exception and logging it in this way kills the traceback, but
# we can worry about this later.
logger.warning(u'Encountered an illegal utf-8 byte in the header.\n Illegal utf-8 '
u'characters will be ignored.\n{}'.format(e))
raw_text = raw_text.decode(self._encoding, errors='ignore')
text = self._extract_text_dict(raw_text)
##
# Extract channel names and convert some of the channel properties
# and other fields into numeric data types (from string)
# Note: do not use regular expressions for manipulations here.
# Regular expressions are too heavy in terms of computation time.
pars = int(text['$PAR'])
if '$P0B' in text.keys(): # Checking whether channel number count starts from 0 or from 1
self.channel_numbers = range(0, pars) # Channel number count starts from 0
else:
self.channel_numbers = range(1, pars + 1) # Channel numbers start from 1
# Extract parameter names
# --- (unreadable extraction artifact at chunk boundary) ---
'geometries')
slice_ = self.get(ix, src)
if view is None:
slice_lens = np.array([len(item) for item in slice_])
axis = np.argmin(slice_lens)
else:
mapping = {0: 0, 1: 1, 2: 2,
'i': 0, 'x': 1, 'h': 2,
'iline': 0, 'xline': 1, 'height': 2, 'depth': 2}
axis = mapping[view]
if axis == 0:
crop = self.__load_h5py_i(geom, *slice_)
elif axis == 1 and 'cube_x' in geom.h5py_file:
crop = self.__load_h5py_x(geom, *slice_)
elif axis == 2 and 'cube_h' in geom.h5py_file:
crop = self.__load_h5py_h(geom, *slice_)
else: # backward compatibility
crop = self.__load_h5py_i(geom, *slice_)
pos = self.get_pos(None, dst, ix)
getattr(self, dst)[pos] = crop
return self
def __load_h5py_i(self, geom, ilines, xlines, heights):
    """Assemble a crop by reading iline-oriented slides from the 'cube' dataset."""
    cube = geom.h5py_file['cube']
    crop = np.zeros((len(ilines), len(xlines), len(heights)), dtype=cube.dtype)
    for pos, line in enumerate(ilines):
        slide = self.__load_slide(cube, line)
        # Select the requested xlines, then the requested heights.
        crop[pos] = slide[xlines][:, heights]
    return crop
def __load_h5py_x(self, geom, ilines, xlines, heights):
    """Assemble a crop by reading xline-oriented slides from the 'cube_x' projection."""
    cube = geom.h5py_file['cube_x']
    crop = np.zeros((len(ilines), len(xlines), len(heights)), dtype=cube.dtype)
    for pos, line in enumerate(xlines):
        slide = self.__load_slide(cube, line)
        # Slide axes are (height, iline); transpose back to (iline, height).
        crop[:, pos, :] = slide[heights][:, ilines].T
    return crop
def __load_h5py_h(self, geom, ilines, xlines, heights):
    """Assemble a crop by reading depth-slices from the 'cube_h' projection."""
    cube = geom.h5py_file['cube_h']
    crop = np.zeros((len(ilines), len(xlines), len(heights)), dtype=cube.dtype)
    for pos, level in enumerate(heights):
        slide = self.__load_slide(cube, level)
        crop[:, :, pos] = slide[ilines][:, xlines]
    return crop
def __load_slide(self, cube, index):
    """(Potentially) cached function for slide loading.

    Notes
    -----
    One must use thread-safe cache implementation.
    """
    slide = cube[index, :, :]
    return slide
@action
@inbatch_parallel(init='_init_component', target='threads')
def create_masks(self, ix, dst, src='slices', mode='horizon', width=3, src_labels='labels', n_horizons=-1):
    """ Create masks from labels-dictionary in given positions.

    Parameters
    ----------
    src : str
        Component of batch with positions of crops to load.
    dst : str
        Component of batch to put loaded masks in.
    mode : str
        Either `horizon` or `stratum`. With `horizon`, a binary mask of the
        borders between strata is built; with `stratum`, every stratum between
        horizons gets its own class label in range 1..number_of_horizons + 1.
    width : int
        Width of horizons in the `horizon` mode.
    src_labels : str
        Component of batch with labels dict.
    n_horizons : int or array-like of ints
        Maximum number of horizons per crop (-1 for all), or the indices of
        the desired horizons (a single index must still be wrapped in a list).

    Returns
    -------
    SeismicCropBatch
        Batch with loaded masks in desired components.

    Notes
    -----
    Can be run only after labels-dict is loaded into labels-component.
    """
    #pylint: disable=protected-access
    geom = self.get(ix, 'geometries')
    labels = self.get(ix, src_labels)
    slices = self.get(ix, src)
    ilines_, xlines_, hs_ = slices[0], slices[1], slices[2]
    if hasattr(labels._dict_type.value_type, '__len__'):
        # Labels carry per-point payloads: delegate to the "full" mask builder.
        mask = create_mask_f(ilines_, xlines_, hs_, labels,
                             geom.ilines_offset, geom.xlines_offset, geom.depth)
    else:
        idx = [-1] if isinstance(n_horizons, int) else n_horizons
        max_horizons = n_horizons if isinstance(n_horizons, int) else -1
        mask = create_mask(ilines_, xlines_, hs_, labels,
                           geom.ilines_offset, geom.xlines_offset, geom.depth,
                           mode, width, idx, max_horizons)
    pos = self.get_pos(None, dst, ix)
    getattr(self, dst)[pos] = mask
    return self
@action
@inbatch_parallel(init='indices', target='threads', post='_stitch_clouds')
def get_point_cloud(self, ix, src_masks='masks', src_slices='slices', dst='predicted_labels',
                    threshold=0.5, averaging='mean', coordinates='cubic', order=(2, 0, 1),
                    height_margin=2, border_margin=1):
    """ Convert labels from horizons-mask into point-cloud format.

    Fetches point-clouds from a batch of masks, then merges resulting clouds
    to those stored in `dst`, whenever possible.

    Parameters
    ----------
    src_masks : str
        component of batch that stores masks.
    src_slices : str
        component of batch that stores slices of crops.
    dst : str/object
        component of batch to store the resulting labels, o/w a storing object.
    threshold : float
        parameter of mask-thresholding.
    averaging : str
        method of pandas.groupby used for finding the center of a horizon.
    coordinates : str
        either 'cubic' or 'lines'; with 'lines', `geometries` must be loaded
        so crop coordinates can be mapped back to iline/xline numbers.
    order : tuple of int
        axes-param for `transpose`-operation, applied to a mask before fetching point clouds.
    height_margin : int
        if adjacent horizons do not diverge for more than this distance, they can be merged together.
    border_margin : int
        max distance between a pair of horizon-borders when the horizons can be adjacent.

    Returns
    -------
    SeismicCropBatch
        batch with fetched labels.
    """
    _, _, _ = dst, height_margin, border_margin
    # Binarize, drop trailing singleton axes, and rotate into cube order.
    raw = getattr(self, src_masks)[self.get_pos(None, src_masks, ix)]
    binary = (raw > threshold).astype(np.float32)
    binary = np.transpose(np.reshape(binary, binary.shape[:3]), axes=order)
    if isinstance(dst, str):
        dst = getattr(self, dst)
    i_shift, x_shift, h_shift = (self.get(ix, src_slices)[k][0] for k in range(3))
    geom = self.get(ix, 'geometries')
    if coordinates == 'lines':
        transforms = (lambda i_: geom.ilines[i_ + i_shift],
                      lambda x_: geom.xlines[x_ + x_shift],
                      lambda h_: h_ + h_shift)
    else:
        transforms = (lambda i_: i_ + i_shift,
                      lambda x_: x_ + x_shift,
                      lambda h_: h_ + h_shift)
    # Fetch separated horizons; merging happens in the post-function.
    return mask_to_horizon(binary, threshold, averaging, transforms, separate=True)
@action
@inbatch_parallel(init='_init_component', target='threads')
def filter_out(self, ix, src=None, dst=None, mode=None, expr=None, low=None, high=None, length=None):
    """ Cut mask for horizont extension task.

    Parameters
    ----------
    src : str
        Component of batch with mask
    dst : str
        Component of batch to put cut mask in.
    mode : str
        Either point, line, iline or xline.
        If point, then only one point per horizon will be labeled.
        If iline or xline then a single iline or xline will be labeled.
        If line then randomly either a single iline or xline will be labeled.
    expr : callable, optional.
        Some vectorized function. Accepts points in cube, returns either float.
        If not None, low or high/length should also be supplied.
    low : float, optional
        Lower bound on `expr` values of points that are kept.
    high : float, optional
        Upper bound on `expr` values of points that are kept.
    length : float, optional
        Keep points with `expr` value at most ``low + length`` (low defaults to 0).
    """
    if not (src and dst):
        raise ValueError('Src and dst must be provided')
    pos = self.get_pos(None, src, ix)
    mask = getattr(self, src)[pos]
    coords = np.where(mask > 0)
    if len(coords[0]) == 0:
        # Nothing labeled: pass the mask through unchanged.
        # BUG FIX: use the dst position, not the src one, when storing.
        pos = self.get_pos(None, dst, ix)
        getattr(self, dst)[pos] = mask
        return self
    if mode is not None:
        new_mask = np.zeros_like(mask)
        # BUG FIX: pick a random labeled point. The original called
        # randint(len(coords)), which sampled from range(3) because
        # np.where returns a 3-tuple of coordinate arrays.
        point = np.random.randint(len(coords[0]))
        if mode == 'point':
            new_mask[coords[0][point], coords[1][point], :] = mask[coords[0][point], coords[1][point], :]
        elif mode == 'iline' or (mode == 'line' and np.random.binomial(1, 0.5) == 1):
            new_mask[coords[0][point], :, :] = mask[coords[0][point], :, :]
        elif mode in ['xline', 'line']:
            new_mask[:, coords[1][point], :] = mask[:, coords[1][point], :]
        else:
            raise ValueError('Mode should be either `point`, `iline`, `xline` or `line')
        mask = new_mask
    if expr is not None:
        coords = np.where(mask > 0)
        new_mask = np.zeros_like(mask)
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        coords = np.array(coords).astype(float).T
        cond = np.ones(shape=coords.shape[0]).astype(bool)
        # Normalize coordinates into [0, 1] before applying expr.
        coords /= np.reshape(mask.shape, newshape=(1, 3))
        if low is not None:
            cond &= np.greater_equal(expr(coords), low)
        if high is not None:
            cond &= np.less_equal(expr(coords), high)
        if length is not None:
            low = 0 if not low else low
            cond &= np.less_equal(expr(coords), low + length)
        coords *= np.reshape(mask.shape, newshape=(1, 3))
        coords = np.round(coords).astype(np.int32)[cond]
        new_mask[coords[:, 0], coords[:, 1], coords[:, 2]] = mask[coords[:, 0], coords[:, 1], coords[:, 2]]
        mask = new_mask
    pos = self.get_pos(None, dst, ix)
    getattr(self, dst)[pos] = mask
    return self
@action
@inbatch_parallel(init='indices', target='threads')
def scale(self, ix, mode, src=None, dst=None):
    """ Scale values in crop.

    Parameters
    ----------
    mode : str
        Either 'normalize' or 'denormalize'.
    src : str
        Component of batch holding the data to rescale.
    dst : str, optional
        Component to store the result in; defaults to *src*.
    """
    data = getattr(self, src)[self.get_pos(None, src, ix)]
    geometry = self.get(ix, 'geometries')
    if mode == 'normalize':
        scaled = geometry.scaler(data)
    elif mode == 'denormalize':
        scaled = geometry.descaler(data)
    else:
        raise ValueError('Scaling mode is not recognized.')
    target = dst or src
    if not hasattr(self, target):
        # Lazily create the destination component.
        setattr(self, target, np.array([None] * len(self)))
    getattr(self, target)[self.get_pos(None, target, ix)] = scaled
    return self
@action
@inbatch_parallel(init='_init_component', post='_assemble', target='threads')
def concat_components(self, ix, src, dst, axis=-1):
""" Concatenate a list of components and save results to `dst` component.
Parameters
----------
src : array-like
List of components to concatenate of length more than one.
dst : str
Component of batch to put results in.
axis : int
The axis along which the arrays will be joined.
"""
_ = dst
if not isinstance(src, (list, | |
"""ダイナミクスを導出"""
import sympy as sy
#from sympy import sqrt
import tqdm
import kinematics
import utils
def operate_T2(A):
    """T2 operation: the trace of the top-left 2x2 block of A."""
    diag_sum = A[0, 0] + A[1, 1]
    return diag_sum
def operate_V(A):
    """Vectorization operation: reshape A into a single column of len(A) rows."""
    return A.reshape(len(A), 1)
def operate_tilde(A):
    """Return a copy of A with its last column zeroed.

    BUG FIX: the original did ``tilde_A = A``, which only aliases, so the
    input matrix was modified in place. Callers pass matrices that are
    reused later (e.g. ``self.R_s[i]`` in the inertia computations), so the
    side effect corrupted subsequent terms; copying first removes it.
    """
    m, _ = A.shape
    tilde_A = A.copy()
    for i in range(m):
        tilde_A[i, -1] = 0
    return tilde_A
class Dynamics(kinematics.Global):
"""Derivation of the dynamics.

Assumes every section shares the same physical parameters.
"""
# Physical parameters shared by all sections.
m = 0.13  # mass per section -- presumably kg; TODO confirm units
Ixx = 1.0  # moment of inertia about the x axis
g = sy.Matrix([[0, 0, -9.81]]).T  # gravity vector
k_e = 1700 # elastic coefficient
def __init__(self, N,):
    """
    Parameters
    ---
    N : int
        Number of sections.
        Note: even N = 3 or so already takes a long time to compute.
    """
    super().__init__(N)
    # NOTE(review): the full precomputation chain is deliberately left
    # disabled; callers invoke the individual set_* methods as needed.
    # self.set_M_omega()
    # self.set_M_omega_dot()
    # self.set_M_v()
    # self.set_M_v_dot()
    # self.set_M()
    # self.set_M_dot()
    # self.set_C()
    # self.set_G_g()
    # self.set_G_e()
    # self.set_G()
def set_M_omega(self,):
    """Compute and store the rotational part of the inertia matrix.

    Builds one 3N x 3N matrix per section i; entry (j, k) couples
    generalized coordinates j and k. Three index regimes are handled:
    both coordinates from preceding sections (j, k < 3*i), a mixed pair,
    or both local to section i.
    """
    print("computing M_omega ...")
    def M_omega(i, j, k):
        # NOTE(review): the regime formulas were transcribed from an external
        # derivation; confirm the transpositions against that source.
        if j < 3*i and k < 3*i:
            Mijk = self.Ixx * \
                operate_T2(
                    self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T *\
                    self.J_OMEGA_s[i-1][:, 3*k:3*k+3].T
                )
            return Mijk.subs(self.xi_large[i, 0], 1)
        elif j < 3*i and 3*i <= k <= 3*i+2:
            Ri = self.R_s[i]
            Ri_diff_k = sy.diff(Ri, self.q_large[k, 0])
            z = operate_tilde(Ri) * operate_tilde(Ri_diff_k).T
            A = sy.integrate(z, (self.xi_large[i, 0], 0, 1))
            A = operate_V(A).T
            B = operate_V(self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T)
            # BUG FIX: Matrix.subs returns a new matrix; the original call
            # discarded its result, so the substitution never took effect.
            B = B.subs(self.xi_large[i, 0], 1)
            return self.Ixx * A * B
        elif 3*i <= j <= 3*i+2 and 3*i <= k <= 3*i+2:
            Ri = self.R_s[i]
            Ri_diff_j = sy.diff(Ri, self.q_large[j, 0])
            Ri_diff_k = sy.diff(Ri, self.q_large[k, 0])
            z = Ri_diff_j.T * Ri_diff_k
            A = sy.integrate(z, (self.xi_large[i, 0], 0, 1))
            A = operate_T2(A)
            return self.Ixx * A
        else:
            # Coordinates of sections beyond i do not influence section i.
            return 0
    M_omega_s = []
    for i in tqdm.tqdm(range(self.N)):
        M_omega_s_i = []
        for j in tqdm.tqdm(range(3*self.N), leave=False):
            M_omega_s_ij = []
            for k in tqdm.tqdm(range(3*self.N), leave=False):
                M_omega_s_ij.append(M_omega(i, j, k))
            M_omega_s_i.append(M_omega_s_ij)
        M_omega_s.append(sy.Matrix(M_omega_s_i))
    self.M_omega_s = M_omega_s
    print("computing M_omega done!")
def set_M_omega_dot(self,):
    """Compute and store the partial derivatives of the rotational inertia
    matrix with respect to each generalized coordinate s.

    ``self.M_omega_dot_s[s][i]`` holds d(M_omega_i)/d(q_s). The same three
    (j, k) index regimes as in :meth:`set_M_omega` are handled, each split
    further by where s lives.
    """
    print("computing M_omega_dot ...")
    def M_omega_dot(i, j, k, s):
        # Entry (j, k) of section i's rotational inertia matrix,
        # differentiated w.r.t. generalized coordinate s.
        if j < 3*i and k < 3*i:
            if s < 3*i:
                # Product rule over the two Jacobian factors.
                A = self.H_OMEGA_s[i-1][3*j:3*j+3, 3*s:3*s+3].T *\
                    self.J_OMEGA_s[i-1][:, 3*k:3*k+3]
                B = self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T *\
                    self.H_OMEGA_s[i-1][3*k:3*k+3, 3*s:3*s+3]
                Z = self.Ixx * operate_T2(A + B)
                return Z.subs(self.xi_large[i, 0], 1)
            elif 3*i <= s <= 3*i+2:
                # NOTE(review): identical to the fallback below; the elif is
                # kept for symmetry with the other regimes.
                return 0
            else:
                return 0
        elif j < 3*i and 3*i <= k <= 3*i+2:
            Ri = self.R_s[i]
            Ri_diff_k = sy.diff(Ri, self.q_large[k, 0])
            RR_T = operate_tilde(Ri) * operate_tilde(Ri_diff_k).T
            if s < 3*i:
                A = sy.integrate(RR_T, (self.xi_large[i, 0], 0, 1))
                A = operate_V(A).T
                B = operate_V(self.H_OMEGA_s[i-1][3*j:3*j+3, 3*s:3*s+3].T)
                Z = self.Ixx * A * B
                return Z.subs(self.xi_large[i, 0], 1)
            elif 3*i <= s <= 3*i+2:
                RR_T_diff_s = sy.diff(RR_T, self.q_large[s, 0])
                A = sy.integrate(RR_T_diff_s, (self.xi_large[i, 0], 0, 1))
                A = operate_V(A).T
                B = operate_V(self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T)
                Z = self.Ixx * A * B
                return Z.subs(self.xi_large[i, 0], 1)
            else:
                return 0
        elif 3*i <= j <= 3*i+2 and 3*i <= k <= 3*i+2:
            if s < 3*i:
                # The purely local (j, k) term does not depend on earlier sections.
                return 0
            elif 3*i <= s <= 3*i+2:
                Ri = self.R_s[i]
                Ri_diff_j = sy.diff(Ri, self.q_large[j, 0])
                Ri_diff_k = sy.diff(Ri, self.q_large[k, 0])
                z = Ri_diff_j.T * Ri_diff_k
                A = sy.integrate(z, (self.xi_large[i, 0], 0, 1))
                A = operate_T2(A)
                A = sy.diff(A, self.q_large[s, 0])
                return self.Ixx * A
            else:
                return 0
        else:
            return 0
    M_omega_dot_s = []
    for s in tqdm.tqdm(range(3*self.N)):
        #print("s = ", s)
        M_omega_dot_s_diff_by_s = []
        for i in tqdm.tqdm(range(self.N), leave=False):
            #print(" i = ", i)
            M_omega_dot_s_i = []
            for j in tqdm.tqdm(range(3*self.N), leave=False):
                #print(" j = ", j)
                M_omega_dot_s_ij = []
                for k in tqdm.tqdm(range(3*self.N), leave=False):
                    #print(" k = ", k)
                    M_omega_dot_s_ij.append(M_omega_dot(i, j, k, s))
                M_omega_dot_s_i.append(M_omega_dot_s_ij)
            M_omega_dot_s_diff_by_s.append(sy.Matrix(M_omega_dot_s_i))
        M_omega_dot_s.append(M_omega_dot_s_diff_by_s)
    self.M_omega_dot_s = M_omega_dot_s
    print("M_omega_dot done!")
def set_M_v(self,):
    """Compute and store the translational part of the inertia matrix.

    Same layout as :meth:`set_M_omega`: one 3N x 3N matrix per section i
    with three (j, k) index regimes.
    """
    print("computing M_v ...")
    def M_v(i, j, k):
        # Regime 1: both coordinates belong to preceding sections.
        if j < 3*i and k < 3*i:
            # Position of section i integrated over its normalized length.
            integrated_Pi = sy.integrate(
                self.P_s[i],
                (self.xi_large[i, 0], 0, 1)
            )
            A = self.m * self.J_v_s[i-1][:, j:j+1].T *\
                (self.J_v_s[i-1][:, k:k+1] +\
                    self.J_OMEGA_s[i-1][:, 3*k:3*k+3] * integrated_Pi)
            B = self.m * integrated_Pi.T *\
                self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T *\
                self.J_v_s[i-1][:, k:k+1]
            integrated_PiPi_T = sy.integrate(
                self.P_s[i] * self.P_s[i].T,
                (self.xi_large[i, 0], 0, 1)
            )
            C = self.m *\
                operate_V(integrated_PiPi_T).T *\
                operate_V(
                    self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T * self.J_OMEGA_s[i-1][:, 3*k:3*k+3]
                )
            return (A + B + C).subs(self.xi_large[i, 0], 1)
        # Regime 2: j from a preceding section, k local to section i.
        elif j < 3*i and 3*i <= k <= 3*i+2:
            Pi_diff_k = sy.diff(self.P_s[i], self.q_large[k, 0])
            integrated_Pi_diff_k = sy.integrate(
                Pi_diff_k,
                (self.xi_large[i, 0], 0, 1)
            )
            A = self.m * self.J_v_s[i-1][:, j:j+1].T *\
                integrated_Pi_diff_k
            integrated_PiPi_diff_k_T = sy.integrate(
                self.P_s[i] * Pi_diff_k.T,
                (self.xi_large[i, 0], 0, 1)
            )
            B = self.m *\
                operate_V(integrated_PiPi_diff_k_T).T *\
                operate_V(self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T)
            return (A + B).subs(self.xi_large[i, 0], 1)
        # Regime 3: both coordinates local to section i.
        elif 3*i <= j <= 3*i+2 and 3*i <= k <= 3*i+2:
            Pi_diff_j = sy.diff(self.P_s[i], self.q_large[j, 0])
            Pi_diff_k = sy.diff(self.P_s[i], self.q_large[k, 0])
            integrated = sy.integrate(
                Pi_diff_j.T * Pi_diff_k,
                (self.xi_large[i, 0], 0, 1)
            )
            A = self.m * integrated
            return A.subs(self.xi_large[i, 0], 1)
        else:
            return 0
    M_v_s = []
    for i in tqdm.tqdm(range(self.N)):
        #print("i = ", i)
        M_v_s_i = []
        for j in tqdm.tqdm(range(3*self.N), leave=False):
            #print(" j = ", j)
            M_v_s_ij = []
            for k in tqdm.tqdm(range(3*self.N), leave=False):
                #print(" k = ", k)
                M_v_s_ij.append(M_v(i, j, k))
            M_v_s_i.append(M_v_s_ij)
        M_v_s.append(sy.Matrix(M_v_s_i))
    self.M_v_s = M_v_s
    print("done computing M_v!")
def set_M_v_dot(self,):
"""並進運動慣性行列の微分をセット"""
print("computing M_v_dot ...")
def M_v_dot(i, j, k, s):
if j < 3*i and k < 3*i:
if s < 3*i:
integrated_Pi = sy.integrate(
self.P_s[i],
(self.xi_large[i, 0], 0, 1)
)
A = self.m * self.H_v_s[i-1][3*j:3*j+3, s:s+1].T *\
(self.J_v_s[i-1][:, k:k+1] + \
self.J_OMEGA_s[i-1][:, 3*k:3*k+3] * integrated_Pi)
B = self.m * self.J_v_s[i-1][:, j:j+1].T *\
(self.H_v_s[i-1][3*k:3*k+3, s:s+1] +\
self.H_OMEGA_s[i-1][3*k:3*k+3, 3*s:3*s+3] * integrated_Pi)
C = self.m * integrated_Pi.T *\
self.H_OMEGA_s[i-1][3*j:3*j+3, 3*s:3*s+3].T *\
self.J_v_s[i-1][:, k:k+1]
D = self.m * integrated_Pi.T *\
self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T *\
self.H_v_s[i-1][3*k:3*k+3, s:s+1]
integrated_PiPi_T = sy.integrate(
self.P_s[i] * self.P_s[i].T,
(self.xi_large[i, 0], 0, 1)
)
E = self.m *\
operate_V(integrated_PiPi_T).T *\
operate_V(
self.H_OMEGA_s[i-1][3*j:3*j+3, 3*s:3*s+3].T *\
self.J_OMEGA_s[i-1][:, 3*k:3*k+3] +\
self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T *\
self.H_OMEGA_s[i-1][3*k:3*k+3, 3*s:3*s+3]
)
Z = A + B + C + E
return Z.subs(self.xi_large[i, 0], 1)
elif 3*i <= s <= 3*i+2:
Pi_diff_s = sy.diff(self.P_s[i], self.q_large[s, 0])
integrated_Pi_diff_s = sy.integrate(
Pi_diff_s,
(self.xi_large[i, 0], 0, 1)
)
PiPi_T_diff_s = sy.diff(
self.P_s[i] * self.P_s[i].T,
self.q_large[s, 0]
)
integrated_PiPi_T_diff_s = sy.integrate(
PiPi_T_diff_s,
(self.xi_large[i, 0], 0, 1)
)
A = self.m * self.J_v_s[i-1][:, j:j+1].T *\
self.J_OMEGA_s[i-1][:, 3*k:3*k+3] *\
integrated_Pi_diff_s
B = self.m * integrated_Pi_diff_s.T *\
self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T *\
self.J_v_s[i-1][:, k:k+1]
C = self.m *\
operate_V(integrated_PiPi_T_diff_s).T *\
operate_V(
self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T *\
self.J_OMEGA_s[i-1][:, 3*k:3*k+3]
)
Z = A * B + C
return Z.subs(self.xi_large[i, 0], 1)
else:
return 0
elif j < 3*i and 3*i <= k <= 3*i+2:
Pi_diff_k = sy.diff(self.P_s[i], self.q_large[k, 0])
if s < 3*i:
integrated_Pi_diff_k = sy.integrate(
Pi_diff_k,
(self.xi_large[i, 0], 0, 1)
)
A = self.m * self.H_v_s[i-1][3*j:3*j+3, s:s+1].T *\
integrated_Pi_diff_k
integrated_PiPi_diff_k_T = sy.integrate(
self.P_s[i] * Pi_diff_k.T,
(self.xi_large[i, 0], 0, 1)
)
B = self.m *\
operate_V(integrated_PiPi_diff_k_T).T *\
operate_V(self.H_OMEGA_s[i-1][3*j:3*j+3, 3*s:3*s+3])
Z = A + B
return Z.subs(self.xi_large[i, 0], 1)
elif 3*i <= s <= 3*i+2:
Pi_diff_ks = sy.diff(Pi_diff_k, self.q_large[s, 0])
integrated_Pi_diff_ks = sy.integrate(
Pi_diff_ks,
(self.xi_large[i, 0], 0, 1)
)
A = self.m * self.J_v_s[i-1][:, j:j+1].T *\
integrated_Pi_diff_ks
PiPi_diff_k_T_diff_s = sy.diff(
self.P_s[i] * Pi_diff_k.T,
self.q_large[s, 0]
)
integrated_PiPi_diff_k_T_diff_s = sy.integrate(
PiPi_diff_k_T_diff_s,
(self.xi_large[i, 0], 0, 1)
)
B = self.m *\
operate_V(integrated_PiPi_diff_k_T_diff_s).T *\
operate_V(self.J_OMEGA_s[i-1][:, 3*j:3*j+3].T)
Z = A + B
return Z.subs(self.xi_large[i, 0], 1)
else:
return 0
elif 3*i <= j <= 3*i+2 and 3*i <= k <= 3*i+2:
if s < 3*i:
return 0
elif 3*i <= s <= 3*i+2:
Pi_diff_j = sy.diff(self.P_s[i], self.q_large[j, 0])
Pi_diff_k = sy.diff(self.P_s[i], self.q_large[k, 0])
Z = self.m *\
sy.diff(
Pi_diff_j.T | |
# <gh_stars>1-10 (dataset artifact, not Python source)
import abc
import asyncio
import cProfile
import functools
import ipaddress
import json
import logging
import pstats
import re
import string
import types
import typing
import uuid
from email.utils import parseaddr
import acme.jws
import acme.messages
import aiohttp_jinja2
import josepy
import yarl
from aiohttp import web
from aiohttp.helpers import sentinel
from aiohttp.web_middlewares import middleware
from cryptography import x509
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa, ec
import acmetk.util
from acmetk import models
from acmetk.client import CouldNotCompleteChallenge, AcmeClientException, AcmeClient
from acmetk.database import Database
from acmetk.models import messages
from acmetk.server import ChallengeValidator
from acmetk.server.external_account_binding import AcmeEABMixin
from acmetk.server.management import AcmeManagementMixin
from acmetk.server.routes import routes
from acmetk.version import __version__
from acmetk.plugin_base import PluginRegistry
logger = logging.getLogger(__name__)
async def handle_get(request):
    """Catch-all handler: answer any unrouted GET with 405 Method Not Allowed."""
    del request  # unused, but required by the aiohttp handler signature
    return web.Response(status=405)
class AcmeResponse(web.Response):
    """ACME-flavored :class:`aiohttp.web.Response`.

    Adds the Replay-Nonce, Link (rel="index") and cache-control headers
    that every ACME response carries, plus a Server banner.
    """

    def __init__(self, nonce, directory_url, *args, links=None, **kwargs):
        super().__init__(*args, **kwargs)
        # BUG FIX: copy instead of appending to the caller's list. The
        # original mutated the *links* argument in place, so a list reused
        # across responses would accumulate duplicate index links.
        link_values = list(links) if links is not None else []
        link_values.append(f'<{directory_url}>; rel="index"')
        self.headers.extend(("Link", link) for link in link_values)
        self.headers.update(
            {
                "Server": f"acmetk Server {__version__}",
                "Replay-Nonce": nonce,
                "Cache-Control": "no-store",
            }
        )
class AcmeServerBase(AcmeEABMixin, AcmeManagementMixin, abc.ABC):
"""Base class for an ACME compliant server.

Implementations must also be registered with the plugin registry via
:meth:`~acmetk.plugin_base.PluginRegistry.register_plugin`, so that the CLI script knows which configuration
option corresponds to which server class.
"""
ORDERS_LIST_CHUNK_LEN = 10
"""Number of order links to include per request."""
SUPPORTED_JWS_ALGORITHMS = (
    josepy.jwa.RS256,
    josepy.jwa.RS384,
    josepy.jwa.RS512,
    josepy.jwa.PS256,
    josepy.jwa.PS384,
    josepy.jwa.PS512,
    josepy.jwa.ES256,
    josepy.jwa.ES384,
    josepy.jwa.ES512,
)
"""The JWS signing algorithms that the server supports."""
SUPPORTED_EAB_JWS_ALGORITHMS = (
    josepy.jwa.HS256,
    josepy.jwa.HS384,
    josepy.jwa.HS512,
)
"""The symmetric JWS signing algorithms that the server supports for external account bindings."""
SUPPORTED_ACCOUNT_KEYS = (rsa.RSAPublicKey, ec.EllipticCurvePublicKey)
"""The types of public keys that the server supports when creating ACME accounts."""
SUPPORTED_CSR_KEYS = (rsa.RSAPublicKey, ec.EllipticCurvePublicKey)
"""The types of public keys that the server supports in a certificate signing request."""
VALID_DOMAIN_RE = re.compile(
    r"^(((?!-))(xn--|_{1,1})?[a-z0-9-]{0,61}[a-z0-9]{1,1}\.)*"
    r"(xn--)?([a-z0-9][a-z0-9\-]{0,60}|[a-z0-9-]{1,30}\.[a-z]{2,})$"
)
"""Domain-name validation pattern, adapted from https://stackoverflow.com/questions/
10306690/what-is-a-regular-expression-which-will-match-a-valid-domain-name-without-a-subd
Better than nothing, but note that it still accepts names ending with "-".
"""
def __init__(
    self,
    *,
    rsa_min_keysize=2048,
    ec_min_keysize=256,
    tos_url=None,
    mail_suffixes=None,
    subnets=None,
    use_forwarded_header=False,
    require_eab=False,
    allow_wildcard=False,
    **kwargs,
):
    """Initialize the ACME server.

    :param rsa_min_keysize: smallest accepted RSA key size for CSRs and account keys.
    :param ec_min_keysize: smallest accepted EC key size for CSRs and account keys.
    :param tos_url: URL of the terms of service, if any.
    :param mail_suffixes: presumably the allowed contact-email suffixes -- verify against usage.
    :param subnets: subnets parsed into :mod:`ipaddress` networks; used for access restriction.
    :param use_forwarded_header: whether to trust forwarded-IP headers -- see host_ip_middleware.
    :param require_eab: whether an external account binding is mandatory.
    :param allow_wildcard: whether wildcard identifiers are permitted.
    :param kwargs: extra configuration keys are accepted and ignored here.
    """
    super().__init__()
    # Per-use ("csr" vs "account") lower/upper bounds on accepted key sizes.
    self._keysize = {
        "csr": {
            rsa.RSAPublicKey: (rsa_min_keysize, 4096),
            ec.EllipticCurvePublicKey: (ec_min_keysize, 384),
        },
        "account": {
            rsa.RSAPublicKey: (rsa_min_keysize, 4096),
            ec.EllipticCurvePublicKey: (ec_min_keysize, 521),
        },
    }
    self._tos_url = tos_url
    self._mail_suffixes = mail_suffixes
    self._subnets = (
        [ipaddress.ip_network(subnet) for subnet in subnets] if subnets else None
    )
    self._use_forwarded_header = use_forwarded_header
    self._require_eab = require_eab
    self._allow_wildcard = allow_wildcard
    self.app = web.Application(
        middlewares=[
            self.error_middleware,
            self.host_ip_middleware,
            self.aiohttp_jinja2_middleware,
        ]
    )
    # request.app['_service_'] available in jinja2 templates
    self.app["_service_"] = self
    self._add_routes()
    # Nonces issued but not yet consumed; see _issue_nonce/_verify_nonce.
    self._nonces = set()
    self._db: typing.Optional[Database] = None
    self._db_session = None
    # Maps challenge type -> validator; see register_challenge_validator.
    self._challenge_validators = {}
def _match_keysize(self, public_key, what):
for key_type, key_size in self._keysize[what].items():
if isinstance(public_key, key_type):
(low, high) = key_size
break
else:
raise ValueError("This key type is not supported.")
if low <= public_key.key_size <= high:
return
raise ValueError(
f"{public_key.__class__.__name__} Keysize for {what} has to be {low} <= {public_key.key_size=} <= {high}"
)
def _add_routes(self):
    """Register this instance's bound handlers for every route, plus a catch-all GET."""
    bound = [
        web.RouteDef(
            rd.method,
            rd.path,
            getattr(self, rd.handler.__name__),
            rd.kwargs.copy(),
        )
        for rd in routes
    ]
    self.app.add_routes(bound)
    # Any GET that matched no specific route falls through to the 405 handler.
    self.app.router.add_route("GET", "/{tail:.*}", handle_get)
@classmethod
async def create_app(
    cls, config: typing.Dict[str, typing.Any], **kwargs
) -> "AcmeServerBase":
    """A factory that also creates and initializes the database and session objects,
    reading the necessary arguments from the passed config dict.

    :param config: A dictionary holding the configuration. See :doc:`configuration` for supported options.
    :return: The server instance
    """
    database = Database(config["db"])
    instance = cls(
        **config,
        **kwargs,
    )
    instance._db = database
    instance._db_session = database.session
    return instance
def _session(self, request):
return self._db_session(
info={"remote_host": request.get("actual_ip", "0.0.0.0")}
)
@classmethod
async def runner(
    cls, config: typing.Dict[str, typing.Any], **kwargs
) -> typing.Tuple["aiohttp.web.AppRunner", "AcmeServerBase"]:
    """Start the server on the configured hostname and port via an AppRunner.

    The instance itself is built with :meth:`.create_app`.

    :param config: A dictionary holding the configuration. See :doc:`configuration` for supported options.
    :param kwargs: Additional kwargs are passed to the :meth:`.create_app` call.
    :return: A tuple containing the app runner as well as the server instance.
    """
    instance = await cls.create_app(config, **kwargs)
    app_runner = web.AppRunner(instance.app)
    await app_runner.setup()
    tcp_site = web.TCPSite(app_runner, config["hostname"], config["port"])
    await tcp_site.start()
    return app_runner, instance
@classmethod
async def unix_socket(
    cls, config: typing.Dict[str, typing.Any], path: str, **kwargs
) -> typing.Tuple["aiohttp.web.AppRunner", "AcmeServerBase"]:
    """Start the server on a Unix socket bound to *path* via an AppRunner.

    The instance itself is built with :meth:`.create_app`.

    :param config: A dictionary holding the configuration. See :doc:`configuration` for supported options.
    :param path: Path of the unix socket.
    :param kwargs: Additional kwargs are passed to the :meth:`.create_app` call.
    :return: A tuple containing the app runner as well as the server instance.
    """
    instance = await cls.create_app(config, **kwargs)
    app_runner = web.AppRunner(instance.app)
    await app_runner.setup()
    socket_site = web.UnixSite(app_runner, path)
    await socket_site.start()
    return app_runner, instance
def register_challenge_validator(self, validator: ChallengeValidator):
    """Registers a :class:`ChallengeValidator` with the server.

    The validator is subsequently used to validate challenges of all types
    that it supports.

    :param validator: The challenge validator to be registered.
    :raises: :class:`ValueError` If a challenge validator is already registered that supports any of
        the challenge types that *validator* supports.
    """
    for challenge_type in validator.SUPPORTED_CHALLENGES:
        if self._challenge_validators.get(challenge_type):
            raise ValueError(
                f"A challenge validator for type {challenge_type} is already registered"
            )
        self._challenge_validators[challenge_type] = validator
@property
def _supported_challenges(self):
    """Challenge types for which a validator is registered (dict keys view)."""
    return self._challenge_validators.keys()
def _response(self, request, data=None, text=None, *args, **kwargs):
    """Build an :class:`AcmeResponse` with a fresh nonce and directory link.

    :param data: JSON-serializable payload; mutually exclusive with *text*.
    :param text: pre-rendered body text.
    """
    if data and text:
        raise ValueError("only one of data, text, or body should be specified")
    elif data is not None and data is not sentinel:
        # BUG FIX: checking against None/sentinel instead of truthiness makes
        # falsy payloads such as {} or [] serialize to JSON instead of being
        # silently dropped (and keeps the aiohttp sentinel out of the body).
        text = json.dumps(data)
        kwargs.update({"content_type": "application/json"})
    # Otherwise *data* is None or the sentinel: fall through with *text* as-is.
    return AcmeResponse(
        self._issue_nonce(),
        acmetk.util.url_for(request, "directory"),
        *args,
        **kwargs,
        text=text,
    )
def _issue_nonce(self):
nonce = uuid.uuid4().hex
self._nonces.add(nonce)
return nonce
def _verify_nonce(self, nonce):
if nonce in self._nonces:
self._nonces.remove(nonce)
else:
raise acme.messages.Error.with_code("badNonce", detail=nonce)
async def _verify_request(
self,
request,
session,
key_auth: bool = False,
post_as_get: bool = False,
expunge_account: bool = True,
):
"""Verifies an ACME request whose payload is encapsulated in a JWS.
`6.2. Request Authentication <https://tools.ietf.org/html/rfc8555#section-6.2>`_
All requests to handlers apart from :meth:`new_nonce` and :meth:`directory`
are authenticated.
:param key_auth: *True* if the JWK inside the JWS should be used to \
verify its signature. False otherwise
:param post_as_get: *True* if a `POST-as-GET <https://tools.ietf.org/html/rfc8555#section-6.3>`_ \
request is expected. False otherwise
:param expunge_account: *True* if the account object should be expunged from the session. \
Needs to be False if the account object is to be updated in the database later.
:raises:
* :class:`aiohttp.web.HTTPNotFound` if the JWS contains a kid, \
but the corresponding account does not exist.
* :class:`acme.messages.Error` if any of the following are true:
* The request does not contain a valid JWS
* The handler expects a `POST-as-GET <https://tools.ietf.org/html/rfc8555#section-6.3>`_ request, \
but got a non-empty payload
* The URL inside the JWS' signature is not equal to the actual request URL
* The signature was created using an algorithm that the server does not support, \
see :attr:`SUPPORTED_JWS_ALGORITHMS`
* The client supplied a bad nonce in the JWS' protected header
* The JWS does not have *either* a JWK *or* a kid
* The JWS' signature is invalid
* There is a mismatch between the URL's kid and the JWS' kid
* The account corresponding to the kid does not have status \
:attr:`acmetk.models.AccountStatus.VALID`
"""
data = await request.text()
try:
jws = acme.jws.JWS.json_loads(data)
except josepy.errors.DeserializationError:
raise acme.messages.Error.with_code(
"malformed", detail="The request does not contain a valid JWS."
)
if post_as_get and jws.payload != b"":
raise acme.messages.Error.with_code(
"malformed",
detail='The request payload must be b"" in a POST-as-GET request.',
)
sig = jws.signature.combined
if sig.url != str(acmetk.util.forwarded_url(request)):
raise acme.messages.Error.with_code("unauthorized")
if sig.alg not in self.SUPPORTED_JWS_ALGORITHMS:
raise acme.messages.Error.with_code(
"badSignatureAlgorithm",
detail=f"Supported algorithms: {', '.join([str(alg) for alg in self.SUPPORTED_JWS_ALGORITHMS])}",
)
nonce = acme.jose.b64.b64encode(sig.nonce).decode()
self._verify_nonce(nonce)
# Check whether we have *either* a jwk or a kid
if not ((sig.jwk is not None) ^ (sig.kid is not None)):
raise acme.messages.Error.with_code("malformed")
if key_auth:
# check whether key was supplied - josepy.errors.Error: No key found - malformed
if not jws.verify(sig.jwk):
raise acme.messages.Error.with_code("unauthorized")
else:
account = await self._db.get_account(session, key=sig.jwk)
elif sig.kid:
account_id = yarl.URL(sig.kid).name
if (
acmetk.util.url_for(request, "accounts", account_id=account_id)
!= sig.kid
):
"""Bug in the dehydrated client, accepted by boulder, so we accept it too.
Dehydrated puts .../new-account/{kid} into the request signature, instead of
.../accounts/{kid}."""
kid_new_account_route = yarl.URL(
acmetk.util.url_for(request, "new-account")
)
kid_new_account_route = kid_new_account_route.with_path(
kid_new_account_route.path + "/" + account_id
)
if str(kid_new_account_route) == sig.kid:
logger.debug("Buggy client kid account mismatch")
else:
raise acme.messages.Error.with_code("malformed")
elif (
"account_id" in | |
#!/usr/bin/env python
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_json_output module."""
import datetime
import errno
import json
import os
import re
import sys
import gtest_json_test_utils
import gtest_test_utils
# Command-line flags understood by Google Test binaries.
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = '--gtest_output'
# File name used when --gtest_output=json is given without an explicit path
# (exercised by testDefaultOutputFile below).
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.json'
# The JSON tests reuse the binary built for the XML output tests.
GTEST_PROGRAM_NAME = 'gtest_xml_output_unittest_'

# The flag indicating stacktraces are not supported
NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support'

SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv

# Appended to expected failure messages; '*' acts as a wildcard for the
# platform-specific stack trace text.
if SUPPORTS_STACK_TRACES:
  STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
  STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY = {
u'tests': 24,
u'failures': 4,
u'disabled': 2,
u'errors': 0,
u'timestamp': u'*',
u'time': u'*',
u'ad_hoc_property': u'42',
u'name': u'AllTests',
u'testsuites': [
{
u'name': u'SuccessfulTest',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'Succeeds',
u'status': u'RUN',
u'time': u'*',
u'classname': u'SuccessfulTest'
}
]
},
{
u'name': u'FailedTest',
u'tests': 1,
u'failures': 1,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'Fails',
u'status': u'RUN',
u'time': u'*',
u'classname': u'FailedTest',
u'failures': [
{
u'failure':
u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 1\n 2' + STACK_TRACE_TEMPLATE,
u'type': u''
}
]
}
]
},
{
u'name': u'DisabledTest',
u'tests': 1,
u'failures': 0,
u'disabled': 1,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'DISABLED_test_not_run',
u'status': u'NOTRUN',
u'time': u'*',
u'classname': u'DisabledTest'
}
]
},
{
u'name': u'SkippedTest',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'Skipped',
u'status': u'SKIPPED',
u'time': u'*',
u'classname': u'SkippedTest'
}
]
},
{
u'name': u'MixedResultTest',
u'tests': 3,
u'failures': 1,
u'disabled': 1,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'Succeeds',
u'status': u'RUN',
u'time': u'*',
u'classname': u'MixedResultTest'
},
{
u'name': u'Fails',
u'status': u'RUN',
u'time': u'*',
u'classname': u'MixedResultTest',
u'failures': [
{
u'failure':
u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 1\n 2' + STACK_TRACE_TEMPLATE,
u'type': u''
},
{
u'failure':
u'gtest_xml_output_unittest_.cc:*\n'
u'Expected equality of these values:\n'
u' 2\n 3' + STACK_TRACE_TEMPLATE,
u'type': u''
}
]
},
{
u'name': u'DISABLED_test',
u'status': u'NOTRUN',
u'time': u'*',
u'classname': u'MixedResultTest'
}
]
},
{
u'name': u'XmlQuotingTest',
u'tests': 1,
u'failures': 1,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'OutputsCData',
u'status': u'RUN',
u'time': u'*',
u'classname': u'XmlQuotingTest',
u'failures': [
{
u'failure':
u'gtest_xml_output_unittest_.cc:*\n'
u'Failed\nXML output: <?xml encoding="utf-8">'
u'<top><![CDATA[cdata text]]></top>' +
STACK_TRACE_TEMPLATE,
u'type': u''
}
]
}
]
},
{
u'name': u'InvalidCharactersTest',
u'tests': 1,
u'failures': 1,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'InvalidCharactersInMessage',
u'status': u'RUN',
u'time': u'*',
u'classname': u'InvalidCharactersTest',
u'failures': [
{
u'failure':
u'gtest_xml_output_unittest_.cc:*\n'
u'Failed\nInvalid characters in brackets'
u' [\x01\x02]' + STACK_TRACE_TEMPLATE,
u'type': u''
}
]
}
]
},
{
u'name': u'PropertyRecordingTest',
u'tests': 4,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'SetUpTestSuite': u'yes',
u'TearDownTestSuite': u'aye',
u'testsuite': [
{
u'name': u'OneProperty',
u'status': u'RUN',
u'time': u'*',
u'classname': u'PropertyRecordingTest',
u'key_1': u'1'
},
{
u'name': u'IntValuedProperty',
u'status': u'RUN',
u'time': u'*',
u'classname': u'PropertyRecordingTest',
u'key_int': u'1'
},
{
u'name': u'ThreeProperties',
u'status': u'RUN',
u'time': u'*',
u'classname': u'PropertyRecordingTest',
u'key_1': u'1',
u'key_2': u'2',
u'key_3': u'3'
},
{
u'name': u'TwoValuesForOneKeyUsesLastValue',
u'status': u'RUN',
u'time': u'*',
u'classname': u'PropertyRecordingTest',
u'key_1': u'2'
}
]
},
{
u'name': u'NoFixtureTest',
u'tests': 3,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'RecordProperty',
u'status': u'RUN',
u'time': u'*',
u'classname': u'NoFixtureTest',
u'key': u'1'
},
{
u'name': u'ExternalUtilityThatCallsRecordIntValuedProperty',
u'status': u'RUN',
u'time': u'*',
u'classname': u'NoFixtureTest',
u'key_for_utility_int': u'1'
},
{
u'name':
u'ExternalUtilityThatCallsRecordStringValuedProperty',
u'status': u'RUN',
u'time': u'*',
u'classname': u'NoFixtureTest',
u'key_for_utility_string': u'1'
}
]
},
{
u'name': u'TypedTest/0',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'HasTypeParamAttribute',
u'type_param': u'int',
u'status': u'RUN',
u'time': u'*',
u'classname': u'TypedTest/0'
}
]
},
{
u'name': u'TypedTest/1',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'HasTypeParamAttribute',
u'type_param': u'long',
u'status': u'RUN',
u'time': u'*',
u'classname': u'TypedTest/1'
}
]
},
{
u'name': u'Single/TypeParameterizedTestSuite/0',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'HasTypeParamAttribute',
u'type_param': u'int',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/TypeParameterizedTestSuite/0'
}
]
},
{
u'name': u'Single/TypeParameterizedTestSuite/1',
u'tests': 1,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'HasTypeParamAttribute',
u'type_param': u'long',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/TypeParameterizedTestSuite/1'
}
]
},
{
u'name': u'Single/ValueParamTest',
u'tests': 4,
u'failures': 0,
u'disabled': 0,
u'errors': 0,
u'time': u'*',
u'testsuite': [
{
u'name': u'HasValueParamAttribute/0',
u'value_param': u'33',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/ValueParamTest'
},
{
u'name': u'HasValueParamAttribute/1',
u'value_param': u'42',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/ValueParamTest'
},
{
u'name': u'AnotherTestThatHasValueParamAttribute/0',
u'value_param': u'33',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/ValueParamTest'
},
{
u'name': u'AnotherTestThatHasValueParamAttribute/1',
u'value_param': u'42',
u'status': u'RUN',
u'time': u'*',
u'classname': u'Single/ValueParamTest'
}
]
}
]
}
# Expected JSON when only SuccessfulTest.Succeeds runs — presumably selected
# via --gtest_filter; confirm against the test method that uses this constant
# (not visible in this chunk).  u'*' entries are wildcards.
EXPECTED_FILTERED = {
    u'tests': 1,
    u'failures': 0,
    u'disabled': 0,
    u'errors': 0,
    u'time': u'*',
    u'timestamp': u'*',
    u'name': u'AllTests',
    u'ad_hoc_property': u'42',
    u'testsuites': [{
        u'name': u'SuccessfulTest',
        u'tests': 1,
        u'failures': 0,
        u'disabled': 0,
        u'errors': 0,
        u'time': u'*',
        u'testsuite': [{
            u'name': u'Succeeds',
            u'status': u'RUN',
            u'time': u'*',
            u'classname': u'SuccessfulTest',
        }]
    }],
}
# Expected JSON for a binary containing no tests (gtest_no_test_unittest,
# see testEmptyJsonOutput).  u'*' entries are wildcards.
EXPECTED_EMPTY = {
    u'tests': 0,
    u'failures': 0,
    u'disabled': 0,
    u'errors': 0,
    u'time': u'*',
    u'timestamp': u'*',
    u'name': u'AllTests',
    u'testsuites': [],
}
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)

# Probe for typed-test support by listing the binary's tests: some platforms
# build the test binary without the TypedTest suites.
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
    [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestJsonOutputUnitTest(gtest_test_utils.TestCase):
"""Unit test for Google Test's JSON output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
    def testNonEmptyJsonOutput(self):
      """Verifies JSON output for a Google Test binary with non-empty output.

      Runs a test program that generates a non-empty JSON output, and
      tests that the JSON output is expected.
      """
      # 1 is presumably the expected exit code (the binary contains failing
      # tests) — confirm in _TestJsonOutput, defined later in this class.
      self._TestJsonOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY, 1)
  def testEmptyJsonOutput(self):
    """Verifies JSON output for a Google Test binary without actual tests.

    Runs a test program that generates an empty JSON output, and
    tests that the JSON output is expected.
    """
    # 0 is presumably the expected exit code (no tests, so nothing can fail)
    # — confirm in _TestJsonOutput, defined later in this class.
    self._TestJsonOutput('gtest_no_test_unittest', EXPECTED_EMPTY, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the JSON output is valid.
Runs a test program that generates an empty JSON output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetJsonOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual['timestamp']
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
re.match,
'JSON datettime string %s has incorrect format' % date_time_str)
date_time_from_json = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_json)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
  def testDefaultOutputFile(self):
    """Verifies the default output file name.

    Confirms that Google Test produces an JSON output file with the expected
    default name if no name is explicitly specified.
    """
    output_file = os.path.join(gtest_test_utils.GetTempDir(),
                               GTEST_DEFAULT_OUTPUT_FILE)
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
        'gtest_no_test_unittest')
    # Remove any stale output file; only "file does not exist" is tolerated.
    try:
      os.remove(output_file)
    except OSError:
      e = sys.exc_info()[1]  # pre-Python-2.6-compatible way to get the exception
      if e.errno != errno.ENOENT:
        raise
    # Bare "json" (no explicit path) selects the default file name in the
    # process's working directory.
    p = gtest_test_utils.Subprocess(
        [gtest_prog_path, '%s=json' % GTEST_OUTPUT_FLAG],
        working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    self.assert_(os.path.isfile(output_file))
def testSuppressedJsonOutput(self):
"""Verifies that no JSON output is generated.
Tests that no JSON file is generated if the default JSON listener is
shut down before RUN_ALL_TESTS is invoked.
"""
json_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.json')
if os.path.isfile(json_path):
os.remove(json_path)
command = | |
# coding: utf-8
"""Dirty Hack to cache simprod data for weighting for offline usage
Code is adopted from `from_simprod` function:
https://code.icecube.wisc.edu/projects/icecube/browser/IceCube/meta-projects/
combo/trunk/weighting/python/weighting.py#L823
Line 823 is where to get the database PW from.
"""
from __future__ import print_function, division
import os
import importlib
from functools import partial
import click
import yaml
import warnings
from icecube.weighting import weighting
from icecube.icetray import I3Units
# GLOBALS
# Maps simprod SQL type names to the Python callables used to coerce values.
_sql_types = dict(string=str, int=int, double=float, float=float, bool=bool)
# Unique sentinel distinguishing "argument not supplied" from None.
NOTHING = object()


def get(collection, key, default=NOTHING, type=NOTHING):
    """Look up *key* in *collection* with optional default and type coercion.

    Without *default*, a missing key raises KeyError just like plain
    indexing.  The *type* parameter name shadows the builtin but is kept
    for backward compatibility with existing callers.
    """
    value = collection[key] if default is NOTHING else collection.get(key, default)
    return value if type is NOTHING else type(value)
def get_steering(cursor, dataset_id):
    """Fetch all steering parameters for *dataset_id* as a dict,
    coercing each value to its declared SQL type where possible."""
    cursor.execute(
        "SELECT name, type, value FROM steering_parameter WHERE dataset_id=%s",
        (dataset_id,))
    params = {}
    for name, sql_type, raw in cursor.fetchall():
        try:
            params[name] = _sql_types[sql_type](raw)
        except ValueError:
            # Value is not a literal of the declared type; keep the raw string.
            params[name] = raw
    return params
def _import_mysql():
"Import the flavor of the month"
import importlib
for impl in 'MySQLdb', 'mysql.connector', 'pymysql':
try:
mysql = importlib.import_module(impl)
return mysql
except ImportError:
pass
raise ImportError('No MySQL bindings found!')
def get_generator_settings(
dataset_id, database_pwd,
use_muongun=False, database='vm-i3simprod.icecube.wisc.edu'):
"""Get Settings to create generator
Parameters
----------
dataset_id : int
The database id.
database_pwd : str
The database password. This is provided in the `from_simprod` script
at combo/trunk/weighting/python/weighting.py#L823.
database : str, optional
The database url.
Returns
-------
dict
A dict with the generator settings.
class: str
Name of the Generator class.
multiplier: float
The multipler to apply on the generator.
kwargs: dict
Keyword arguments that get passed to generator class
"""
generator_data = {}
import re
mysql = _import_mysql()
try:
db = mysql.connect(
host=database, user='i3simprod-ro', passwd=database_pwd,
db='i3simprod')
except mysql.OperationalError as e:
reason = e.args[1]
reason += " This might happen if you tried to connect to the simprod database from many cluster jobs in parallel. Don't do that. Instead, query the generator for your dataset once, and pass it to your jobs in a file."
raise mysql.OperationalError(e.args[0], reason)
cursor = db.cursor()
if isinstance(dataset_id, str):
raise UnboundLocalError
cursor.execute("SELECT COUNT(*) FROM dataset WHERE dataset_id=%s", (dataset_id,))
if cursor.fetchone()[0] == 0:
raise ValueError("Dataset %s does not exist in the simprod database" % repr(dataset_id))
# In case this is a post-processed set, chase the chain back until we hit the real generated set
while True:
cursor.execute("SELECT description FROM dataset WHERE dataset_id=%s", (dataset_id,))
description = cursor.fetchone()[0]
match = re.match(r'.*(from|of) dataset (\d{4,5})', description, re.IGNORECASE) if description else None
if match:
dataset_id = int(match.group(2))
else:
try:
try:
parent_id = get_steering(cursor, dataset_id)['inputdataset']
except KeyError:
parent_id = get_steering(cursor, dataset_id)['MCPE_dataset']
# check if this is an IceTop dataset, in which case we should
# stop before we get to generation level
parent = get_steering(cursor, parent_id)
if 'CORSIKA::platform' in parent:
break
dataset_id = parent_id
except KeyError:
break
# query category and number of completed files
cursor.execute("SELECT category FROM dataset JOIN simcat ON dataset.simcat_id=simcat.simcat_id and dataset.dataset_id=%s", (dataset_id,))
row = cursor.fetchone()
category = row[0]
steering = get_steering(cursor, dataset_id)
get_steering_param = partial(get, steering)
if category == 'Test':
if steering['mctype'] == 'corsika':
category = 'CORSIKA-in-ice'
elif steering['mctype'].startswith('nugen'):
category = 'neutrino-generator'
def _coerce_tray_parameter(row):
if not row:
return None
if row[1] in _sql_types:
try:
return _sql_types[row[1]](row[2])
except ValueError:
# not a literal, must be a function
return SimprodFunction(row[2], get_steering(cursor, dataset_id))
else:
cursor.execute("SELECT value FROM carray_element WHERE cparameter_id=%s", (row[0],))
return [float(v[0]) for v in cursor.fetchall()]
def get_tray_parameter(dataset_id, key, klass=None):
if klass is None:
cursor.execute("SELECT cparameter_id, type, value FROM cparameter WHERE dataset_id=%s AND name=%s ORDER BY tray_index ASC", (dataset_id, key))
else:
cursor.execute("SELECT cparameter_id, type, value FROM cparameter INNER JOIN (module_pivot, module) ON (module_pivot.module_id=module.module_id AND cparameter.module_pivot_id=module_pivot.module_pivot_id) WHERE module_pivot.dataset_id=%s AND cparameter.name=%s AND module.class=%s ORDER BY cparameter.tray_index ASC", (dataset_id, key, klass))
values = list(map(_coerce_tray_parameter, cursor.fetchall()))
if len(values) == 0:
return None
elif len(values) == 1:
return values[0]
else:
return values
def get_metaproject(dataset_id, tray_name, tray_index=None):
"""
Get metaproject version for a tray by name, or if that fails, by index
"""
cursor.execute("SELECT metaproject.name, metaproject.major_version, metaproject.minor_version, metaproject.patch_version FROM tray JOIN metaproject_pivot ON tray.tray_index=metaproject_pivot.tray_index AND tray.dataset_id=metaproject_pivot.dataset_id JOIN metaproject ON metaproject_pivot.metaproject_id=metaproject.metaproject_id WHERE tray.dataset_id=%s AND tray.name=%s", (dataset_id, tray_name))
row = cursor.fetchone()
if row is None and tray_index is not None:
cursor.execute("SELECT metaproject.name, metaproject.major_version, metaproject.minor_version, metaproject.patch_version FROM tray JOIN metaproject_pivot ON tray.tray_index=metaproject_pivot.tray_index AND tray.dataset_id=metaproject_pivot.dataset_id JOIN metaproject ON metaproject_pivot.metaproject_id=metaproject.metaproject_id WHERE tray.dataset_id=%s AND tray.tray_index=%s", (dataset_id, tray_index))
row = cursor.fetchone()
metaproject, major, minor, patch = row
prerelease = None
if '-' in patch:
patch, prerelease = patch.split('-')
return (metaproject, int(major), int(minor), int(patch), prerelease)
if category == 'neutrino-generator':
if 'NUGEN::elogmin' in steering:
emin, emax = 10**get_steering_param('NUGEN::elogmin', type=float), 10**get_steering_param('NUGEN::elogmax', type=float)
elif 'NUGEN::from_energy' in steering:
emin, emax = get_steering_param('NUGEN::from_energy', type=float), get_steering_param('NUGEN::to_energy', type=float)
else:
emin, emax = get_steering_param('NUGEN::emin', type=float), get_steering_param('NUGEN::emax', type=float)
nugen_kwargs = dict()
if 'NUGEN::injectionradius' in steering:
nugen_kwargs['InjectionRadius'] = get_steering_param('NUGEN::injectionradius', type=float)
elif 'NUGEN::cylinder_length' in steering:
nugen_kwargs['CylinderHeight'] = get_steering_param('NUGEN::cylinder_length', type=float)
nugen_kwargs['CylinderRadius'] = get_steering_param('NUGEN::cylinder_radius', type=float)
if get_metaproject(dataset_id, 'nugen', 0)[1:] >= (4,1,6):
nugen_kwargs['InjectionMode'] = 'Cylinder'
# generator = NeutrinoGenerator(
# NEvents=steering['nevents'],
# FromEnergy =emin,
# ToEnergy =emax,
# GammaIndex =get_steering_param('NUGEN::gamma', type=float),
# NeutrinoFlavor =get_steering_param('NUGEN::flavor'),
# ZenithMin =get_steering_param('NUGEN::zenithmin', type=float)*I3Units.deg,
# ZenithMax =get_steering_param('NUGEN::zenithmax', type=float)*I3Units.deg,
# **nugen_kwargs)
# write generator data
generator_data['class'] = 'icecube.weighting.weighting.NeutrinoGenerator'
generator_data['multiplier'] = None
generator_data['kwargs'] = dict(
NEvents=steering['nevents'],
FromEnergy =emin,
ToEnergy =emax,
GammaIndex =get_steering_param('NUGEN::gamma', type=float),
NeutrinoFlavor =get_steering_param('NUGEN::flavor'),
ZenithMin =get_steering_param('NUGEN::zenithmin', type=float)*I3Units.deg,
ZenithMax =get_steering_param('NUGEN::zenithmax', type=float)*I3Units.deg,
**nugen_kwargs
)
elif category == 'CORSIKA-in-ice':
composition = steering.get('composition', '5-component')
if composition.startswith('5-component') or composition == 'jcorsika':
gamma = get_tray_parameter(dataset_id, "pgam")
if gamma is None:
gamma = [-2]*5
else:
gamma = [-abs(v) for v in gamma]
norm = get_tray_parameter(dataset_id, "pnorm")
if norm is None:
norm = [10., 5., 3., 2., 1.]
if get_tray_parameter(dataset_id, 'CutoffType') == "EnergyPerNucleon":
LowerCutoffType = 'EnergyPerNucleon'
else:
LowerCutoffType = 'EnergyPerParticle'
UpperCutoffType = get_tray_parameter(dataset_id, 'UpperCutoffType')
if UpperCutoffType is None:
corsika_version = get_tray_parameter(dataset_id, 'CorsikaVersion')
if isinstance(corsika_version, list):
corsika_version = corsika_version[-1]
if corsika_version is None or '5comp' in corsika_version:
# 5-component dCORSIKA only supports a lower cutoff
UpperCutoffType = 'EnergyPerParticle'
elif get_metaproject(dataset_id, 'generate', 0)[1] >= 4:
# Upper cutoff type appeared in IceSim 4, and defaults to the lower cutoff type
UpperCutoffType = LowerCutoffType
else:
UpperCutoffType = 'EnergyPerParticle'
length = get_tray_parameter(dataset_id, 'length', "icecube.simprod.generators.CorsikaGenerator")
if length is None:
if 'CORSIKA::length' in steering:
length = get_steering_param('CORSIKA::length', type=float)*I3Units.m
else:
length = 1600*I3Units.m
warnings.warn("No target cylinder length for dataset {dataset_id}! Assuming {length:.0f} m".format(**locals()))
radius = get_tray_parameter(dataset_id, 'radius', "icecube.simprod.generators.CorsikaGenerator")
if radius is None:
if 'CORSIKA::radius' in steering:
radius = get_steering_param('CORSIKA::radius', type=float)*I3Units.m
else:
radius = 800*I3Units.m
warnings.warn("No target cylinder length for dataset {dataset_id}! Assuming {radius:.0f} m".format(**locals()))
if use_muongun:
from icecube import MuonGun
nevents = get_steering_param('CORSIKA::showers', type=int)
if gamma == [-2.0]*5 and norm == [10., 5., 3., 2., 1.]:
model = 'Standard5Comp'
elif gamma == [-2.6]*5 and norm == [3., 2., 1., 1., 1.]:
model = 'CascadeOptimized5Comp'
else:
raise ValueError("Unknown CORSIKA configuration!")
# generator = nevents*MuonGun.corsika_genprob(model)
# write generator data
generator_data['class'] = 'icecube.MuonGun.corsika_genprob'
generator_data['kwargs'] = dict(config=model)
generator_data['multiplier'] = nevents
else:
oversampling = get_steering_param('oversampling', 1, int)
# generator = FiveComponent(oversampling*get_steering_param('CORSIKA::showers', type=int),
# emin=get_steering_param('CORSIKA::eprimarymin', type=float)*I3Units.GeV,
# emax=get_steering_param('CORSIKA::eprimarymax', type=float)*I3Units.GeV,
# normalization=norm, gamma=gamma,
# LowerCutoffType=LowerCutoffType, UpperCutoffType=UpperCutoffType,
# height=length, radius=radius)
# write generator data
generator_data['class'] = 'icecube.weighting.weighting.FiveComponent'
generator_data['kwargs'] = dict(
nevents=oversampling*get_steering_param('CORSIKA::showers', type=int),
emin=get_steering_param('CORSIKA::eprimarymin', type=float)*I3Units.GeV,
emax=get_steering_param('CORSIKA::eprimarymax', type=float)*I3Units.GeV,
normalization=norm, gamma=gamma,
LowerCutoffType=LowerCutoffType, UpperCutoffType=UpperCutoffType,
height=length, radius=radius)
generator_data['multiplier'] = None
elif composition.startswith('polygonato') or composition.startswith('Hoerandel'):
if use_muongun:
from icecube import MuonGun
length = get_steering_param('CORSIKA::length', type=float)*I3Units.m
radius = get_steering_param('CORSIKA::radius', type=float)*I3Units.m
area = numpy.pi**2*radius*(radius+length)
areanorm = 0.131475115*area
# generator = (steering['CORSIKA::showers']/areanorm)*MuonGun.corsika_genprob('Hoerandel5')
# write generator data
generator_data['class'] = 'icecube.MuonGun.corsika_genprob'
generator_data['kwargs'] = dict(config='Hoerandel5')
generator_data['multiplier'] = (steering['CORSIKA::showers']/areanorm)
else:
# generator = Hoerandel(steering['CORSIKA::showers'],
# emin=get_steering_param('CORSIKA::eprimarymin', type=float)*I3Units.GeV,
# emax=get_steering_param('CORSIKA::eprimarymax', type=float)*I3Units.GeV,
# dslope=get_steering_param('CORSIKA::dslope', type=float),
# height=get_steering_param('CORSIKA::length', type=float)*I3Units.m,
# radius=get_steering_param('CORSIKA::radius', type=float)*I3Units.m)
# write generator data
generator_data['class'] = 'icecube.weighting.weighting.Hoerandel'
generator_data['kwargs'] = dict(
nevents=steering['CORSIKA::showers'],
emin=get_steering_param('CORSIKA::eprimarymin', type=float)*I3Units.GeV,
emax=get_steering_param('CORSIKA::eprimarymax', type=float)*I3Units.GeV,
dslope=get_steering_param('CORSIKA::dslope', type=float),
height=get_steering_param('CORSIKA::length', type=float)*I3Units.m,
radius=get_steering_param('CORSIKA::radius', type=float)*I3Units.m)
generator_data['multiplier'] = None
elif category == 'CORSIKA-ice-top':
# get the parent (generator) dataset, as the generator parameters may
# be buried several generations back
substeering = steering
while not ('CORSIKA::ebin' in substeering and 'CORSIKA::radius' in substeering):
try:
substeering = get_steering(cursor, substeering['inputdataset'])
except KeyError:
# sampling radius is in the topsimulator config
radius = get_tray_parameter(dataset_id, 'r', "icecube.simprod.modules.IceTopShowerGenerator")
break
else:
# sampling radius is a steering parameter
if type(substeering['CORSIKA::radius']) == str:
radius = SimprodFunction(substeering['CORSIKA::radius'], substeering)
else:
radius = lambda CORSIKA_ebin: substeering['CORSIKA::radius']
get_substeering_param = partial(get, substeering)
# logarithmic energy bin is | |
<gh_stars>10-100
#!/usr/bin/python
from __future__ import print_function, absolute_import
###Sterimol (and Tolman CA) Calculator###
###############################################################
# sterimoltools.py #
# #
###############################################################
#Python Libraries
import subprocess, sys, os
# `math` is used throughout (math.acos, math.sin, ...) but was never imported;
# neither star import below binds the module name itself.
import math
from numpy import *
#from scipy import *
from math import *
import numpy as np
#from vpython import *
#Chemistry Libaries
#from radialdata import *
#from pars import *
#Avoid number error warnings
import warnings
warnings.filterwarnings("ignore")
#Chemistry Arrays
# Element symbols indexed by atomic number; index 0 is the ghost atom "Bq".
periodictable = ["Bq","H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr",
                 "Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl",
                 "Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Uub","Uut","Uuq","Uup","Uuh","Uus","Uuo"]

# Verloop's original Sterimol parameters use CPK atomic VdW radii based on atom-type definitions
sterimol_atomtypes = ["C", "C2", "C3", "C4", "C5/N5", "C6/N6", "C7", "C8", "H", "N", "C66", "N4", "O", "O2", "P", "S", "S1", "F", "C1", "S4", "B1", "I"]

# CPK VdW radii in pm
# Parallel to sterimol_atomtypes above: cpk_radii[i] is the radius for
# sterimol_atomtypes[i].
cpk_radii = [150,160,160,150,170,170,170,150,100,150,170,145,135,135,140,170,100,135,180,140,195,215]
def rotrel(vect1, vect2, vect3):
    """Rotate *vect3* by the rotation that carries *vect1* onto *vect2*.

    Builds the axis-angle (Rodrigues) rotation matrix for the axis
    vect1 x vect2 and the angle between vect1 and vect2, then applies it.
    Undefined when vect1 and vect2 are parallel (zero rotation axis).
    """
    axis = np.cross(vect1, vect2)
    ang = math.acos((np.dot(vect1, vect2)) / (np.linalg.norm(vect1) * np.linalg.norm(vect2)))
    # Normalize the rotation axis to unit length.
    unit = np.dot(axis, 1 / (np.linalg.norm(axis)))
    ux = unit[0]
    uy = unit[1]
    uz = unit[2]
    c = math.cos(ang)
    s = math.sin(ang)
    t = 1 - c
    rotation = [[c + ux * ux * t, ux * uy * t - uz * s, ux * uz * t + uy * s],
                [uy * ux * t + uz * s, c + uy * uy * t, uy * uz * t - ux * s],
                [uz * ux * t - uy * s, uz * uy * t + ux * s, c + uz * uz * t]]
    return np.dot(rotation, vect3)
def calcdist(a, b, carts):
    """Euclidean distance between atoms *a* and *b* in the coordinate list *carts*."""
    displacement = np.subtract(carts[a], carts[b])
    return np.linalg.norm(displacement)
def elementID(massno):
    """Return the element symbol for atomic number *massno*, or "XX" when out of range."""
    if massno >= len(periodictable):
        return "XX"
    return periodictable[massno]
def bondiRadius(massno):
    """Bondi van der Waals radius (Angstrom) for atomic number *massno*.

    Radii from J. Phys. Chem. 1964, 68, 441-452, except hydrogen, which is
    taken from J. Phys. Chem. 1996, 100, 7384-7391.  Elements without a
    published radius, and any atomic number past the table, default to
    2.00 Angstrom.
    """
    # Radii for the entire periodic table (119 entries); index 0 is a dummy.
    bondi = [0.0,1.09, 1.40, 1.82,2.00,2.00,1.70,1.55,1.52,1.47,1.54,2.27,1.73,2.00,2.10,1.80,1.80,1.75,1.88,2.75,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,1.63,1.40,1.39,1.87,2.00,1.85,1.90,1.85,2.02,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,1.63,1.72,1.58,1.93,2.17,2.00,2.06,1.98,2.16,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,1.72,1.66,1.55,1.96,2.02,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,1.86, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00]
    if massno >= len(bondi):
        return 2.0
    return bondi[massno]
def calcopposite(atom1, atom2, angle, molcart):
    """Length of the side opposite *angle* in the right triangle whose
    hypotenuse is the atom1-atom2 distance."""
    hypotenuse = calcdist(atom1, atom2, molcart)
    return hypotenuse * math.sin(angle)
def calcadj(atom1, atom2, angle, molcart):
    """Length of the side adjacent to *angle* in the right triangle whose
    hypotenuse is the atom1-atom2 distance."""
    hypotenuse = calcdist(atom1, atom2, molcart)
    return hypotenuse * math.cos(angle)
def getcoords(atom, molcart):
    """Return the x, y, z coordinates of *atom* as a fresh 3-element list."""
    return [molcart[atom][axis] for axis in range(3)]
def dprod(v1, v2):
    """Dot product of two equal-length numeric sequences."""
    total = 0
    for left, right in zip(v1, v2):
        total += left * right
    return total

def length(v):
    """Euclidean (L2) norm of vector v."""
    return math.sqrt(dprod(v, v))

def angle(v1, v2):
    """Angle between v1 and v2 in radians.

    Cosines within 1e-6 of +/-1 are clamped so floating-point drift cannot
    push acos outside its domain.
    """
    val = dprod(v1, v2) / length(v1) / length(v2)
    if val > 0.999999:
        val = 1.0
    if val < -0.999999:
        val = -1.0
    return math.acos(val)
#Read molecule data from an input file - currently Gaussian *com and *pdb supported
class getinData:
    """Parse molecule data from an input file; Gaussian *.com and *.pdb are supported.

    On success the instance exposes (where present in the input): NAME, NATOMS,
    JOBTYPE, CHARGE, MULT, MEMREQ, NPROC, ATOMTYPES, LEVELTYPES (ONIOM layers),
    MMTYPES, CARTESIANS, CONNECTIVITY, OPTIONAL (trailing *.com sections) and
    CONSTRAINED (frozen ModRedundant coordinates).
    """
    def __init__(self, file):
        # The nested parsers below close over ``fileformat``, which is assigned
        # from the file extension near the bottom of __init__ before any of
        # them is called.
        def getJOBTYPE(self, inlines):
            # The Gaussian route section is the line containing "#".
            if fileformat == "com":
                for i in range(0,len(inlines)):
                    if inlines[i].find("#") > -1: self.JOBTYPE = inlines[i]
        def accepted_file(file):
            # True when the extension is one of the supported input formats.
            filesplit = file.split(".")
            if len(filesplit) > 1: # there is an extension
                for suffix in ["com", "pdb"]: # authorized suffix in ccparse
                    if filesplit[len(filesplit)-1] == suffix: return True
            return False
        def getCHARGE(self, inlines):
            # Charge and multiplicity follow the blank line after the route
            # section (one- or two-line route supported).
            if fileformat == "com":
                for i in range(0,len(inlines)):
                    if inlines[i].find("#") > -1:
                        if len(inlines[i+1].split()) == 0:
                            self.CHARGE = inlines[i+4].split()[0]
                            self.MULT = inlines[i+4].split()[1]
                        if len(inlines[i+2].split()) == 0:
                            self.CHARGE = inlines[i+5].split()[0]
                            self.MULT = inlines[i+5].split()[1]
        def getMEMREQ(self, inlines):
            # %mem Link 0 directive.
            if fileformat == "com":
                for i in range(0,len(inlines)):
                    if inlines[i].find("%mem") > -1: self.MEMREQ = inlines[i].split("=")[1].rstrip("\n")
        def getNPROC(self, inlines):
            # %nproc Link 0 directive.
            if fileformat == "com":
                for i in range(0,len(inlines)):
                    if inlines[i].find("%nproc") > -1: self.NPROC = inlines[i].split("=")[1].rstrip("\n")
        def getATOMTYPES(self, inlines):
            if fileformat == "com":
                # Locate the first coordinate line after the title/charge block.
                for i in range(0,len(inlines)):
                    if inlines[i].find("#") > -1:
                        if len(inlines[i+1].split()) == 0: start = i+5
                        if len(inlines[i+2].split()) == 0: start = i+6
                        break
                self.ATOMTYPES = []
                self.LEVELTYPES = []
                self.MMTYPES = []
                for i in range(start,len(inlines)):
                    if len(inlines[i].split()) ==0: break
                    else:
                        atominfo = inlines[i].split()[0]
                        # Strip ONIOM/MM decorations such as "C-CA--0.25".
                        atominfo = atominfo.split("-")[0]
                        if len(inlines[i].split()[0].split(atominfo))>1:
                            mminfo = inlines[i].split()[0].lstrip(atominfo)
                            self.MMTYPES.append(mminfo)
                        self.ATOMTYPES.append(atominfo.lower().capitalize())
                        # ONIOM layer flag (H/M/L) at the end of the line, if any.
                        level = ""
                        for oniomlevel in ["H", "M", "L"]:
                            if inlines[i][4:].rfind(oniomlevel)>1:
                                level = inlines[i][4:][inlines[i][4:].rfind(oniomlevel):].rstrip("\n")
                        self.LEVELTYPES.append(level)
            if fileformat == "pdb":
                self.ATOMTYPES = []
                for i in range(0,len(inlines)):
                    if inlines[i].find("ATOM") > -1:
                        # NOTE(review): split()[1] is the PDB atom serial number,
                        # not an atomic number — confirm the intended column.
                        self.ATOMTYPES.append(elementID(int(inlines[i].split()[1])))
                    if inlines[i].find("HETATM")>-1:
                        self.ATOMTYPES.append(inlines[i].split()[-1].lower().capitalize())
        def getCONNECTIVITY(self, inlines, natoms):
            if fileformat == "com":
                for i in range(0,len(inlines)):
                    if inlines[i].find("#") > -1:
                        if len(inlines[i+1].split()) == 0: start = i+natoms+6
                        if len(inlines[i+2].split()) == 0: start = i+natoms+7
                        break
                if start < len(inlines):
                    self.CONNECTIVITY = []
                    j = 1
                    for i in range(start,len(inlines)):
                        if len(inlines[i].split()) != 0:
                            try: num = int(inlines[i].split()[0])
                            except ValueError: num = 0
                            if num == j:
                                bond=[]
                                # FIX: use integer division — "/" yields a float
                                # in Python 3 and made range() below raise.
                                neighbors=(len(inlines[i].split())-1)//2
                                if neighbors!=0:
                                    for k in range(0,neighbors): bond.append((inlines[i].split()[1+2*k])+"__"+(inlines[i].split()[2+2*k]))
                                self.CONNECTIVITY.append(bond)
                                j = j+1
                    if len(self.CONNECTIVITY) == natoms:
                        # Symmetrize: make sure every bond is listed on both atoms.
                        for i in range(0, natoms):
                            for partner in self.CONNECTIVITY[i]:
                                info = partner.split("__")
                                nextatom = int(info[0])-1
                                bondorder = float(info[1])
                                nope=0
                                for otherpartner in self.CONNECTIVITY[nextatom]:
                                    otherinfo = otherpartner.split("__")
                                    othernextatom = int(otherinfo[0])-1
                                    if othernextatom==i: nope=nope+1
                                if nope==0: self.CONNECTIVITY[nextatom].append(str(i+1)+"__"+info[1])
                    # Whatever follows the connectivity block (e.g. ModRedundant).
                    self.OPTIONAL = []
                    for i in range(start+j,len(inlines)):
                        if len(inlines[i].split()) != 0: self.OPTIONAL.append(inlines[i])
            if fileformat == "pdb":
                self.CONNECTIVITY = []
                for i in range(self.NATOMS):
                    self.CONNECTIVITY.append([]) #pre-fill the list
                for i in range(0,len(inlines)):
                    if inlines[i].find("CONECT") > -1: # a connectivity
                        conect = inlines[i].split()
                        if len(conect) > 1: # contains at least a bond connectivity
                            try: num = int(conect[1])-1
                            except ValueError: num = 0
                            bond=[]
                            neighbors=(len(conect)-2)
                            if neighbors!=0:
                                for k in range(0,neighbors): bond.append((conect[2+k])+"__1.0") #1.0 default value for bond order: single bond
                            self.CONNECTIVITY[num].extend(bond) #add the new bonds
                # several bonds might have been added several time if the software was stupid. Lets clean
                for i in range(self.NATOMS):
                    self.CONNECTIVITY[i] = list(set(self.CONNECTIVITY[i]))
                #At the end, we should have a connectivity for each atom. We check the data is consistent and we fix it.
                if len(self.CONNECTIVITY) == natoms:
                    for i in range(0, natoms):
                        for partner in self.CONNECTIVITY[i]:
                            info = partner.split("__")
                            nextatom = int(info[0])-1
                            bondorder = float(info[1])
                            nope=0
                            for otherpartner in self.CONNECTIVITY[nextatom]:
                                otherinfo = otherpartner.split("__")
                                othernextatom = int(otherinfo[0])-1
                                if othernextatom==i: nope=nope+1
                            if nope==0: self.CONNECTIVITY[nextatom].append(str(i+1)+"__"+info[1]) # add the connectivity to the nextatom
                else:
                    print("Error: Number of connectivity inconsistent with coordinates")
        def getCARTESIANS(self, inlines, natoms):
            if fileformat == "com":
                for i in range(0,len(inlines)):
                    if inlines[i].find("#") > -1:
                        if len(inlines[i+1].split()) == 0: start = i+5
                        if len(inlines[i+2].split()) == 0: start = i+6
                        break
                self.CARTESIANS = []
                for i in range(start,len(inlines)):
                    if len(inlines[i].split()) == 0: break
                    elif len(inlines[i].split()) == 4: self.CARTESIANS.append([float(inlines[i].split()[1]), float(inlines[i].split()[2]), float(inlines[i].split()[3])])
                    elif len(inlines[i].split()) > 4: self.CARTESIANS.append([float(inlines[i].split()[2]), float(inlines[i].split()[3]), float(inlines[i].split()[4])])
            if fileformat == "pdb":
                self.CARTESIANS = []
                for i in range(0,len(inlines)):
                    if inlines[i].find("ATOM") > -1:
                        # FIX: float() of a list slice always raised TypeError;
                        # read x, y, z the same way as the HETATM branch below.
                        self.CARTESIANS.append([float(inlines[i].split()[-6]), float(inlines[i].split()[-5]), float(inlines[i].split()[-4])])
                    if inlines[i].find("HETATM")>-1:
                        self.CARTESIANS.append([float(inlines[i].split()[-6]), float(inlines[i].split()[-5]), float(inlines[i].split()[-4])])
        def getCONSTRAINED(self, optional):
            # Parse ModRedundant "F"(freeze) lines: X atom, B bond, A angle, D dihedral.
            if fileformat == "com":
                self.CONSTRAINED = []
                for line in optional:
                    if line.find("X") > -1 and line.find("F") > -1: self.CONSTRAINED.append([int(line.split(" ")[1])-1])
                    if line.find("B") > -1 and line.find("F") > -1: self.CONSTRAINED.append([int(line.split(" ")[1])-1,int(line.split(" ")[2])-1])
                    # FIX: the third atom index of an angle constraint was outside
                    # the append() call, so only two indices were ever stored.
                    if line.find("A") > -1 and line.find("F") > -1: self.CONSTRAINED.append([int(line.split(" ")[1])-1,int(line.split(" ")[2])-1,int(line.split(" ")[3])-1])
                    if line.find("D") > -1 and line.find("F") > -1: self.CONSTRAINED.append([int(line.split(" ")[1])-1,int(line.split(" ")[2])-1, int(line.split(" ")[3])-1, int(line.split(" ")[4])-1])
        if accepted_file(file):
            # default values
            self.CHARGE = 0
            self.MULT = 0
            self.MEMREQ = ""
            self.NPROC = ""
            self.ATOMTYPES = []
            self.LEVELTYPES = []
            self.MMTYPES = []
            self.CONNECTIVITY = []
            self.CARTESIANS = []
            self.CONSTRAINED = []
            # analyze
            filesplit = file.split(".")
            fileformat = filesplit[len(filesplit)-1]
            infile = open(file,"r")
            inlines = infile.readlines()
            self.NAME = file
            getJOBTYPE(self, inlines)
            getCHARGE(self, inlines)
            getMEMREQ(self, inlines)
            getNPROC(self, inlines)
            getATOMTYPES(self, inlines)
            self.NATOMS=len(self.ATOMTYPES)
            getCARTESIANS(self, inlines, self.NATOMS)
            getCONNECTIVITY(self, inlines, self.NATOMS)
            if hasattr(self, "OPTIONAL"): getCONSTRAINED(self, self.OPTIONAL)
            if len(self.ATOMTYPES) == 0 or len(self.CARTESIANS) ==0: print("\nFATAL ERROR: Input file [ %s ] cannot be read"%file)
        # FIX: the unsupported-format message never interpolated the filename.
        else: print("\nError: Input file [ %s ] is not supported. [com, pdb]" % file)
#Read molecule data from an output file #######################
class getoutData:
def __init__(self, file):
self.NAME = file
        def accepted_file(file):
            # True when the file extension marks a supported output format.
            filesplit = file.split(".")
            if len(filesplit) > 1: # there is an extension
                for suffix in ["out", "log"]: # authorized suffix
                    if filesplit[len(filesplit)-1] == suffix: return True
            return False
        def getFORMAT(self, outlines):
            # Detect which program produced the output; first matching marker wins.
            for i in range(0,len(outlines)):
                if outlines[i].find("MOPAC") > -1: self.FORMAT = "Mopac"; break
                if outlines[i].find("Gaussian") > -1: self.FORMAT = "Gaussian"; break
        def getCHARGE(self, outlines, format):
            # Extract total charge (self.CHARGE) and spin multiplicity (self.MULT)
            # from the program-specific output text.
            if format == "Mopac":
                for i in range(0,len(outlines)):
                    if outlines[i].find("CHARGE ON SYSTEM") > -1:
                        self.CHARGE = int(outlines[i].split()[5])
                        # Mopac defaults to a singlet unless a state line says otherwise.
                        self.MULT = 1
                    if outlines[i].find("STATE CALCULATION") > -1:
                        if outlines[i].split()[0] == "SINGLET": self.MULT = 1
                        if outlines[i].split()[0] == "DOUBLET": self.MULT = 2
                        if outlines[i].split()[0] == "TRIPLET": self.MULT = 3
                        break
            if format == "Gaussian":
                for i in range(0,len(outlines)):
                    if outlines[i].find("Charge = ") > -1:
                        self.CHARGE = int(outlines[i].split()[2])
                        self.MULT = int(outlines[i].split()[5].rstrip("\n"))
                        break
def getATOMTYPES(self, outlines, format):
self.ATOMTYPES = []
self.CARTESIANS = []
if format == "Mopac":
for i in range(0,len(outlines)):
if outlines[i].find("CHEMICAL") > -1: standor = i+3
if outlines[i].find("Empirical Formula") > -1:
self.NATOMS = int((outlines[i].split("="))[1].split()[0])
if hasattr(self, "NATOMS"):
for i in range (standor,standor+self.NATOMS):
outlines[i] = outlines[i].replace("*", " ")
s = outlines[i].split()[3]
atom = ''.join([j for j in s if not j.isdigit()]).strip()
#print(outlines[i].split())
#print(atom)
self.ATOMTYPES.append(atom.lower().capitalize()) # ''.join([i for i in s if not i.isdigit()])
#self.ATOMTYPES.append(filter(lambda x: x.isalpha(), outlines[i].split()[3]))
#print(outlines[i])
self.CARTESIANS.append([float(outlines[i].split()[-3]), float(outlines[i].split()[-2]), float(outlines[i].split()[-1])])
if format == "Gaussian":
for i in | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import bisect
import collections
import io
import itertools
import json
import os
import pprint
import re
import sqlite3
import stat
import sys
import tarfile
import tempfile
import time
import traceback
from timeit import default_timer as timer
try:
import indexed_bzip2
from indexed_bzip2 import IndexedBzip2File
except ImportError:
print( "[Warning] The indexed_bzip2 module was not found. Please install it to open bz2 compressed TAR files!" )
try:
import indexed_gzip
from indexed_gzip import IndexedGzipFile
except ImportError:
print( "[Warning] The indexed_gzip module was not found. Please install it to open gzip compressed TAR files!" )
import fuse
__version__ = '0.5.0'
# Debug verbosity level (presumably 0 silences diagnostics — consulted by
# logging sites elsewhere in this module; verify before changing).
printDebug = 1
def overrides( parentClass ):
    """Decorator asserting that the decorated method overrides a member of
    ``parentClass``; fails fast at class-creation time otherwise."""
    def checkAndReturn( method ):
        # dir() covers inherited members too, so any parent attribute counts.
        assert method.__name__ in dir( parentClass )
        return method
    return checkAndReturn
class ProgressBar:
    """Console progress reporter that prints at most once per updateInterval.

    Call update(value) with a monotonically increasing position in
    [0, maxValue]; two ETA estimates are printed (whole-run average and
    recent-window rate).
    """
    def __init__( self, maxValue ):
        self.maxValue = maxValue          # value at which the task is 100% done
        self.lastUpdateTime = time.time() # timestamp of the last printed update
        self.lastUpdateValue = 0          # position at the last printed update
        self.updateInterval = 2 # seconds
        self.creationTime = time.time()   # used for the whole-run ETA estimate
    def update( self, value ):
        # Throttle: skip unless updateInterval seconds passed since last print.
        if self.lastUpdateTime is not None and ( time.time() - self.lastUpdateTime ) < self.updateInterval:
            return
        # NOTE(review): both ETA formulas divide by value and by
        # (value - lastUpdateValue); a first call with value == 0, or a repeat
        # of the previous value, raises ZeroDivisionError — confirm callers
        # always pass strictly increasing positive positions.
        # Use whole interval since start to estimate time
        eta1 = int( ( time.time() - self.creationTime ) / value * ( self.maxValue - value ) )
        # Use only a shorter window interval to estimate time.
        # Accounts better for higher speeds in beginning, e.g., caused by caching effects.
        # However, this estimate might vary a lot while the other one stabilizes after some time!
        eta2 = int( ( time.time() - self.lastUpdateTime ) / ( value - self.lastUpdateValue ) * ( self.maxValue - value ) )
        print( "Currently at position {} of {} ({:.2f}%). "
               "Estimated time remaining with current rate: {} min {} s, with average rate: {} min {} s."
               .format( value, self.maxValue, value / self.maxValue * 100.,
                        eta2 // 60, eta2 % 60,
                        eta1 // 60, eta1 % 60 ),
               flush = True )
        self.lastUpdateTime = time.time()
        self.lastUpdateValue = value
class StenciledFile(io.BufferedIOBase):
    """A file abstraction layer giving a stenciled view to an underlying file."""
    def __init__(self, fileobj, stencils):
        """
        stencils: A list of tuples specifying the offset and length of the underlying file to use.
                  The order of these tuples will be kept.
                  The offset must be non-negative and the size must be positive.
        Examples:
            stencil = [(5,7)]
                Makes a new 7B sized virtual file starting at offset 5 of fileobj.
            stencil = [(0,3),(5,3)]
                Make a new 6B sized virtual file containing bytes [0,1,2,5,6,7] of fileobj.
            stencil = [(0,3),(0,3)]
                Make a 6B size file containing the first 3B of fileobj twice concatenated together.
        """
        self.fileobj = fileobj
        self.offsets = [ x[0] for x in stencils ]
        self.sizes   = [ x[1] for x in stencils ]
        # Calculate cumulative sizes so that cumsizes[i] is the virtual offset
        # where stencil i begins and cumsizes[-1] is the total virtual size.
        self.cumsizes = [ 0 ]
        for offset, size in stencils:
            assert offset >= 0
            assert size > 0
            self.cumsizes.append( self.cumsizes[-1] + size )
        # Seek to the first stencil offset in the underlying file so that "read" will work out-of-the-box
        self.seek( 0 )
    def _findStencil( self, offset ):
        """
        Return index to stencil where offset belongs to. E.g., for stencils [(3,5),(8,2)], offsets 0 to
        and including 4 will still be inside stencil (3,5), i.e., index 0 will be returned. For offset 6,
        index 1 would be returned because it now is in the second contiguous region / stencil.
        """
        # bisect_left( value ) gives an index for a lower range: value < x for all x in list[0:i]
        # Because value >= 0 and list starts with 0 we can therefore be sure that the returned i>0
        # Consider the stencils [(11,2),(22,2),(33,2)] -> cumsizes [0,2,4,6]. Seek to offset 2 should seek to 22.
        assert offset >= 0
        i = bisect.bisect_left( self.cumsizes, offset + 1 ) - 1
        assert i >= 0
        return i
    def close(self):
        # Closes the underlying file object as well.
        self.fileobj.close()
    def closed(self):
        # FIX: file objects expose ``closed`` as a property, not a method; the
        # original ``self.fileobj.closed()`` always raised TypeError.
        return self.fileobj.closed
    def fileno(self):
        return self.fileobj.fileno()
    def seekable(self):
        return self.fileobj.seekable()
    def readable(self):
        return self.fileobj.readable()
    def writable(self):
        # The stenciled view is strictly read-only.
        return False
    def read(self, size=-1):
        """Read up to ``size`` bytes from the virtual view (-1 = to the end)."""
        if size == -1:
            size = self.cumsizes[-1] - self.offset
        # This loop works in a kind of leapfrog fashion. On each even loop iteration it seeks to the next stencil
        # and on each odd iteration it reads the data and increments the offset inside the stencil!
        result = b''
        i = self._findStencil( self.offset )
        while size > 0 and i < len( self.sizes ):
            # Read as much as requested or as much as the current contiguous region / stencil still contains
            readableSize = min( size, self.sizes[i] - ( self.offset - self.cumsizes[i] ) )
            if readableSize == 0:
                # Go to next stencil
                i += 1
                if i >= len( self.offsets ):
                    break
                self.fileobj.seek( self.offsets[i] )
            else:
                # Actually read data
                tmp = self.fileobj.read( readableSize )
                self.offset += len( tmp )
                result += tmp
                size -= readableSize
            # Now, either size is 0 or readableSize will be 0 in the next iteration
        return result
    def seek(self, offset, whence=io.SEEK_SET):
        """Seek inside the virtual view; returns the new virtual offset.

        NOTE(review): an unknown ``whence`` silently leaves the offset
        unchanged — confirm that is acceptable to callers.
        """
        if whence == io.SEEK_CUR:
            self.offset += offset
        elif whence == io.SEEK_END:
            self.offset = self.cumsizes[-1] + offset
        elif whence == io.SEEK_SET:
            self.offset = offset
        if self.offset < 0:
            raise Exception("Trying to seek before the start of the file!")
        if self.offset >= self.cumsizes[-1]:
            # Seeking at or past EOF: don't touch the underlying file.
            return self.offset
        i = self._findStencil( self.offset )
        offsetInsideStencil = self.offset - self.cumsizes[i]
        assert offsetInsideStencil >= 0
        assert offsetInsideStencil < self.sizes[i]
        self.fileobj.seek( self.offsets[i] + offsetInsideStencil, io.SEEK_SET )
        return self.offset
    def tell(self):
        return self.offset
class SQLiteIndexedTar:
"""
This class reads once through the whole TAR archive and stores TAR file offsets
for all contained files in an index to support fast seeking to a given file.
"""
__slots__ = (
'__version__',
'tarFileName',
'mountRecursively',
'indexFileName',
'sqlConnection',
'parentFolderCache', # stores which parent folders were last tried to add to database and therefore do exist
'rawFileObject', # only set when opening a compressed file and only kept to keep the compressed file handle
# from being closed by the garbage collector
'tarFileObject', # file object to the uncompressed (or decompressed) TAR file to read actual data out of
'compression', # stores what kind of compression the originally specified TAR file uses.
)
# Names must be identical to the SQLite column headers!
FileInfo = collections.namedtuple( "FileInfo", "offsetheader offset size mtime mode type linkname uid gid istar issparse" )
def __init__(
self,
tarFileName = None,
fileObject = None,
writeIndex = False,
clearIndexCache = False,
recursive = False,
gzipSeekPointSpacing = 4*1024*1024,
):
"""
tarFileName : Path to the TAR file to be opened. If not specified, a fileObject must be specified.
If only a fileObject is given, the created index can't be cached (efficiently).
fileObject : A io.IOBase derived object. If not specified, tarFileName will be opened.
If it is an instance of IndexedBzip2File or IndexedGzipFile, then the offset
loading and storing from and to the SQLite database is managed automatically by this class.
"""
# Version 0.1.0:
# - Initial version
# Version 0.2.0:
# - Add sparse support and 'offsetheader' and 'issparse' columns to the SQLite database
# - Add TAR file size metadata in order to quickly check whether the TAR changed
# - Add 'offsetheader' to the primary key of the 'files' table so that files which were
# updated in the TAR can still be accessed if necessary.
self.__version__ = '0.2.0'
self.parentFolderCache = []
self.mountRecursively = recursive
self.sqlConnection = None
assert tarFileName or fileObject
if not tarFileName:
self.tarFileName = '<file object>'
self.createIndex( fileObject )
# return here because we can't find a save location without any identifying name
return
self.tarFileName = os.path.abspath( tarFileName )
if not fileObject:
fileObject = open( self.tarFileName, 'rb' )
self.tarFileObject, self.rawFileObject, self.compression = \
SQLiteIndexedTar._openCompressedFile( fileObject, gzipSeekPointSpacing )
# will be used for storing indexes if current path is read-only
possibleIndexFilePaths = [
self.tarFileName + ".index.sqlite",
os.path.expanduser( os.path.join( "~", ".ratarmount",
self.tarFileName.replace( "/", "_" ) + ".index.sqlite" ) )
]
self.indexFileName = None
if clearIndexCache:
for indexPath in possibleIndexFilePaths:
if os.path.isfile( indexPath ):
os.remove( indexPath )
# Try to find an already existing index
for indexPath in possibleIndexFilePaths:
if self._tryLoadIndex( indexPath ):
self.indexFileName = indexPath
break
if self.indexIsLoaded():
self._loadOrStoreCompressionOffsets()
return
| |
# <gh_stars>0
from os import listdir, system, remove
from os.path import isfile, join
import re
import multiprocessing
from urllib.parse import unquote
import json
from lxml import etree
import pandas as pd
import tqdm
import time
import httplib2
from bs4 import BeautifulSoup, SoupStrainer
import wget
from multiprocessing.pool import ThreadPool
import os
import uuid
from queue import Queue
from typing import Optional
import csv
from threading import Thread
import pickle
from config import *
__author__ = "<NAME>"
class EuropePubMedCentralDataset:
    def __init__(self,
                 start_path,
                 writing_multiple_csv,
                 skip_download,
                 download_workers,
                 unzip_threads,
                 process_article_threads,
                 max_file_to_download):
        """Configure working folders and concurrency settings.

        start_path: root folder; 'dump', 'articles' and 'csv' subfolders are
            created beneath it.
        writing_multiple_csv: True -> one CSV per article file (concatenated at
            the end); False -> a single dataset.csv fed through a queue.
        skip_download: skip fetching the OA dumps from EuropePMC.
        download_workers / unzip_threads / process_article_threads: pool sizes
            for the respective pipeline stages.
        max_file_to_download: cap on the number of dumps to fetch (None = all).
        """
        self.pubmed_file_path = start_path
        self.skip_download = skip_download
        self.download_workers = download_workers
        self.unzip_threads = unzip_threads
        self.process_article_threads = process_article_threads
        self.max_file_to_download = max_file_to_download
        self.pubmed_dump_file_path = join(self.pubmed_file_path, 'dump')
        self.articles_path = join(self.pubmed_file_path, 'articles')
        self.csv_file_path = join(self.pubmed_file_path, 'csv')
        # Number of sub-buckets per dump folder — presumably defined in the
        # star-imported config module; verify.
        self.folder_articles = folder_articles
        # We can both exploit a queue in order to write into a single dataset.csv
        # or to save multiple csv and then concatenate them into the final dataset
        self.writing_multiple_csv = writing_multiple_csv
        if not self.writing_multiple_csv:
            self.queue = Queue()
        os.makedirs(self.articles_path, exist_ok=True)
        os.makedirs(self.csv_file_path, exist_ok=True)
        os.makedirs(self.pubmed_dump_file_path, exist_ok=True)
    def start(self):
        """Run the full pipeline: download dumps, unzip them into per-article
        XML files, process every article, then concatenate the CSV output."""
        if not self.skip_download:
            # for each file from the pubmed dump
            f = self._get_files_in_dir(self.pubmed_dump_file_path)
            # load local index of already downloaded dump and add to the list of already downloaded file
            if os.path.isfile(join(self.pubmed_file_path, 'downloaded-dump.txt')):
                with open(join(self.pubmed_file_path, 'downloaded-dump.txt'), 'r') as index_file:
                    # NOTE(review): only the first line is read — confirm the
                    # index file is expected to hold a single entry.
                    f.append(index_file.readline().replace("\n",""))
            # get the difference between files to download and files that we have
            links = self.get_links_from_pubmed()
            if len(links) > 0:
                todownload = list(set(links).difference(set(f)))
                if self.max_file_to_download != None:
                    todownload = todownload[:int(self.max_file_to_download)]
                if len(todownload):
                    print("\nDownloading {} OA dumps from EuropePubMedCentral".format(len(todownload)))
                    with multiprocessing.Pool(self.download_workers) as pool:
                        pool.map(worker_download_links, ((d, self.pubmed_dump_file_path) for d in todownload))
                else:
                    print("No link to download!")
        # Update the file list
        f = self._get_files_in_dir(self.pubmed_dump_file_path)
        # Unzip all the files
        if len(f) > 0:
            print("\nUnzipping all the articles")
            s = time.time()
            with ThreadPool(self.unzip_threads) as pool:
                list(tqdm.tqdm(pool.imap(self.worker_unzip_files, f), total=len(f)))
            e = time.time()
            print("\nTime: {}".format((e - s)))
        # process each article
        f = self._get_articles_in_dir(self.articles_path)
        if len(f) > 0:
            # Entity-resolution lookup used while processing references.
            self.load_PMC_ids()
            s = time.time()
            print("\nProcessing the articles")
            self.process_articles(f)
            e = time.time()
            print("\nTime: {}".format((e - s)))
        self._concatenate_datasets(self.csv_file_path)
    def load_PMC_ids(self):
        """Build (or load) self.articleids: a dict mapping both PMID (int) and
        PMCID (str) keys to {'PMID', 'PMCID', 'DOI'} rows, pickled for reuse."""
        # Download articles' IDs --
        if not os.path.isfile(join(self.pubmed_file_path, 'PMC-ids.csv.gz')):
            print("\nDownloading PMC's IDs dataset")
            wget.download('http://ftp.ncbi.nlm.nih.gov/pub/pmc/PMC-ids.csv.gz', self.pubmed_file_path)
        # Pickle a dictionary of the dataframe containing only the keys that we care about
        if not os.path.isfile(join(self.pubmed_file_path, 'PMC-ids.pkl')):
            # Read the dataset and create a single big dict having all the needed keys for entity resolution
            articleids = pd.read_csv(join(self.pubmed_file_path, 'PMC-ids.csv.gz'), usecols=['PMCID', 'PMID', 'DOI'],
                                     low_memory=True)
            articleids = articleids.drop_duplicates()
            # NOTE(review): the assignments below mutate a filtered view and may
            # trigger pandas' SettingWithCopyWarning — consider .copy(); verify
            # the values are applied as intended.
            view = articleids[articleids['PMID'].notna()]
            view['PMID'] = view['PMID'].astype(int)
            view_clean = view.drop_duplicates(subset='PMID', keep="last")
            dataset = view_clean.set_index('PMID').to_dict('index')
            del view
            view = articleids[articleids['PMCID'].notna()]
            view['PMID'] = view['PMID'].astype('Int64')
            del articleids
            view_clean = view.drop_duplicates(subset='PMCID', keep="last")
            # PMID-keyed and PMCID-keyed entries merged into one lookup dict.
            self.articleids = {**dataset, **view_clean.set_index('PMCID').to_dict('index')}
            del view
            pickle.dump(obj=self.articleids, file=open(join(self.pubmed_file_path, 'PMC-ids.pkl'), 'wb'))
        else:
            print("Loading PMC IDs from pickled dict")
            self.articleids = pickle.load(open(join(self.pubmed_file_path, 'PMC-ids.pkl'), 'rb'))
def write_to_csv(self):
keys = ['cur_doi', 'cur_pmid', 'cur_pmcid', 'cur_name', 'references']
while True:
if not self.queue.empty():
row = self.queue.get()
if row == "STOP":
return
else:
row = [v for k, v in row.items()]
if not os.path.isfile(join(self.csv_file_path, "dataset.csv")):
with open(join(self.csv_file_path, "dataset.csv"), 'w', newline='') as output_file:
dict_writer = csv.writer(output_file, delimiter='\t')
dict_writer.writerow(keys)
dict_writer.writerow(row)
else:
with open(join(self.csv_file_path, "dataset.csv"), 'a', newline='') as output_file:
dict_writer = csv.writer(output_file, delimiter='\t')
dict_writer.writerow(row)
    def worker_article(self, f: str) -> None:
        """Parse one article XML file: resolve its PMID/PMCID/DOI, extract its
        reference list, and emit one row (per-file CSV or shared queue).

        Unparseable files are moved to articles/exceptions, files with no
        identifier to articles/without-id; the source file is removed in both
        cases.
        """
        # Use the extracted file
        with open(f, 'r') as fi:
            filename = f.split(os.sep)[-1]
            try:
                cur_xml = etree.parse(fi)
            except Exception as e:
                # Keep a copy of the raw text for later inspection, then drop it.
                print(e)
                os.makedirs(join(self.articles_path, 'exceptions'), exist_ok=True)
                with open(join(self.articles_path, 'exceptions', filename), 'w') as fout:
                    for line in fi:
                        fout.write(line)
                os.remove(f)
                return
        # Identifiers of the citing article itself.
        cur_pmid = self.get_id_from_xml_source(cur_xml, 'pmid')
        cur_pmcid = self.get_id_from_xml_source(cur_xml, 'pmcid')
        if cur_pmcid is not None and not cur_pmcid.startswith("PMC"):
            cur_pmcid = "PMC{}".format(cur_pmcid)
        cur_doi = self.normalise_doi(self.get_id_from_xml_source(cur_xml, 'doi'))
        # If we have no identifier, stop the processing of the article
        if cur_pmid is None and cur_pmcid is None and cur_doi is None:
            os.makedirs(join(self.articles_path, 'without-id'), exist_ok=True)
            with open(join(self.articles_path, 'without-id', filename), 'w') as fout:
                with open(f, 'r') as fi:
                    for line in fi:
                        fout.write(line)
            os.remove(f)
            return
        try:
            # Extract missing metadata from the ID dataset
            if cur_pmid is None or cur_pmcid is None or cur_doi is None:
                row = None
                if cur_pmid is not None and self.articleids.__contains__(int(cur_pmid)):
                    row = self.articleids[int(cur_pmid)]
                elif cur_pmcid is not None and self.articleids.__contains__(cur_pmcid):
                    row = self.articleids[cur_pmcid]
                if row is not None and len(row):
                    if cur_pmid is None and row['PMID'] is not None and not pd.isna(row['PMID']):
                        cur_pmid = row['PMID']
                    if cur_pmcid is None and row['PMCID'] is not None:
                        cur_pmcid = row['PMCID']
                    if cur_doi is None and row['DOI'] is not None:
                        cur_doi = self.normalise_doi(str(row['DOI']))
            # One entry per <ref> in the article's reference list.
            references = cur_xml.xpath(".//ref-list/ref")
            references_list = []
            if len(references):
                for reference in references:
                    entry_text = self.create_entry_xml(reference)
                    ref_pmid = None
                    ref_doi = None
                    ref_pmcid = None
                    ref_url = None
                    # NOTE(review): if reference.get('id') returns None, len()
                    # raises TypeError, and if it is "" then ref_xmlid is never
                    # bound — both paths end in the outer except; confirm this
                    # is acceptable.
                    ref_xmlid_attr = reference.get('id')
                    if len(ref_xmlid_attr):
                        ref_xmlid = ref_xmlid_attr
                        if ref_xmlid == "":
                            ref_xmlid = None
                    ref_pmid_el = reference.xpath(".//pub-id[@pub-id-type='pmid']")
                    if len(ref_pmid_el):
                        ref_pmid = etree.tostring(
                            ref_pmid_el[0], method="text", encoding='unicode').strip()
                    ref_doi_el = reference.xpath(".//pub-id[@pub-id-type='doi']")
                    if len(ref_doi_el):
                        ref_doi = self.normalise_doi(etree.tostring(
                            ref_doi_el[0], method="text", encoding='unicode').lower().strip())
                        if ref_doi == "":
                            ref_doi = None
                    ref_pmcid_el = reference.xpath(".//pub-id[@pub-id-type='pmcid']")
                    if len(ref_pmcid_el):
                        ref_pmcid = etree.tostring(
                            ref_pmcid_el[0], method="text", encoding='unicode').strip()
                        if ref_pmcid == "":
                            ref_pmcid = None
                        elif not ref_pmcid.startswith("PMC"):
                            ref_pmcid = "PMC{}".format(ref_pmcid)
                    ref_url_el = reference.xpath(".//ext-link")
                    if len(ref_url_el):
                        ref_url = etree.tostring(
                            ref_url_el[0], method="text", encoding='unicode').strip()
                        if not ref_url.startswith("http"):
                            ref_url = None
                    # Extract missing metadata from the ID dataset
                    if ref_pmid is None or ref_pmcid is None or ref_doi is None:
                        row = None
                        if ref_pmid is not None and self.articleids.__contains__(int(ref_pmid)):
                            row = self.articleids[int(ref_pmid)]
                        elif ref_pmcid is not None and self.articleids.__contains__(ref_pmcid):
                            row = self.articleids[ref_pmcid]
                        if row is not None and len(row):
                            if ref_pmid is None and row['PMID'] is not None:
                                ref_pmid = row['PMID']
                            if ref_pmcid is None and row['PMCID'] is not None:
                                ref_pmcid = row['PMCID']
                                if not ref_pmcid.startswith("PMC"):
                                    ref_pmcid = "PMC{}".format(ref_pmcid)
                            if ref_doi is None and row['DOI'] is not None:
                                ref_doi = self.normalise_doi(str(row['DOI']))
                    # Create an object to store the reference
                    obj = {}
                    if entry_text is not None:
                        obj['entry_text'] = entry_text
                    if ref_pmid is not None:
                        obj['ref_pmid'] = str(ref_pmid)
                    if ref_pmcid is not None:
                        obj['ref_pmcid'] = ref_pmcid
                    if ref_doi is not None:
                        obj['ref_doi'] = ref_doi
                    if ref_url is not None:
                        obj['ref_url'] = ref_url
                    if ref_xmlid is not None:
                        obj['ref_xmlid'] = ref_xmlid
                    references_list.append(obj)
            if self.writing_multiple_csv:
                # One small CSV per article; concatenated later.
                df = pd.DataFrame({
                    'cur_doi': [cur_doi],
                    'cur_pmid': [cur_pmid],
                    'cur_pmcid': [cur_pmcid],
                    'cur_name': [f.split("articles"+os.sep)[-1]],
                    'references': [json.dumps(references_list)]
                })
                df.to_csv(join(self.csv_file_path, "{}.csv".format(filename)), sep="\t", index=False)
            else:
                # Hand the row to the single write_to_csv consumer thread.
                self.queue.put({
                    'cur_doi': cur_doi,
                    'cur_pmid': cur_pmid,
                    'cur_pmcid': cur_pmcid,
                    'cur_name': f,
                    'references': json.dumps(references_list)
                })
        except Exception as e:
            os.makedirs(join(self.articles_path, 'exceptions'), exist_ok=True)
            with open(join(self.articles_path, 'exceptions', filename), 'w') as fout:
                with open(f, 'r') as fi:
                    for line in fi:
                        fout.write(line)
            os.remove(f)
            print("Exception {} with file: {}".format(e, f))
            return
def process_articles(self, f):
articles_to_process = []
for dump_articles_folder in f:
for path, subdirs, files in os.walk(os.path.join(self.articles_path, dump_articles_folder)):
for name in files:
articles_to_process.append(os.path.join(path, name))
if not self.writing_multiple_csv:
consumer = Thread(target=self.write_to_csv)
consumer.setDaemon(True)
consumer.start()
with ThreadPool(self.process_article_threads) as pool:
list(tqdm.tqdm(pool.imap(self.worker_article, (fi for fi in articles_to_process)), total=len(articles_to_process)))
if not self.writing_multiple_csv:
self.queue.put("STOP")
consumer.join()
@staticmethod
def normalise_doi(doi_string) -> Optional[
str]: # taken from https://github.com/opencitations/index/blob/master/identifier/doimanager.py
if doi_string is not None:
try:
doi_string = re.sub("\0+", "", re.sub("\s+", "", unquote(doi_string[doi_string.index("10."):])))
return doi_string.lower().strip()
except ValueError:
return None
else:
return None
    def worker_unzip_files(self, f: str) -> None:
        """Gunzip one downloaded dump, split it into one XML file per article
        (bucketed into folder_articles subfolders), then delete the dump.

        NOTE(review): os.system() is invoked with the filename interpolated
        into a shell string — a filename containing shell metacharacters would
        be interpreted by the shell. Dump names come from the EuropePMC index,
        but subprocess.run([...], shell=False) would be safer; confirm.
        """
        try:
            # Unzip
            system("gunzip -k {}".format(join(self.pubmed_dump_file_path, f)))
            # This is the new filename
            gzip_name = f
            f = f.replace(".gz", "")
            # Create one file for each article, having its named
            tree = etree.parse(join(self.pubmed_dump_file_path, f), etree.XMLParser(remove_blank_text=True))
            # Extract all the article nodes
            articles = tree.findall('article')
            dump_articles_dir = os.path.join(self.articles_path, f.replace(".xml", ""))
            os.makedirs(dump_articles_dir, exist_ok=True)
            # Pre-create the bucket folders articles are distributed into.
            for i in range(self.folder_articles+1):
                os.makedirs(os.path.join(dump_articles_dir, str(i)), exist_ok=True)
            for i, cur_xml in enumerate(articles):
                # Round-robin bucket; each article gets a fresh UUID filename.
                dir_of_article = os.path.join(dump_articles_dir, str(i % self.folder_articles))
                with open(join(dir_of_article, "{}.xml".format(str(uuid.uuid4()))), 'w') as writefile:
                    writefile.write(etree.tostring(cur_xml, pretty_print=True, encoding='unicode'))
            # Remove the downloaded dump
            remove(join(self.pubmed_dump_file_path, f))
            remove(join(self.pubmed_dump_file_path, gzip_name))
        except Exception as e:
            # Best-effort cleanup of any partially extracted XML files.
            print("Exception during the extraction: {}".format(e))
            system("rm {}{}*.xml".format(self.pubmed_dump_file_path,os.sep))
@staticmethod
def create_entry_xml(xml_ref): # Taken from CCC
entry_string = ""
el_citation = xml_ref.xpath("./element-citation | ./mixed-citation | ./citation")
if len(el_citation):
cur_el = el_citation[0]
is_element_citation = cur_el.tag == "element-citation" or cur_el.tag == "citation"
has_list_of_people = False
first_text_passed = False
for el in cur_el.xpath(".//node()"):
type_name = type(el).__name__
if type_name == "_Element":
cur_text = el.text
if cur_text is not None and " ".join(cur_text.split()) != "":
if first_text_passed:
is_in_person_group = len(el.xpath("ancestor::person-group")) > 0
if is_in_person_group:
entry_string += ", "
has_list_of_people = True
elif not is_in_person_group and has_list_of_people:
entry_string += ". "
has_list_of_people = False
# --------------------------------------------------------------------------
# Repository: securedataplane/mts
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 22:12:05 2018
@author: saad
"""
'''
Scenarios:
+ Single tenant:
+ phy2vm2vm2phy_Baseline_NoDPDK
+ phy2vm2vm2phy_Baseline_DPDK
+ phy2vm2vm2phy_SRIOV_NoDPDK
+ phy2vm2vm2phy_SRIOV_DPDK
+ phy2phy_Baseline_NoDPDK
+ phy2phy_Baseline_DPDK
+ phy2phy_SRIOV_NoDPDK
+ phy2phy_SRIOV_DPDK
+multi tenant:
+ phy2vm2vm2phy_Baseline_MultiTenant_NoDPDK
+ phy2vm2vm2phy_Baseline_MultiTenant_DPDK
+ phy2vm2vm2phy_SRIOV_MultiTenant_NoDPDK
+ phy2vm2vm2phy_SRIOV_MultiTenant_DPDK
+ phy2vm2vm2phy_SRIOV_MultiOvs_NoDPDK
+ phy2vm2vm2phy_SRIOV_MultiOvs_DPDK
+ phy2phy_Baseline_MultiTenant_NoDPDK
+ phy2phy_Baseline_MultiTenant_DPDK
+ phy2phy_SRIOV_MultiTenant_NoDPDK
+ phy2phy_SRIOV_MultiTenant_DPDK
+ phy2phy_SRIOV_MultiOvs_NoDPDK
+ phy2phy_SRIOV_MultiOvs_DPDK
'''
import expLib as exp
import smtplib
from datetime import datetime
import os
import errno
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
###########################################################################################
####################################---Scenario Specific Data---###########################
###########################################################################################
# Total CPU cores available on the server under test.
MaxCpu= 8
# Whether host/guest CPU sets are kept isolated from one another.
IsCpusIsolated=True
# Destination MAC rewritten by the egress flow rules.
outDestMac="00:00:00:00:30:56"
# Each row: [scenario, variant, cpu_array] where cpu_array is
# [Host, Vswitch VM, Tenant VM, ovs-vswitchd, dpdk-vswitch, dpdk-tenant,
#  OVS-VM count, Tenants per OVS].
CpuData = [
    # ------------------------------ single tenant ------------------------------
    ["phy2phy", "Baseline_NoDPDK", [2, 0, 0, 0, 0, 0, 0, 0]],
    ["phy2phy", "Baseline_DPDK", [2, 0, 0, 0, 1, 0, 0, 0]],
    ["phy2phy", "SRIOV_NoDPDK", [1, 2, 0, 0, 0, 0, 1, 0]],
    ["phy2phy", "SRIOV_DPDK", [1, 1, 0, 0, 1, 0, 1, 0]],
    ["phy2vm2vm2phy", "Baseline_NoDPDK", [2, 0, 2, 0, 0, 0, 0, 1]],
    ["phy2vm2vm2phy", "Baseline_DPDK", [2, 0, 2, 0, 1, 0, 0, 1]],
    ["phy2vm2vm2phy", "SRIOV_NoDPDK", [1, 2, 2, 0, 0, 0, 1, 1]],
    ["phy2vm2vm2phy", "SRIOV_DPDK", [1, 1, 2, 0, 1, 0, 1, 1]],
    # ------------------------------ multi tenant -------------------------------
    ["phy2phy", "Baseline_MultiTenant_NoDPDK", [2, 0, 0, 0, 0, 0, 0, 0]],
    ["phy2phy", "Baseline_MultiTenant_DPDK", [2, 0, 0, 0, 1, 0, 0, 0]],
    ["phy2phy", "SRIOV_MultiTenant_NoDPDK", [1, 2, 0, 0, 0, 0, 1, 0]],
    ["phy2phy", "SRIOV_MultiTenant_DPDK", [1, 1, 0, 0, 1, 0, 1, 0]],
    ["phy2phy", "SRIOV_MultiOvs_NoDPDK", [1, 2, 0, 0, 0, 0, 2, 0]],
    ["phy2phy", "SRIOV_MultiOvs_DPDK", [1, 1, 0, 0, 1, 0, 2, 0]],
    ["phy2vm2vm2phy", "Baseline_MultiTenant_NoDPDK", [2, 0, 2, 0, 0, 0, 0, 2]],
    ["phy2vm2vm2phy", "Baseline_MultiTenant_DPDK", [2, 0, 2, 0, 1, 0, 0, 2]],
    ["phy2vm2vm2phy", "SRIOV_MultiTenant_NoDPDK", [1, 2, 2, 0, 0, 0, 1, 2]],
    ["phy2vm2vm2phy", "SRIOV_MultiTenant_DPDK", [1, 1, 2, 0, 1, 0, 1, 2]],
    ["phy2vm2vm2phy", "SRIOV_MultiOvs_NoDPDK", [1, 2, 2, 0, 0, 0, 2, 1]],
    ["phy2vm2vm2phy", "SRIOV_MultiOvs_DPDK", [1, 1, 2, 0, 1, 0, 2, 1]],
]
###########################################################################################
####################################---Helper Functions---#################################
###########################################################################################
def Logs(Summary,cpuArray, time):
    """Persist the scenario summary and point expLib at this run's log paths.

    Args:
        Summary: human-readable scenario description to write to summary.txt.
        cpuArray: CPU allocation config, embedded in the command-log filename.
        time: timestamp string used as the per-run log directory name.
    """
    exp.CmdLogPath= "./ExpLib_logs/"+time+"/"+exp.scsName+"_"+exp.Server_cnx[0]+"_"+str(cpuArray)+"_.txt"
    exp.SummaryPath= "./ExpLib_logs/"+time+"/summary.txt"
    if not os.path.exists(os.path.dirname(exp.SummaryPath)):
        try:
            os.makedirs(os.path.dirname(exp.SummaryPath))
        except OSError as exc:  # guard against a concurrent creator (no exist_ok on Python 2)
            if exc.errno != errno.EEXIST:
                raise
    # The with-statement closes the file; the original's explicit f.close()
    # inside the block was redundant and has been removed.
    with open(exp.SummaryPath, "w") as f:
        f.write(Summary)
def CpuArrayFinder(ScsName, dpdk_vswitch_cores):
    """Return the cpu_array for the given scenario/variant pair, or None.

    Note: despite its name, *dpdk_vswitch_cores* is matched against the
    variant label (second column of CpuData), mirroring how callers use it.
    """
    return next(
        (entry[2] for entry in CpuData
         if entry[0] == ScsName and entry[1] == dpdk_vswitch_cores),
        None,
    )
#cpu_array=[Host ,Vswitch VM ,Tenant VM ,ovs-vswitchd ,dpdk-vswitch ,dpdk-tenant, OVS-VM count, Tenents per OVS]
def CpuAllocation(config):
    """Derive CPU pinning lists from a CpuData cpu_array.

    config indices: [0]=Host cores, [1]=Vswitch-VM cores, [2]=Tenant-VM cores,
    [3]=ovs-vswitchd cores, [4]=dpdk-vswitch cores, [5]=dpdk-tenant cores,
    [6]=OVS-VM count, [7]=Tenants per OVS.

    Returns [ovsVmCpuListArray, tenantVmCpuListArray, OvsCpu, DpdkCpu]:
    per-OVS-VM core ranges, per-tenant-VM core ranges, [ovsdb, vswitchd]
    comma-joined core strings, and [first free core after the OVS block,
    DPDK PMD core range].
    """
    exp.HOST_CPU=str(config[0])
    ovsVmCpuListArray=[]
    tenantVmCpuListArray=[]
    if config[1]!=0:
        # SR-IOV scenarios: OVS runs inside one or more dedicated VMs.
        if config[6]==1:
            # Single OVS VM: host cores first, then the OVS VM, then tenants.
            ovsHostCpuCount= config[0]
            ovsVmCpuCount= config[1] +config[3] +config[4]
            tenantVmCpuCount= config[2] +config[5]
            ovsVmCpuList = range(ovsHostCpuCount,ovsVmCpuCount+ovsHostCpuCount)
            ovsVmCpuListArray.append(ovsVmCpuList)
            start= ovsVmCpuCount+ovsHostCpuCount
            end=ovsVmCpuCount+ovsHostCpuCount
            # One contiguous core range per tenant VM of this OVS.
            for i in range(config[7]):
                start=end
                end=end + tenantVmCpuCount
                tenantVmCpuList = range(start,end)
                tenantVmCpuListArray.append(tenantVmCpuList)
            # Guest-relative core numbering inside the OVS VM.
            vSwitchdCpuList= range(config[1]+config[3])
            dpdkVswitchCpu= range(config[1]+config[3], config[1]+config[3]+config[4])
            vSwitchdCpuStr= ','.join(str(c) for c in vSwitchdCpuList)
            OvsCpu=[vSwitchdCpuStr, vSwitchdCpuStr]
            DpdkCpu=[ovsVmCpuCount,dpdkVswitchCpu]
        else:
            # Multiple OVS VMs (Multi-OVS scenarios).
            ovsHostCpuCount= config[0]
            ovsVmCpuCount= config[1] +config[3] +config[4]
            tenantVmCpuCount= config[2] +config[5]
            if IsCpusIsolated:
                # With isolation each OVS VM gives up one core; VMs get
                # disjoint contiguous ranges.
                ovsVmCpuCount= ovsVmCpuCount-1
                start=ovsHostCpuCount
                end=ovsHostCpuCount
                for i in range(config[6]):
                    start=end
                    end=end + ovsVmCpuCount
                    ovsVmCpuList = range(start,end)
                    ovsVmCpuListArray.append(ovsVmCpuList)
            else:
                # Without isolation all OVS VMs share the same core range.
                start=ovsHostCpuCount
                end=ovsHostCpuCount+ ovsVmCpuCount
                for i in range(config[6]):
                    ovsVmCpuList = range(start,end)
                    ovsVmCpuListArray.append(ovsVmCpuList)
            Tstart=end
            Tend=end
            # NOTE(review): tenant ranges are produced per OVS VM (config[6]),
            # not per tenant-per-OVS (config[7]) — confirm this is intended.
            for i in range(config[6]):
                Tstart= Tend
                Tend= Tend + tenantVmCpuCount
                tenantVmCpuList = range(Tstart,Tend)
                tenantVmCpuListArray.append(tenantVmCpuList)
            if IsCpusIsolated:
                dpdkVswitchCpu= range(ovsVmCpuCount)
                if config[1]+config[3]>1:
                    vSwitchdCpuList= range(config[1]+config[3]-1)
                else:
                    vSwitchdCpuList= range(config[1]+config[3])
            else:
                dpdkVswitchCpu= range(config[1]+config[3], config[1]+config[3]+config[4])
                vSwitchdCpuList= range(config[1]+config[3])
            vSwitchdCpuStr= ','.join(str(c) for c in vSwitchdCpuList)
            OvsCpu=[vSwitchdCpuStr, vSwitchdCpuStr]
            DpdkCpu=[ovsVmCpuCount,dpdkVswitchCpu]
    else:
        # Baseline scenarios: OVS runs on the host, no OVS VM.
        ovsCpuCount= config[3] +config[4]
        if ovsCpuCount>0:
            # Reserve one host core for OVS itself.
            ovsHostCpuCount= config[0] -1
        else:
            ovsHostCpuCount= config[0]
        tenantVmCpuCount= config[2] +config[5]
        vSwitchdCpuList= range(ovsHostCpuCount)
        dpdkVswitchCpu= range(ovsHostCpuCount, ovsHostCpuCount +config[4])
        ovsVmCpuList=[]
        ovsVmCpuListArray.append(ovsVmCpuList)
        start= ovsCpuCount+ovsHostCpuCount
        end=ovsCpuCount+ovsHostCpuCount
        for i in range(config[7]):
            start=end
            end=end + tenantVmCpuCount
            tenantVmCpuList = range(start,end)
            tenantVmCpuListArray.append(tenantVmCpuList)
        vSwitchdCpuStr= ','.join(str(c) for c in vSwitchdCpuList)
        OvsCpu=[vSwitchdCpuStr, vSwitchdCpuStr]
        DpdkCpu=[MaxCpu ,dpdkVswitchCpu]
    return [ovsVmCpuListArray, tenantVmCpuListArray, OvsCpu, DpdkCpu]
def EmailNotify(body, state, time, attachmentPath=""):
    """Email a status notification for the current scenario.

    Args:
        body: message body text.
        state: human-readable state (e.g. "is ready") used in the subject.
        time: timestamp string prefixed to the subject.
        attachmentPath: optional path of a file to attach.
    """
    # Sign In (credentials are placeholders; fill in before use)
    gmail_sender = 'XXX'
    gmail_passwd = '<PASSWORD>'
    #Receivers
    receivers = "XXX"
    # Subject is computed once (the original built it twice, identically).
    subject = time+": "+exp.scsName+" "+state+" on "+exp.Server_cnx[0]
    msg = MIMEMultipart()
    msg['From'] = gmail_sender
    msg['To'] = receivers
    msg['Subject'] = subject
    msg.attach(MIMEText(body))
    if(attachmentPath!=""):
        # Close the attachment file after reading (the original leaked the handle).
        with open(attachmentPath,'rb') as attachment:
            part = MIMEBase('application','octet-stream')
            part.set_payload(attachment.read())
        encoders.encode_base64(part)
        part.add_header('Content-Disposition',"attachment; filename= "+attachmentPath)
        msg.attach(part)
    text = msg.as_string()
    server = smtplib.SMTP('smtp.gmail.com',587)
    server.starttls()
    server.login(gmail_sender,gmail_passwd)
    try:
        server.sendmail(gmail_sender, receivers.split(","), text)
    except smtplib.SMTPException:
        # Best-effort: narrowed from the original bare except, which would
        # also have swallowed KeyboardInterrupt and programming errors.
        print ('error sending mail')
    server.quit()
def GetScenarioSummary(OvsVmPorts, OvsCpu, DpdkCpu, DpdkMem):
    """Render a human-readable summary of the currently configured scenario.

    Reads the scenario globals from the expLib module; OvsVmPorts is a list
    of per-OVS-VM port lists, OvsCpu/DpdkCpu the CPU assignments and DpdkMem
    the DPDK memory string. Returns the summary as a single string.
    """
    lines = []
    lines.append("Scenario Name: " + exp.scsName + "\n")
    lines.append("Nic : " + str(exp.NicType) + "\n")
    lines.append("Is OVS running with DPDK : " + str(exp.IsDPDK) + "\n")
    lines.append("Is SR-IOV enabled : " + str(exp.isSRIOV) + "\n")
    lines.append("CPU Isolation : " + str(IsCpusIsolated) + "\n")
    lines.append(" \n \n")
    lines.append("Physical Ports config: \n")
    if exp.isSRIOV:
        for p in exp.PhyPorts:
            lines.append(" +port " + p[0] + " has " + p[1] + " configured Vfs \n")
    elif exp.IsDPDK:
        for p in exp.PhyPorts:
            lines.append(" +port " + p[0] + " assigned to:" + p[1] + " as a DPDK port on Core" + p[3] + " \n")
    else:
        for p in exp.PhyPorts:
            lines.append(" +port " + p[0] + " assigned to:" + p[1] + " \n")
    lines.append(" \n \n")
    if exp.isSRIOV:
        lines.append("Vfs config: \n")
        for vf in exp.MyVfs:
            # "asssigned" (sic) kept exactly as the original output spells it.
            lines.append(" +" + vf[0] + " Vlan: " + vf[1] + " Spoofchk: " + vf[2] + " asssigned to " + vf[3] + "\n")
    elif len(exp.VirtualPorts) != 0:
        lines.append("Virtual ports config: \n")
        for vp in exp.VirtualPorts:
            lines.append(" +Mac Id: " + vp[0] + " assigned to bridge: " + vp[1] + " assigned to: " + vp[2] + "\n")
    lines.append(" \n \n")
    if len(exp.usedVms) != 0:
        lines.append("VMs config: \n")
        for vm in exp.usedVms:
            lines.append(" +" + vm[0] + " Running on cores:" + str(vm[1]) + " RAM: " + vm[2] + "\n")
    lines.append(" \n \n")
    lines.append("OVS CPU Config: \n")
    lines.append(" +OVSDb: " + OvsCpu[0] + " \n")
    lines.append(" +Vswitchd: " + OvsCpu[1] + " \n")
    if exp.IsDPDK:
        lines.append(" +OVS-DPDK: " + str(DpdkCpu[1]) + " \n")
        lines.append(" +DPDK Memory: " + DpdkMem + " \n")
    lines.append(" \n \n")
    if exp.isSRIOV and len(OvsVmPorts) != 0:
        lines.append("OVS VM(s) Config: \n")
        for indx, pl in enumerate(OvsVmPorts, start=1):
            lines.append("OVS-VM " + str(indx) + " Config: \n")
            for vmp in pl:
                if vmp[1] == True:
                    lines.append(" +vf " + vmp[0] + " is a DPDK port Running on cores:" + vmp[2] + "\n")
                else:
                    lines.append(" +vf " + vmp[0] + "\n")
    return "".join(lines)
###########################################################################################
####################################---Scenarios---########################################
###########################################################################################
###########################################################################################
####################################---Single Tenant VM---#################################
###########################################################################################
def phy2vm2vm2phy_SRIOV_DPDK(cnx_server, config, dpdkMemory="1024,0"):
    """Configure the single-tenant phy->VM->VM->phy scenario: SR-IOV NIC,
    OVS-with-DPDK inside a dedicated vswitch VM, one tenant VM.

    Args:
        cnx_server: expLib server connection tuple (index 0 is the host name).
        config: cpu_array for this scenario (see CpuData).
        dpdkMemory: DPDK socket memory string passed to OVS.

    Returns True once configuration completes; emails progress notifications.
    """
    #----------------------------------------#
    exp.NicType= "mlx"
    exp.isSRIOV= True
    exp.IsDPDK= True
    exp.OVS_PATH= exp.dpdk_path
    exp.Server_cnx= cnx_server
    exp.scsName= "phy2vm2vm2phy_SRIOV_DPDK"
    logTimeStamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    #----------------------------------------#
    cpu_config= CpuAllocation(config)
    exp.PhyPorts= [
        ("enp3s0f0", "8"),
        ("enp3s0f1", "8")
    ]
    # Vf tuples: (interface, vlan, spoof-check, owning VM).
    exp.MyVfs= [
        ("enp3s0f2", "0", "off", "vswitch-vm"),
        ("enp3s1f2", "0", "off", "vswitch-vm"),
        ("enp3s1f3", "10", "off", "vswitch-vm"),
        ("enp3s0f3", "10", "off", "vswitch-vm"),
        ("enp3s0f4", "10", "off", "tenant-green-1"),
        ("enp3s1f4", "10", "off", "tenant-green-1")]
    exp.usedVms=[
        ("vswitch-vm", cpu_config[0][0], "4G"),
        ("tenant-green-1", cpu_config[1][0], "4G")]
    #----------------- OVS-VM_1------------------
    OvsCpu= cpu_config[2]
    DpdkCpu= cpu_config[3]
    DpdkMem= dpdkMemory
    # Spread the four DPDK ports over two PMD cores when available,
    # otherwise pin them all to the single PMD core.
    if len(DpdkCpu[1]) >1:
        OvsVmPorts1= [
            ("enp3s0f2", True, str(DpdkCpu[1][0])),
            ("enp3s1f2", True, str(DpdkCpu[1][0])),
            ("enp3s1f3", True, str(DpdkCpu[1][1])),
            ("enp3s0f3", True, str(DpdkCpu[1][1]))]
    else:
        OvsVmPorts1= [
            ("enp3s0f2", True, str(DpdkCpu[1][0])),
            ("enp3s1f2", True, str(DpdkCpu[1][0])),
            ("enp3s1f3", True, str(DpdkCpu[1][0])),
            ("enp3s0f3", True, str(DpdkCpu[1][0]))]
    msg= GetScenarioSummary([OvsVmPorts1], OvsCpu, DpdkCpu, DpdkMem)
    EmailNotify(msg, "is beeing prepared", logTimeStamp)
    Logs(msg,config, logTimeStamp)
    #----------------------------------------#
    exp.InitialConfig()
    exp.Vfsconfig()
    exp.ConfigOVS("vswitch-vm", "br0", OvsVmPorts1, OvsCpu, DpdkMem, DpdkCpu)
    '''
    OVS Flow Rules:
        + (1) in: enp3s0f2 with ip:10.0.0.2--> out= enp3s0f3, change Mac to enp3s0f4 mac
        + (2) in: enp3s1f3 --> out= enp3s1f2, change Mac to 00:00:00:00:30:56
    '''
    #Flow Rules (1)
    #---------------------------------------------------------
    match="in_port="+exp.VfsMatch["enp3s0f2"]+",ip,nw_dst=10.0.0.2"
    action="mod_dl_dst:"+exp.GetMacByVf("enp3s0f4")+","+exp.VfsMatch["enp3s0f3"]
    exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
    #Flow Rules (2)
    #---------------------------------------------------------
    match="in_port="+exp.VfsMatch["enp3s1f3"]
    action="mod_dl_dst:"+outDestMac+","+exp.VfsMatch["enp3s1f2"]
    exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
    #show Flow rules of br0
    exp.showFlowRules("vswitch-vm", exp.OVS_PATH,"br0")
    #Start DPDK App in The tenantVM
    #exp.StartDpdkApp("tenant-green-1")
    EmailNotify(msg, "is ready", logTimeStamp)
    return True
def phy2vm2vm2phy_SRIOV_NoDPDK(cnx_server, config):
    """Configure the single-tenant phy->VM->VM->phy scenario: SR-IOV NIC,
    plain (non-DPDK) OVS inside a dedicated vswitch VM, one tenant VM.

    Args:
        cnx_server: expLib server connection tuple (index 0 is the host name).
        config: cpu_array for this scenario (see CpuData).

    Returns True once configuration completes; emails progress notifications.
    """
    #----------------------------------------#
    exp.NicType= "mlx"
    exp.isSRIOV= True
    exp.IsDPDK= False
    exp.OVS_PATH= exp.nodpdk_path
    exp.Server_cnx= cnx_server
    exp.scsName= "phy2vm2vm2phy_SRIOV_NoDPDK"
    logTimeStamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    #----------------------------------------#
    cpu_config= CpuAllocation(config)
    exp.PhyPorts= [
        ("enp3s0f0", "8"),
        ("enp3s0f1", "8")
    ]
    # Vf tuples: (interface, vlan, spoof-check, owning VM).
    exp.MyVfs= [
        ("enp3s0f2", "0", "off", "vswitch-vm"),
        ("enp3s1f2", "0", "off", "vswitch-vm"),
        ("enp3s1f3", "10", "off", "vswitch-vm"),
        ("enp3s0f3", "10", "off", "vswitch-vm"),
        ("enp3s0f4", "10", "off", "tenant-green-1"),
        ("enp3s1f4", "10", "off","tenant-green-1")]
    exp.usedVms=[
        ("vswitch-vm", cpu_config[0][0], "4G"),
        ("tenant-green-1", cpu_config[1][0], "4G")]
    #----------------- OVS-VM_1------------------
    OvsCpu= cpu_config[2]
    # No DPDK: ports are plain (False) and need no PMD core assignment.
    OvsVmPorts1= [
        ("enp3s0f2", False),
        ("enp3s1f2", False),
        ("enp3s1f3", False),
        ("enp3s0f3", False)]
    msg= GetScenarioSummary([OvsVmPorts1], OvsCpu, [], "")
    EmailNotify(msg, "is beeing prepared", logTimeStamp)
    Logs(msg,config, logTimeStamp)
    #----------------------------------------#
    exp.InitialConfig()
    exp.Vfsconfig()
    exp.ConfigOVS("vswitch-vm", "br0", OvsVmPorts1, OvsCpu)
    '''
    OVS Flow Rules:
        + (1) in: enp3s0f2 with ip:10.0.0.2 --> out= enp3s0f3, change Mac to enp3s0f4 mac
        + (2) in: enp3s1f3 --> out= enp3s1f2, change Mac to 00:00:00:00:30:56
    '''
    #Flow Rules (1)
    #---------------------------------------------------------
    match="in_port="+exp.VfsMatch["enp3s0f2"]+",ip,nw_dst=10.0.0.2"
    action="mod_dl_dst:"+exp.GetMacByVf("enp3s0f4")+","+exp.VfsMatch["enp3s0f3"]
    exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
    #Flow Rules (2)
    #---------------------------------------------------------
    match="in_port="+exp.VfsMatch["enp3s1f3"]
    action="mod_dl_dst:"+outDestMac+","+exp.VfsMatch["enp3s1f2"]
    exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
    #show Flow rules of br0
    exp.showFlowRules("vswitch-vm", exp.OVS_PATH,"br0")
    #Start DPDK App in The tenantVM
    #exp.StartDpdkApp("tenant-green-1")
    EmailNotify(msg, "is ready", logTimeStamp)
    return True
def phy2vm2vm2phy_Baseline_NoDPDK(cnx_server, config):
# ----------------------------------------#
exp.NicType = "mlx"
exp.isSRIOV = False
exp.IsDPDK = False
exp.OVS_PATH = exp.nodpdk_path
exp.Server_cnx = cnx_server
exp.scsName= "phy2vm2vm2phy_Baseline_NoDPDK"
logTimeStamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# --------------------------------------------------------------------------
import autogalaxy as ag
import numpy as np
import pytest
from autogalaxy import exc
from autogalaxy.plane import plane
from skimage import measure
from autogalaxy.mock import mock
def critical_curve_via_magnification_from_plane_and_grid(plane, grid):
    """Trace the plane's critical curves via marching squares on 1/magnification.

    Critical curves are the contours where the inverse magnification crosses
    zero. Each pixel-space contour is converted to scaled grid coordinates.

    Args:
        plane: plane-like object exposing magnification_2d_from_grid.
        grid: masked 2D grid the magnification was evaluated on.

    Returns:
        A list of (N, 2) numpy arrays, one per critical curve.
    """
    magnification = plane.magnification_2d_from_grid(grid=grid)
    inverse_magnification = 1 / magnification
    critical_curves_indices = measure.find_contours(inverse_magnification.native, 0)
    critical_curves = []
    # Iterate contours directly instead of the original index-based loop.
    for contour in critical_curves_indices:
        contour_x, contour_y = contour.T
        pixel_coord = np.stack((contour_x, contour_y), axis=-1)
        critical_curve = grid.mask.grid_scaled_from_grid_pixels_1d_for_marching_squares(
            grid_pixels_1d=pixel_coord, shape_native=magnification.sub_shape_native
        )
        # Bug fix: np.array() has no `grid` keyword — the original
        # `np.array(grid=critical_curve)` raised TypeError before returning.
        critical_curves.append(np.array(critical_curve))
    return critical_curves
def caustics_via_magnification_from_plane_and_grid(plane, grid):
    """Map each magnification-based critical curve to its caustic.

    A caustic is the critical curve ray-traced to the source plane:
    source position = image-plane position minus the deflection angles
    evaluated on the curve.
    """
    critical_curves = critical_curve_via_magnification_from_plane_and_grid(
        plane=plane, grid=grid
    )
    return [
        curve - plane.deflections_2d_from_grid(grid=curve)
        for curve in critical_curves
    ]
class TestAbstractPlane:
    """Tests for ag.Plane properties aggregated over its member galaxies."""

    class TestProperties:
        def test__point_dict(self, ps_0, ps_1):
            """point_dict collects point sources from all galaxies, keyed by attribute name."""
            plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
            assert plane.point_dict == {}
            plane = ag.Plane(
                galaxies=[ag.Galaxy(redshift=0.5, point_0=ps_0)], redshift=None
            )
            assert plane.point_dict == {"point_0": ps_0}
            plane = ag.Plane(
                galaxies=[ag.Galaxy(redshift=0.5, point_0=ps_0, point_1=ps_1)],
                redshift=None,
            )
            assert plane.point_dict == {"point_0": ps_0, "point_1": ps_1}
            plane = ag.Plane(
                galaxies=[
                    ag.Galaxy(redshift=0.5, point_0=ps_0, point_1=ps_1),
                    ag.Galaxy(redshift=0.5, point_2=ps_0),
                ],
                redshift=None,
            )
            assert plane.point_dict == {
                "point_0": ps_0,
                "point_1": ps_1,
                "point_2": ps_0,
            }

        def test__has_light_profile(self):
            """has_light_profile is True when any galaxy carries a light profile."""
            plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
            assert plane.has_light_profile is False
            plane = ag.Plane(
                galaxies=[ag.Galaxy(redshift=0.5, light_profile=ag.lp.LightProfile())],
                redshift=None,
            )
            assert plane.has_light_profile is True
            plane = ag.Plane(
                galaxies=[
                    ag.Galaxy(redshift=0.5, light_profile=ag.lp.LightProfile()),
                    ag.Galaxy(redshift=0.5),
                ],
                redshift=None,
            )
            assert plane.has_light_profile is True

        def test__has_mass_profile(self):
            """has_mass_profile is True when any galaxy carries a mass profile."""
            plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
            assert plane.has_mass_profile is False
            plane = ag.Plane(
                galaxies=[ag.Galaxy(redshift=0.5, mass_profile=ag.mp.MassProfile())],
                redshift=None,
            )
            assert plane.has_mass_profile is True
            plane = ag.Plane(
                galaxies=[
                    ag.Galaxy(redshift=0.5, mass_profile=ag.mp.MassProfile()),
                    ag.Galaxy(redshift=0.5),
                ],
                redshift=None,
            )
            assert plane.has_mass_profile is True

        def test__has_pixelization(self):
            """has_pixelization is True when any galaxy carries a pixelization."""
            plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
            assert plane.has_pixelization is False
            galaxy_pix = ag.Galaxy(
                redshift=0.5,
                pixelization=ag.pix.Pixelization(),
                regularization=ag.reg.Regularization(),
            )
            plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
            assert plane.has_pixelization is True
            plane = ag.Plane(
                galaxies=[galaxy_pix, ag.Galaxy(redshift=0.5)], redshift=None
            )
            assert plane.has_pixelization is True

        def test__has_regularization(self):
            """has_regularization is True when any galaxy carries a regularization."""
            plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
            assert plane.has_regularization is False
            galaxy_pix = ag.Galaxy(
                redshift=0.5,
                pixelization=ag.pix.Pixelization(),
                regularization=ag.reg.Regularization(),
            )
            plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
            assert plane.has_regularization is True
            plane = ag.Plane(
                galaxies=[galaxy_pix, ag.Galaxy(redshift=0.5)], redshift=None
            )
            assert plane.has_regularization is True

        def test__has_hyper_galaxy(self):
            """has_hyper_galaxy is True when any galaxy carries a HyperGalaxy."""
            plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
            assert plane.has_hyper_galaxy is False
            galaxy = ag.Galaxy(redshift=0.5, hyper_galaxy=ag.HyperGalaxy())
            plane = ag.Plane(galaxies=[galaxy], redshift=None)
            assert plane.has_hyper_galaxy is True
            plane = ag.Plane(galaxies=[galaxy, ag.Galaxy(redshift=0.5)], redshift=None)
            assert plane.has_hyper_galaxy is True

        def test__mass_profiles(self):
            """mass_profiles concatenates every galaxy's mass profiles, in galaxy order."""
            plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
            assert plane.mass_profiles == []
            sis_0 = ag.mp.SphIsothermal(einstein_radius=1.0)
            sis_1 = ag.mp.SphIsothermal(einstein_radius=2.0)
            sis_2 = ag.mp.SphIsothermal(einstein_radius=3.0)
            plane = ag.Plane(
                galaxies=[ag.Galaxy(redshift=0.5, mass_profile=sis_0)], redshift=None
            )
            assert plane.mass_profiles == [sis_0]
            plane = ag.Plane(
                galaxies=[
                    ag.Galaxy(redshift=0.5, mass_profile_0=sis_0, mass_profile_1=sis_1),
                    ag.Galaxy(redshift=0.5, mass_profile_0=sis_2, mass_profile_1=sis_1),
                ],
                redshift=None,
            )
            assert plane.mass_profiles == [sis_0, sis_1, sis_2, sis_1]

        def test__hyper_image_of_galaxy_with_pixelization(self):
            """The hyper image comes from the (single) galaxy that has a pixelization."""
            galaxy_pix = ag.Galaxy(
                redshift=0.5,
                pixelization=ag.pix.Pixelization(),
                regularization=ag.reg.Regularization(),
            )
            plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
            assert plane.hyper_galaxy_image_of_galaxy_with_pixelization is None
            galaxy_pix = ag.Galaxy(
                redshift=0.5,
                pixelization=ag.pix.Pixelization(),
                regularization=ag.reg.Regularization(),
                hyper_galaxy_image=1,
            )
            plane = ag.Plane(
                galaxies=[galaxy_pix, ag.Galaxy(redshift=0.5)], redshift=None
            )
            assert plane.hyper_galaxy_image_of_galaxy_with_pixelization == 1
            plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)], redshift=None)
            assert plane.hyper_galaxy_image_of_galaxy_with_pixelization is None

    class TestPixelization:
        def test__no_galaxies_with_pixelizations_in_plane__returns_none(self):
            galaxy_no_pix = ag.Galaxy(redshift=0.5)
            plane = ag.Plane(galaxies=[galaxy_no_pix], redshift=None)
            assert plane.pixelization is None

        def test__1_galaxy_in_plane__it_has_pixelization__returns_mapper(self):
            """The plane exposes the pixelization of the one galaxy that has one."""
            galaxy_pix = ag.Galaxy(
                redshift=0.5,
                pixelization=mock.MockPixelization(value=1),
                regularization=mock.MockRegularization(matrix_shape=(1, 1)),
            )
            plane = ag.Plane(galaxies=[galaxy_pix], redshift=None)
            assert plane.pixelization.value == 1
            galaxy_pix = ag.Galaxy(
                redshift=0.5,
                pixelization=mock.MockPixelization(value=2),
                regularization=mock.MockRegularization(matrix_shape=(2, 2)),
            )
            galaxy_no_pix = ag.Galaxy(redshift=0.5)
            plane = ag.Plane(galaxies=[galaxy_no_pix, galaxy_pix], redshift=None)
            assert plane.pixelization.value == 2

        def test__2_galaxies_in_plane__both_have_pixelization__raises_error(self):
            """Two pixelized galaxies in one plane is ambiguous and must raise."""
            galaxy_pix_0 = ag.Galaxy(
                redshift=0.5,
                pixelization=mock.MockPixelization(value=1),
                regularization=mock.MockRegularization(matrix_shape=(1, 1)),
            )
            galaxy_pix_1 = ag.Galaxy(
                redshift=0.5,
                pixelization=mock.MockPixelization(value=2),
                regularization=mock.MockRegularization(matrix_shape=(1, 1)),
            )
            plane = ag.Plane(galaxies=[galaxy_pix_0, galaxy_pix_1], redshift=None)
            with pytest.raises(exc.PixelizationException):
                print(plane.pixelization)

    class TestRegularization:
        def test__no_galaxies_with_regularizations_in_plane__returns_none(self):
            galaxy_no_pix = ag.Galaxy(redshift=0.5)
            plane = ag.Plane(galaxies=[galaxy_no_pix], redshift=None)
            assert plane.regularization is None

        def test__1_galaxy_in_plane__it_has_regularization__returns_regularization(
            self,
        ):
            """The plane exposes the regularization of the one galaxy that has one."""
            galaxy_reg = ag.Galaxy(
                redshift=0.5,
                pixelization=mock.MockPixelization(value=1),
                regularization=mock.MockRegularization(matrix_shape=(1, 1)),
            )
            plane = ag.Plane(galaxies=[galaxy_reg], redshift=None)
            assert plane.regularization.shape == (1, 1)
            galaxy_reg = ag.Galaxy(
                redshift=0.5,
                pixelization=mock.MockPixelization(value=1),
                regularization=mock.MockRegularization(matrix_shape=(2, 2)),
            )
            galaxy_no_reg = ag.Galaxy(redshift=0.5)
            plane = ag.Plane(galaxies=[galaxy_no_reg, galaxy_reg], redshift=None)
            assert plane.regularization.shape == (2, 2)

        def test__2_galaxies_in_plane__both_have_regularization__raises_error(self):
            """Two regularized galaxies in one plane is ambiguous and must raise."""
            galaxy_reg_0 = ag.Galaxy(
                redshift=0.5,
                pixelization=mock.MockPixelization(value=1),
                regularization=mock.MockRegularization(matrix_shape=(1, 1)),
            )
            galaxy_reg_1 = ag.Galaxy(
                redshift=0.5,
                pixelization=mock.MockPixelization(value=2),
                regularization=mock.MockRegularization(matrix_shape=(1, 1)),
            )
            plane = ag.Plane(galaxies=[galaxy_reg_0, galaxy_reg_1], redshift=None)
            with pytest.raises(exc.PixelizationException):
                print(plane.regularization)
class TestAbstractPlaneProfiles:
    class TestProfileImage:
        """Tests that a plane's 2D image equals the sum of its galaxies' images."""

        def test__image_2d_from_grid__same_as_its_light_image(
            self, sub_grid_2d_7x7, gal_x1_lp
        ):
            """Plane image matches the light profile's image, including sub-grid binning."""
            light_profile = gal_x1_lp.light_profiles[0]
            lp_image = light_profile.image_2d_from_grid(grid=sub_grid_2d_7x7)
            # Perform sub gridding average manually
            lp_image_pixel_0 = (
                lp_image[0] + lp_image[1] + lp_image[2] + lp_image[3]
            ) / 4
            lp_image_pixel_1 = (
                lp_image[4] + lp_image[5] + lp_image[6] + lp_image[7]
            ) / 4
            plane = ag.Plane(galaxies=[gal_x1_lp], redshift=None)
            image = plane.image_2d_from_grid(grid=sub_grid_2d_7x7)
            assert (image.binned[0] == lp_image_pixel_0).all()
            assert (image.binned[1] == lp_image_pixel_1).all()
            assert (image == lp_image).all()

        def test__image_2d_from_grid__same_as_its_galaxy_image(
            self, sub_grid_2d_7x7, gal_x1_lp
        ):
            galaxy_image = gal_x1_lp.image_2d_from_grid(grid=sub_grid_2d_7x7)
            plane = ag.Plane(galaxies=[gal_x1_lp], redshift=None)
            image = plane.image_2d_from_grid(grid=sub_grid_2d_7x7)
            assert image == pytest.approx(galaxy_image, 1.0e-4)

        def test__image_from_positions__same_as_galaxy_image_with_conversions(
            self, grid_2d_irregular_7x7, gal_x1_lp
        ):
            """Irregular (positions) grids go through the same image evaluation."""
            galaxy_image = gal_x1_lp.image_2d_from_grid(grid=grid_2d_irregular_7x7)
            plane = ag.Plane(galaxies=[gal_x1_lp], redshift=None)
            image = plane.image_2d_from_grid(grid=grid_2d_irregular_7x7)
            assert image.in_list[0] == pytest.approx(galaxy_image.in_list[0], 1.0e-4)

        def test__images_of_galaxies(self, sub_grid_2d_7x7):
            """Summed image and per-galaxy images agree with manual sub-grid binning."""
            # Overwrite one value so intensity in each pixel is different
            sub_grid_2d_7x7[5] = np.array([2.0, 2.0])
            g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
            g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
            lp0 = g0.light_profiles[0]
            lp1 = g1.light_profiles[0]
            lp0_image = lp0.image_2d_from_grid(grid=sub_grid_2d_7x7)
            lp1_image = lp1.image_2d_from_grid(grid=sub_grid_2d_7x7)
            # Perform sub gridding average manually
            lp0_image_pixel_0 = (
                lp0_image[0] + lp0_image[1] + lp0_image[2] + lp0_image[3]
            ) / 4
            lp0_image_pixel_1 = (
                lp0_image[4] + lp0_image[5] + lp0_image[6] + lp0_image[7]
            ) / 4
            lp1_image_pixel_0 = (
                lp1_image[0] + lp1_image[1] + lp1_image[2] + lp1_image[3]
            ) / 4
            lp1_image_pixel_1 = (
                lp1_image[4] + lp1_image[5] + lp1_image[6] + lp1_image[7]
            ) / 4
            plane = ag.Plane(galaxies=[g0, g1], redshift=None)
            image = plane.image_2d_from_grid(grid=sub_grid_2d_7x7)
            assert image.binned[0] == pytest.approx(
                lp0_image_pixel_0 + lp1_image_pixel_0, 1.0e-4
            )
            assert image.binned[1] == pytest.approx(
                lp0_image_pixel_1 + lp1_image_pixel_1, 1.0e-4
            )
            image_of_galaxies = plane.images_of_galaxies_from_grid(grid=sub_grid_2d_7x7)
            assert image_of_galaxies[0].binned[0] == lp0_image_pixel_0
            assert image_of_galaxies[0].binned[1] == lp0_image_pixel_1
            assert image_of_galaxies[1].binned[0] == lp1_image_pixel_0
            assert image_of_galaxies[1].binned[1] == lp1_image_pixel_1

        def test__same_as_above__use_multiple_galaxies(self, sub_grid_2d_7x7):
            # Overwrite one value so intensity in each pixel is different
            sub_grid_2d_7x7[5] = np.array([2.0, 2.0])
            g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
            g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
            g0_image = g0.image_2d_from_grid(grid=sub_grid_2d_7x7)
            g1_image = g1.image_2d_from_grid(grid=sub_grid_2d_7x7)
            plane = ag.Plane(galaxies=[g0, g1], redshift=None)
            image = plane.image_2d_from_grid(grid=sub_grid_2d_7x7)
            assert image == pytest.approx(g0_image + g1_image, 1.0e-4)

        def test__same_as_above__grid_is_positions(self):
            # Overwrite one value so intensity in each pixel is different
            positions = ag.Grid2DIrregular(grid=[(2.0, 2.0), (3.0, 3.0)])
            g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
            g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
            g0_image = g0.image_2d_from_grid(grid=positions)
            g1_image = g1.image_2d_from_grid(grid=positions)
            plane = ag.Plane(galaxies=[g0, g1], redshift=None)
            image = plane.image_2d_from_grid(grid=positions)
            assert image.in_list[0] == pytest.approx(
                g0_image.in_list[0] + g1_image.in_list[0], 1.0e-4
            )
            assert image.in_list[1] == pytest.approx(
                g0_image.in_list[1] + g1_image.in_list[1], 1.0e-4
            )

        def test__plane_has_no_galaxies__image_is_zeros_size_of_ungalaxyed_grid(
            self, sub_grid_2d_7x7
        ):
            plane = ag.Plane(galaxies=[], redshift=0.5)
            image = plane.image_2d_from_grid(grid=sub_grid_2d_7x7)
            assert image.shape_native == (7, 7)
            assert (image[0] == 0.0).all()
            assert (image[1] == 0.0).all()

        def test__x1_plane__padded_image__compare_to_galaxy_images_using_padded_grid_stack(
            self, sub_grid_2d_7x7
        ):
            """Padded image (for PSF convolution) matches galaxy images on the padded grid."""
            padded_grid = sub_grid_2d_7x7.padded_grid_from_kernel_shape(
                kernel_shape_native=(3, 3)
            )
            g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
            g1 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=2.0))
            g2 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=3.0))
            padded_g0_image = g0.image_2d_from_grid(grid=padded_grid)
            padded_g1_image = g1.image_2d_from_grid(grid=padded_grid)
            padded_g2_image = g2.image_2d_from_grid(grid=padded_grid)
            plane = ag.Plane(galaxies=[g0, g1, g2])
            padded_plane_image = plane.padded_image_2d_from_grid_and_psf_shape(
                grid=sub_grid_2d_7x7, psf_shape_2d=(3, 3)
            )
            assert padded_plane_image.shape_native == (9, 9)
            assert padded_plane_image == pytest.approx(
                padded_g0_image + padded_g1_image + padded_g2_image, 1.0e-4
            )

        def test__galaxy_image_dict_from_grid(self, sub_grid_2d_7x7):
            """galaxy_image_dict maps each galaxy to its own image, slim and native."""
            g0 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=1.0))
            g1 = ag.Galaxy(
                redshift=0.5,
                mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0),
                light_profile=ag.lp.EllSersic(intensity=2.0),
            )
            g2 = ag.Galaxy(redshift=0.5, light_profile=ag.lp.EllSersic(intensity=3.0))
            g0_image = g0.image_2d_from_grid(grid=sub_grid_2d_7x7)
            g1_image = g1.image_2d_from_grid(grid=sub_grid_2d_7x7)
            g2_image = g2.image_2d_from_grid(grid=sub_grid_2d_7x7)
            plane = ag.Plane(redshift=-0.75, galaxies=[g1, g0, g2])
            image_1d_dict = plane.galaxy_image_dict_from_grid(grid=sub_grid_2d_7x7)
            assert (image_1d_dict[g0].slim == g0_image).all()
            assert (image_1d_dict[g1].slim == g1_image).all()
            assert (image_1d_dict[g2].slim == g2_image).all()
            image_dict = plane.galaxy_image_dict_from_grid(grid=sub_grid_2d_7x7)
            assert (image_dict[g0].native == g0_image.native).all()
            assert (image_dict[g1].native == g1_image.native).all()
            assert (image_dict[g2].native == g2_image.native).all()
class TestConvergence:
    def test__convergence_same_as_multiple_galaxies__include_reshape_mapping(
        self, sub_grid_2d_7x7
    ):
        """The plane convergence, binned over sub-pixels, matches a manual
        sub-gridding average of the mass profiles' convergences."""
        # The *ungalaxyed* sub-grid must be used to compute the convergence. This changes the subgrid to ensure this
        # is the case.
        sub_grid_2d_7x7[5] = np.array([5.0, 2.0])
        g0 = ag.Galaxy(
            redshift=0.5,
            mass_profile=ag.mp.SphIsothermal(
                einstein_radius=1.0, centre=(1.0, 0.0)
            ),
        )
        g1 = ag.Galaxy(
            redshift=0.5,
            mass_profile=ag.mp.SphIsothermal(
                einstein_radius=2.0, centre=(1.0, 1.0)
            ),
        )
        mp0 = g0.mass_profiles[0]
        mp1 = g1.mass_profiles[0]
        # Summed convergence of both profiles at every sub-pixel.
        mp0_sub_convergence = mp0.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
        mp1_sub_convergence = mp1.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
        mp_sub_convergence = mp0_sub_convergence + mp1_sub_convergence
        # Perform sub gridding average manually
        # (each image pixel averages its 4 sub-pixels; sub_size is presumably 2 -- TODO confirm).
        mp_convergence_pixel_0 = (
            mp_sub_convergence[0]
            + mp_sub_convergence[1]
            + mp_sub_convergence[2]
            + mp_sub_convergence[3]
        ) / 4
        mp_convergence_pixel_1 = (
            mp_sub_convergence[4]
            + mp_sub_convergence[5]
            + mp_sub_convergence[6]
            + mp_sub_convergence[7]
        ) / 4
        plane = ag.Plane(galaxies=[g0, g1], redshift=None)
        convergence = plane.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
        # Compare the binned native map's central pixels to the manual averages.
        assert convergence.binned.native[2, 2] == pytest.approx(
            mp_convergence_pixel_0, 1.0e-4
        )
        assert convergence.binned.native[2, 3] == pytest.approx(
            mp_convergence_pixel_1, 1.0e-4
        )
def test__same_as_above_galaxies___use_galaxy_to_compute_convergence(
self, sub_grid_2d_7x7
):
g0 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0)
)
g1 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=2.0)
)
g0_convergence = g0.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
g1_convergence = g1.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
plane = ag.Plane(galaxies=[g0, g1], redshift=None)
convergence = plane.convergence_2d_from_grid(grid=sub_grid_2d_7x7)
assert convergence == pytest.approx(g0_convergence + g1_convergence, 1.0e-8)
def test__convergence_2d_from_grid_as_positions(self, grid_2d_irregular_7x7):
g0 = ag.Galaxy(
redshift=0.5, mass_profile=ag.mp.SphIsothermal(einstein_radius=1.0)
)
g0_convergence = g0.convergence_2d_from_grid(grid=grid_2d_irregular_7x7)
plane = ag.Plane(galaxies=[g0], redshift=None)
convergence = plane.convergence_2d_from_grid(grid=grid_2d_irregular_7x7)
assert convergence.in_list[0] | |
'31'
BoxLayout:
orientation: 'vertical'
padding: [10,40,40,30]
BoxLayout:
size_hint_y: .3
Label:
text:''
font_name: './yahei.ttf'
font_size: 60
markup: True
Button:
background_normal: ''
background_color: 0,0,0,0
text: '咸'
font_size: 60
font_name: './yahei.ttf'
on_release:
root.manager.current = 'menu'
root.manager.transition.direction = 'right'
Button:
background_normal: ''
background_color: 0,0,0,0
text: '运去黄金失色\\n时来棒槌发芽\\n月令极好无差\\n且喜心宽意大'
font_name: './yahei.ttf'
BoxLayout:
Image:
id: 31
source: '31.png'
allow_stretch: False
Label:
text: '咸亨利贞取女吉'
font_name: './yahei.ttf'
BoxLayout:
BoxLayout:
orientation: 'vertical'
canvas:
Color:
rgba: 1,1,1,1
Rectangle:
size: 60, 10
pos: 140, 240
Rectangle:
size: 60, 10
pos: 220, 240
Rectangle:
size: 140, 10
pos: 140, 200
Rectangle:
size: 140, 10
pos: 140, 160
Rectangle:
size: 140, 10
pos: 140, 120
Rectangle:
size: 60, 10
pos: 140, 80
Rectangle:
size: 60, 10
pos: 220, 80
Rectangle:
size: 60, 10
pos: 140, 40
Rectangle:
size: 60, 10
pos: 220, 40
BoxLayout:
orientation: 'vertical'
Label:
text: '上六咸其辅颊舌'
font_name: './yahei.ttf'
Label:
text: '九五咸其脢无悔'
font_name: './yahei.ttf'
Label:
text: '九四贞吉悔亡憧憧往来朋従尔思'
font_name: './yahei.ttf'
Label:
text: '九三咸其股执其随往吝'
font_name: './yahei.ttf'
Label:
text: '六二咸其腓凶居吉'
font_name: './yahei.ttf'
Label:
text: '初六咸其拇'
font_name: './yahei.ttf'
# Detail screen for hexagram 56, "Lü" (the Wanderer).
<Screen56>:
    name: '56'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: spacer label, title button (returns to the menu), fortune poem.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:''
                font_name: './yahei.ttf'
                font_size: 60
                markup: True
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '旅'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '飞鸟树上垒窝巢\\n小人使计举火烧\\n君占此卦为不吉\\n一切谋望枉徒劳'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgment text.
        BoxLayout:
            Image:
                # NOTE(review): a numeric kv id is unusual -- confirm the parser accepts `id: 56`.
                id: 56
                source: '56.png'
                allow_stretch: False
            Label:
                text: '旅小亨旅贞吉'
                font_name: './yahei.ttf'
        # Bottom row: hexagram drawn from rectangles (one long bar = yang,
        # two short bars = yin; top line drawn highest), next to the six line texts.
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 140, 10
                        pos: 140, 240
                    Rectangle:
                        size: 60, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 220, 200
                    Rectangle:
                        size: 140, 10
                        pos: 140, 160
                    Rectangle:
                        size: 140, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 60, 10
                        pos: 140, 40
                    Rectangle:
                        size: 60, 10
                        pos: 220, 40
            # Line statements, listed from line 6 (top) down to line 1 (bottom).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上九鸟焚其巢旅人先笑后号咷丧牛于易凶'
                    font_name: './yahei.ttf'
                Label:
                    text: '六五射雉一矢亡终以誉命'
                    font_name: './yahei.ttf'
                Label:
                    text: '九四旅于处得其资斧我心不快'
                    font_name: './yahei.ttf'
                Label:
                    text: '九三旅焚其次丧其童仆贞厉'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二旅即次怀其资得童仆贞'
                    font_name: './yahei.ttf'
                Label:
                    text: '初六旅琐琐斯其所取灾'
                    font_name: './yahei.ttf'
# Detail screen for hexagram 62, "Xiao Guo" (Small Exceeding).
<Screen62>:
    name: '62'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: spacer label, title button (returns to the menu), fortune poem.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:''
                font_name: './yahei.ttf'
                font_size: 60
                markup: True
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '小过'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '行人路过独木桥\\n心内惶恐眼里瞧\\n爽利保保过得去\\n慢行一定不安牢'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgment text.
        BoxLayout:
            Image:
                # NOTE(review): a numeric kv id is unusual -- confirm the parser accepts `id: 62`.
                id: 62
                source: '62.png'
                allow_stretch: False
            Label:
                text: '小过亨利贞可小事不可大事飞鸟遗之音不宜上宜下大吉'
                font_name: './yahei.ttf'
        # Bottom row: hexagram drawn from rectangles (one long bar = yang,
        # two short bars = yin), next to the six line texts.
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 60, 10
                        pos: 140, 240
                    Rectangle:
                        size: 60, 10
                        pos: 220, 240
                    Rectangle:
                        size: 60, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 220, 200
                    Rectangle:
                        size: 140, 10
                        pos: 140, 160
                    Rectangle:
                        size: 140, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 60, 10
                        pos: 140, 40
                    Rectangle:
                        size: 60, 10
                        pos: 220, 40
            # Line statements, listed from line 6 (top) down to line 1 (bottom).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上六弗遇过之飞鸟离之凶是谓灾眚'
                    font_name: './yahei.ttf'
                Label:
                    text: '六五密云不雨自我西郊公弋取彼在穴'
                    font_name: './yahei.ttf'
                Label:
                    text: '九四无咎弗过遇之往厉必戒勿用永贞'
                    font_name: './yahei.ttf'
                Label:
                    text: '九三弗过防之従或戕之凶'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二过其祖遇其妣不及其君遇其臣无咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '初六飞鸟以凶'
                    font_name: './yahei.ttf'
# Detail screen for hexagram 53, "Jian" (Gradual Progress).
<Screen53>:
    name: '53'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: spacer label, title button (returns to the menu), fortune poem.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:''
                font_name: './yahei.ttf'
                font_size: 60
                markup: True
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '渐'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '俊鸟幸得出笼中\\n脱离灾难显威风\\n一朝得意福力至\\n东西南北任意行'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgment text.
        BoxLayout:
            Image:
                # NOTE(review): a numeric kv id is unusual -- confirm the parser accepts `id: 53`.
                id: 53
                source: '53.png'
                allow_stretch: False
            Label:
                text: '渐女归吉利贞'
                font_name: './yahei.ttf'
        # Bottom row: hexagram drawn from rectangles (one long bar = yang,
        # two short bars = yin), next to the six line texts.
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 140, 10
                        pos: 140, 240
                    Rectangle:
                        size: 140, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 140, 160
                    Rectangle:
                        size: 60, 10
                        pos: 220, 160
                    Rectangle:
                        size: 140, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 60, 10
                        pos: 140, 40
                    Rectangle:
                        size: 60, 10
                        pos: 220, 40
            # Line statements, listed from line 6 (top) down to line 1 (bottom).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上九鸿渐于陆其羽可用为仪吉'
                    font_name: './yahei.ttf'
                Label:
                    text: '九五鸿渐于陵妇三岁不孕终莫之胜吉'
                    font_name: './yahei.ttf'
                Label:
                    text: '六四鸿渐于木或得其桷无咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '九三鸿渐于陆夫征不复妇孕不育凶利御寇'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二鸿渐于磐饮食衎衎吉'
                    font_name: './yahei.ttf'
                Label:
                    text: '初六鸿渐于干小子厉有言无咎'
                    font_name: './yahei.ttf'
# Detail screen for hexagram 39, "Jian" (Obstruction).
<Screen39>:
    name: '39'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: spacer label, title button (returns to the menu), fortune poem.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:''
                font_name: './yahei.ttf'
                font_size: 60
                markup: True
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '蹇'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '大雨倾地雪满天\\n路上行人苦又寒\\n拖泥带水费尽力\\n事不遂心且耐烦'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgment text.
        BoxLayout:
            Image:
                # NOTE(review): a numeric kv id is unusual -- confirm the parser accepts `id: 39`.
                id: 39
                source: '39.png'
                allow_stretch: False
            Label:
                text: '蹇利西南不利东北利见大人贞吉'
                font_name: './yahei.ttf'
        # Bottom row: hexagram drawn from rectangles (one long bar = yang,
        # two short bars = yin), next to the six line texts.
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 60, 10
                        pos: 140, 240
                    Rectangle:
                        size: 60, 10
                        pos: 220, 240
                    Rectangle:
                        size: 140, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 140, 160
                    Rectangle:
                        size: 60, 10
                        pos: 220, 160
                    Rectangle:
                        size: 140, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 60, 10
                        pos: 140, 40
                    Rectangle:
                        size: 60, 10
                        pos: 220, 40
            # Line statements, listed from line 6 (top) down to line 1 (bottom).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上六往蹇来硕吉利见大人'
                    font_name: './yahei.ttf'
                Label:
                    text: '九五大蹇朋来'
                    font_name: './yahei.ttf'
                Label:
                    text: '六四往蹇来连'
                    font_name: './yahei.ttf'
                Label:
                    text: '九三往蹇来反'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二王臣蹇蹇匪躬之故'
                    font_name: './yahei.ttf'
                Label:
                    text: '初六往蹇来誉'
                    font_name: './yahei.ttf'
# Detail screen for hexagram 52, "Gen" (Keeping Still, Mountain).
<Screen52>:
    name: '52'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: spacer label, title button (returns to the menu), fortune poem.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:''
                font_name: './yahei.ttf'
                font_size: 60
                markup: True
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '艮'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '财帛常打心头走\\n可惜眼前难到手\\n不如意时且忍耐\\n逢着闲事休开口'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgment text.
        BoxLayout:
            Image:
                # NOTE(review): a numeric kv id is unusual -- confirm the parser accepts `id: 52`.
                id: 52
                source: '52.png'
                allow_stretch: False
            Label:
                text: '艮其背不获其身行其庭不见其人无咎'
                font_name: './yahei.ttf'
        # Bottom row: hexagram drawn from rectangles (one long bar = yang,
        # two short bars = yin), next to the six line texts.
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 140, 10
                        pos: 140, 240
                    Rectangle:
                        size: 60, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 220, 200
                    Rectangle:
                        size: 60, 10
                        pos: 140, 160
                    Rectangle:
                        size: 60, 10
                        pos: 220, 160
                    Rectangle:
                        size: 140, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 60, 10
                        pos: 140, 40
                    Rectangle:
                        size: 60, 10
                        pos: 220, 40
            # Line statements, listed from line 6 (top) down to line 1 (bottom).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上九敦艮吉'
                    font_name: './yahei.ttf'
                Label:
                    text: '六五艮其辅言有序悔亡'
                    font_name: './yahei.ttf'
                Label:
                    text: '六四艮其身无咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '九三艮其限列其夤厉熏心'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二艮其腓不拯其随其心不快'
                    font_name: './yahei.ttf'
                Label:
                    text: '初六艮其趾无咎利永贞'
                    font_name: './yahei.ttf'
# Detail screen for hexagram 15, "Qian" (Modesty).
<Screen15>:
    name: '15'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: spacer label, title button (returns to the menu), fortune poem.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:''
                font_name: './yahei.ttf'
                font_size: 60
                markup: True
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '谦'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '天赐贫人一封金\\n不争不抢两平分\\n彼此分得金到手\\n一切谋望皆遂心'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgment text.
        BoxLayout:
            Image:
                # NOTE(review): a numeric kv id is unusual -- confirm the parser accepts `id: 15`.
                id: 15
                source: '15.png'
                allow_stretch: False
            Label:
                text: '谦亨君子有终'
                font_name: './yahei.ttf'
        # Bottom row: hexagram drawn from rectangles (one long bar = yang,
        # two short bars = yin), next to the six line texts.
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 60, 10
                        pos: 140, 240
                    Rectangle:
                        size: 60, 10
                        pos: 220, 240
                    Rectangle:
                        size: 60, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 220, 200
                    Rectangle:
                        size: 60, 10
                        pos: 140, 160
                    Rectangle:
                        size: 60, 10
                        pos: 220, 160
                    Rectangle:
                        size: 140, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 60, 10
                        pos: 140, 40
                    Rectangle:
                        size: 60, 10
                        pos: 220, 40
            # Line statements, listed from line 6 (top) down to line 1 (bottom).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上六鸣谦利用行师征邑国'
                    font_name: './yahei.ttf'
                Label:
                    text: '六五不富以其邻利用侵伐无不利'
                    font_name: './yahei.ttf'
                Label:
                    text: '六四无不利捴谦'
                    font_name: './yahei.ttf'
                Label:
                    text: '九三劳谦君子有终吉'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二鸣谦贞吉'
                    font_name: './yahei.ttf'
                Label:
                    text: '初六谦谦君子用涉大川吉'
                    font_name: './yahei.ttf'
<Screen12>:
name: '12'
BoxLayout:
orientation: 'vertical'
padding: [10,40,40,30]
BoxLayout:
size_hint_y: .3
Label:
text:'七月\\n立秋\\n猴'
font_name: './yahei.ttf'
markup: True
Button:
background_normal: ''
background_color: 0,0,0,0
text: '否'
font_size: 60
font_name: './yahei.ttf'
on_release:
root.manager.current = 'menu'
root.manager.transition.direction = 'right'
Button:
background_normal: ''
background_color: 0,0,0,0
text: '虎落陷坑不堪言\\n进前容易退后难\\n谋望不遂自己便\\n疾病口舌事牵连'
font_name: './yahei.ttf'
BoxLayout:
Image:
id: 12
source: '12.png'
allow_stretch: False
Label:
text: '否之匪人不利君子貞大往小來'
font_name: './yahei.ttf'
BoxLayout:
BoxLayout:
orientation: 'vertical'
canvas:
Color:
rgba: 1,1,1,1
Rectangle:
size: 140, 10
pos: 140, 240
Rectangle:
size: 140, 10
pos: | |
import numpy as np
import gym
import copy
import matplotlib.pyplot as plt
from matplotlib import animation
from collections import defaultdict
from dps import cfg
from dps.env.basic import game
from dps.env.env import BatchGymEnv
from dps.utils import Param, square_subplots, create_maze
from dps.train import Hook
from dps.utils.tf import FeedforwardCell, MLP, ScopedFunction
from dps.rl.policy import Policy, ProductDist, SigmoidNormal, Softmax
class CollectBase(game.ObjectGame):
    """Base class for "collect" games: an agent moves around a 2d field,
    gaining reward for touching collectable entities, while obstacles
    (and the field walls) block its movement.
    """
    agent_spec = Param()  # spec dict describing the agent entity
    collectable_specs = Param()  # list of spec dicts for reward-giving entities
    obstacle_specs = Param()  # list of spec dicts for blocking entities
    step_size = Param()  # distance travelled by a full-magnitude action
    time_reward = Param()  # if true, penalize each step per remaining collectable
    discrete_actions = Param()  # discrete (angle, magnitude) vs continuous actions

    def __init__(self, **kwargs):
        # Deep-copy all specs so the per-instance mutations below cannot leak
        # into shared Param defaults.
        self.agent_spec = copy.deepcopy(dict(self.agent_spec))
        self.agent_spec['collectable'] = False
        self.collectable_specs = copy.deepcopy(list(self.collectable_specs))
        self.collectable_specs = [dict(cs) for cs in self.collectable_specs]
        for spec in self.collectable_specs:
            spec['collectable'] = True
        self.obstacle_specs = copy.deepcopy(list(self.obstacle_specs))
        self.obstacle_specs = [dict(os) for os in self.obstacle_specs]
        for spec in self.obstacle_specs:
            spec['collectable'] = False
        # Agent first; each spec's index doubles as its one-hot feature id.
        self.entity_specs = [self.agent_spec] + self.collectable_specs + self.obstacle_specs
        for i, es in enumerate(self.entity_specs):
            es['idx'] = i
        if self.discrete_actions:
            # 8 movement directions x 3 magnitudes.
            action_space = gym.spaces.MultiDiscrete([8, 3])
        else:
            # (y, x, magnitude): direction components in [-1, 1], magnitude in [0, 1].
            action_space = gym.spaces.Box(low=np.array([-1, -1, 0]), high=np.array([1, 1, 1]), dtype=np.float32)
        super(CollectBase, self).__init__(
            action_space=action_space,
            reward_range=(-10, 10),
            entity_feature_dim=len(self.entity_specs),
            **kwargs)

    def move_entities(self, action):
        """Translate `action` into a (y, x) displacement and move the agent (entity 0)."""
        if self.discrete_actions:
            angle_idx, magnitude_idx = action
            angle = angle_idx * 2 * np.pi / 8
            magnitude = [0.1, 0.5, 1.0][int(magnitude_idx)]
            y = self.step_size * magnitude * np.sin(angle)
            x = self.step_size * magnitude * np.cos(angle)
        else:
            y, x, magnitude = action
            y = np.clip(y, -1, 1)
            x = np.clip(x, -1, 1)
            magnitude = np.clip(magnitude, 0, 1)
            # Normalise (y, x) to a unit direction before scaling by magnitude.
            norm = np.sqrt(x**2 + y**2)
            if norm > 1e-6:
                y = self.step_size * magnitude * y / norm
                x = self.step_size * magnitude * x / norm
            else:
                y = x = 0
        return self._move_entity(self.entities[0], y, x)

    def resolve_collision(self, mover, other):
        """ Return (kill, stop, reward) """
        if isinstance(other, str): # wall
            return (False, True, 0)
        else:
            if other.collectable:
                # Collectables are consumed; with time_reward the payoff comes
                # from compute_reward rather than a pickup bonus.
                if self.time_reward:
                    return (True, False, 0)
                else:
                    return (True, False, 1/self.n_collectables)
            else:
                # Obstacles just block movement.
                return (False, True, 0)

    def get_entity_features(self, entity):
        """One-hot feature vector identifying which spec produced `entity`."""
        return [int(entity.idx == i) for i in range(len(self.entity_specs))]

    def compute_reward(self):
        """Per-step reward: normalized -1 per surviving collectable if time_reward, else 0."""
        if self.time_reward:
            return sum([-1 for entity in self.entities if entity.collectable and entity.alive]) / (self.n_collectables * cfg.T)
        else:
            return 0.0
class CollectA(CollectBase):
    """Collect variant A: collectables and obstacles placed uniformly at random."""
    n_collectables = Param()
    n_obstacles = Param()
    max_overlap = Param()  # max allowed overlap when sampling entity positions
    max_entities = None

    def __init__(self, **kwargs):
        self.max_entities = 1 + self.n_collectables + self.n_obstacles
        assert self.n_collectables > 0
        super(CollectA, self).__init__(**kwargs)

    def setup_field(self):
        """Sample random entity specs and positions for a new episode."""
        collectable_specs = list(np.random.choice(self.collectable_specs, size=self.n_collectables, replace=True))
        obstacle_specs = list(np.random.choice(self.obstacle_specs, size=self.n_obstacles, replace=True))
        specs = [self.agent_spec] + collectable_specs + obstacle_specs
        shapes = [spec['shape'] for spec in specs]
        # Positions sampled so that pairwise overlap stays below max_overlap.
        rectangles = game.sample_entities(self.image_shape, shapes, self.max_overlap)
        entities = [game.Entity(**spec) for spec in specs]
        for rect, entity in zip(rectangles, entities):
            entity.top = rect.top
            entity.left = rect.left
        return entities
def build_env():
    """Construct the default training environment: a CollectA game wrapped
    in a batched gym adapter."""
    return BatchGymEnv(gym_env=CollectA())
class CollectB(CollectBase):
    """ Objects placed in a circle concentric with the image, and only one collectable. """
    angle_sep = Param()  # angular offset of each object pair around its base direction
    n_dirs = Param()  # number of base directions; two objects are placed per direction
    max_entities = None

    def __init__(self, **kwargs):
        self.max_entities = 2*self.n_dirs + 1
        self.n_collectables = 1
        super(CollectB, self).__init__(**kwargs)

    def setup_field(self):
        """Agent at the image centre; 2*n_dirs objects on a surrounding circle,
        exactly one of which (in a random slot) is collectable."""
        assert self.image_shape[0] == self.image_shape[1]
        start_angle = np.pi/4
        # Radius chosen so objects remain inside the image bounds.
        radius = int(np.floor(self.image_shape[0] / 2 - self.agent_spec['shape'][0]/2))
        center = (self.image_shape[0]/2, self.image_shape[1]/2)
        centers = []
        for i in range(self.n_dirs):
            angle = start_angle + 2*np.pi * i / self.n_dirs
            # Each base direction gets a pair of objects, +/- angle_sep apart.
            angle1 = angle - self.angle_sep
            angle2 = angle + self.angle_sep
            for angle in [angle1, angle2]:
                y = radius * np.sin(angle) + center[0]
                x = radius * np.cos(angle) + center[1]
                centers.append((y, x))
        collectable_spec = np.random.choice(self.collectable_specs)
        obstacle_specs = list(np.random.choice(self.obstacle_specs, size=2*self.n_dirs-1, replace=True))
        # Shuffle so the collectable's position on the circle is random.
        object_specs = np.random.permutation([collectable_spec] + obstacle_specs)
        agent = game.Entity(**self.agent_spec)
        agent.center = center
        objects = [game.Entity(**spec) for spec in object_specs]
        for center, obj in zip(centers, objects):
            obj.center = center
        return [agent, *objects]
class CollectC(CollectBase):
    """ No obstacles. """
    max_overlap = Param()  # max allowed overlap when sampling entity positions
    n_collectables = Param()
    max_entities = None

    def __init__(self, **kwargs):
        self.max_entities = 1 + self.n_collectables
        super(CollectC, self).__init__(**kwargs)

    def setup_field(self):
        """Sample random positions for the agent and the collectables only."""
        collectable_specs = list(np.random.choice(self.collectable_specs, size=self.n_collectables, replace=True))
        specs = [self.agent_spec] + collectable_specs
        shapes = [spec['shape'] for spec in specs]
        rectangles = game.sample_entities(self.image_shape, shapes, self.max_overlap)
        entities = [game.Entity(**spec) for spec in specs]
        for rect, entity in zip(rectangles, entities):
            entity.top = rect.top
            entity.left = rect.left
        return entities
class CollectD(CollectBase):
    """ Same as CollectA, but obstacles are arranged into a maze. """
    n_collectables = Param()
    n_obstacles = Param()
    max_overlap = Param()
    max_entities = None

    def __init__(self, **kwargs):
        self.max_entities = 1 + self.n_collectables + self.n_obstacles
        assert self.n_collectables > 0
        super(CollectD, self).__init__(**kwargs)

    def setup_field(self):
        """Sample a maze at agent resolution; obstacles are placed on maze
        walls, the agent and collectables in the open cells."""
        agent_shape = self.agent_spec['shape']
        # One maze cell per agent-sized patch of the image.
        maze_shape = (
            int(np.ceil(self.image_shape[0] / agent_shape[0])),
            int(np.ceil(self.image_shape[1] / agent_shape[1])))
        maze = create_maze(maze_shape)
        collectable_specs = list(np.random.choice(self.collectable_specs, size=self.n_collectables, replace=True))
        obstacle_specs = list(np.random.choice(self.obstacle_specs, size=self.n_obstacles, replace=True))
        specs = [self.agent_spec] + collectable_specs + obstacle_specs
        shapes = [spec['shape'] for spec in specs]
        maze = maze[None, :, :]
        # Placement masks: agent + collectables restricted to free cells (1-maze),
        # obstacles restricted to wall cells (maze).
        masks = np.concatenate(
            [np.tile(1-maze, (self.n_collectables+1, 1, 1)),
             np.tile(maze, (self.n_obstacles, 1, 1))],
            axis=0)
        rectangles = game.sample_entities(self.image_shape, shapes, self.max_overlap, masks=masks)
        entities = [game.Entity(**spec) for spec in specs]
        for rect, entity in zip(rectangles, entities):
            entity.top = rect.top
            entity.left = rect.left
        return entities
class RolloutsHook(Hook):
    """Training hook that periodically evaluates the current policy on a
    separately constructed environment, recording average reward per episode
    and (optionally) saving an animated gif of the rollouts.
    """
    def __init__(self, env_class, plot_step=None, env_kwargs=None, **kwargs):
        self.env_class = env_class
        self.env_kwargs = env_kwargs or {}
        # Name the hook after the env class and its kwargs, e.g. "CollectB_n_dirs=4".
        kwarg_string = "_".join("{}={}".format(k, v) for k, v in self.env_kwargs.items())
        name = env_class.__name__ + ("_" + kwarg_string if kwarg_string else "")
        self.name = name.replace(" ", "_")
        self.plot_step = plot_step
        super(RolloutsHook, self).__init__(final=True, **kwargs)

    def start_stage(self, training_loop, updater, stage_idx):
        """Build a fresh evaluation environment at the start of every stage."""
        gym_env = self.env_class(**self.env_kwargs)
        self.env = BatchGymEnv(gym_env=gym_env)

    def plot(self, updater, rollouts):
        """Save the given rollouts as an animated gif, one subplot per rollout."""
        plt.ion()
        if updater.env.gym_env.image_obs:
            obs = rollouts.obs
        else:
            obs = rollouts.image
        fig, axes = square_subplots(rollouts.batch_size, figsize=(5, 5))
        plt.subplots_adjust(top=0.95, bottom=0, left=0, right=1, wspace=0.1, hspace=0.1)
        images = []
        for i, ax in enumerate(axes.flatten()):
            ax.set_aspect("equal")
            ax.set_axis_off()
            image = ax.imshow(np.zeros(obs.shape[2:]))
            images.append(image)

        def animate(t):
            # Update every subplot to frame t of its rollout.
            for i in range(rollouts.batch_size):
                images[i].set_array(obs[t, i, :, :, :])

        anim = animation.FuncAnimation(fig, animate, frames=len(rollouts), interval=500)
        path = updater.exp_dir.path_for('plots', '{}_animation.gif'.format(self.name))
        anim.save(path, writer='imagemagick')
        plt.close(fig)

    def step(self, training_loop, updater, step_idx=None):
        """Run cfg.n_val_rollouts evaluation episodes (in batches) and return
        the average reward per episode, keyed by the hook's name."""
        n_rollouts = cfg.n_val_rollouts
        batch_size = cfg.batch_size
        record = defaultdict(float)
        n_iters = int(np.ceil(n_rollouts / batch_size))
        for it in range(n_iters):
            # Last batch may be smaller than batch_size.
            n_remaining = n_rollouts - it * batch_size
            _batch_size = min(batch_size, n_remaining)
            for learner in updater.learners:
                with learner:
                    rollouts = self.env.do_rollouts(policy=learner.pi, n_rollouts=_batch_size, T=cfg.T, mode='val')
                # Accumulate a size-weighted sum; divided by n_rollouts below.
                key = "{}-reward_per_ep".format(self.name)
                record[key] += _batch_size * rollouts.rewards.sum(0).mean()
            if it == 0 and self.plot_step and (step_idx is None or step_idx % self.plot_step == 0):
                self.plot(updater, rollouts)
        return dict(val={k: v / n_rollouts for k, v in record.items()})
# Entity specs shared by all the environment variants below.
colors = "red green blue"  # palette an entity's colour is drawn from
agent_spec = dict(appearance="star", color="black", z=100, shape=(10, 10))
entity_size = (10, 10)
noise_res = getattr(cfg, 'noise_res', None)  # optional appearance-noise resolution
# "x" entities are collectable; every other shape is an obstacle.
collectable_specs = [dict(appearance="x", color=colors)]
obstacle_specs = [
    dict(appearance="circle", color=colors),
    dict(appearance="ud_triangle", color=colors),
    dict(appearance="triangle", color=colors),
    dict(appearance="plus", color=colors),
    dict(appearance="diamond", color=colors),
]
# All non-agent entities share the same size and noise settings.
for es in collectable_specs + obstacle_specs:
    es.update(shape=entity_size, noise_res=noise_res)
# Evaluation hooks fire every hook_step updates (and once at the start).
hook_step = 1000
hook_kwargs = dict(n=hook_step, plot_step=hook_step, initial=True)
# env config
# Train on CollectA (random placement); the hooks periodically evaluate the
# policy on the other variants and on larger / denser fields.
config = game.config.copy(
    env_name="collect",
    n_collectables=5,
    n_obstacles=5,
    agent_spec=agent_spec,
    collectable_specs=collectable_specs,
    obstacle_specs=obstacle_specs,
    build_env=build_env,
    image_shape=(48, 48), background_colour="white", max_overlap=0.25, step_size=14,
    hooks=[
        RolloutsHook(env_class=CollectB, env_kwargs=dict(n_dirs=4), **hook_kwargs),
        RolloutsHook(env_class=CollectB, env_kwargs=dict(n_dirs=5), **hook_kwargs),
        RolloutsHook(env_class=CollectB, env_kwargs=dict(n_dirs=6), **hook_kwargs),
        RolloutsHook(env_class=CollectB, env_kwargs=dict(n_dirs=7), **hook_kwargs),
        RolloutsHook(env_class=CollectB, env_kwargs=dict(n_dirs=8), **hook_kwargs),
        RolloutsHook(env_class=CollectC, env_kwargs=dict(n_collectables=5), **hook_kwargs),
        RolloutsHook(env_class=CollectC, env_kwargs=dict(n_collectables=10), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(n_collectables=6, n_obstacles=6), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(n_collectables=7, n_obstacles=7), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(n_collectables=8, n_obstacles=8), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(n_collectables=9, n_obstacles=9), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(n_collectables=10, n_obstacles=10), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=5, n_obstacles=5), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=6, n_obstacles=6), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=7, n_obstacles=7), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=8, n_obstacles=8), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=9, n_obstacles=9), **hook_kwargs),
        RolloutsHook(env_class=CollectA, env_kwargs=dict(image_shape=(72, 72), n_collectables=10, n_obstacles=10), **hook_kwargs),
    ],
    angle_sep=np.pi/16,
    discrete_actions=True,
    time_reward=False,
    eval_step=1000,
    display_step=1000,
)
class Backbone(ScopedFunction):
    """Feedforward function: a backbone feature extractor followed by an MLP head."""
    backbone = None  # lazily constructed feature extractor
    mlp = None  # lazily constructed output head

    def _call(self, inp, output_size, is_training):
        # NOTE(review): this instantiates Backbone() inside Backbone._call, so
        # calling it would recurse without bound -- presumably a different
        # Backbone class (e.g. from dps.utils.tf) was intended; confirm.
        if self.backbone is None:
            self.backbone = Backbone()
        if self.mlp is None:
            self.mlp = MLP([100, 100])
        outp = self.backbone(inp, 0, is_training)
        outp = self.mlp(outp, output_size, is_training)
        return outp
def build_attentional_relation_network(output_size, name):
    """Build a controller cell from an attentional relation network."""
    from dps.utils.tf import AttentionalRelationNetwork
    network = AttentionalRelationNetwork(n_repeats=2, scope="collection_controller")
    return FeedforwardCell(network, output_size, name=name)
def build_object_network_controller(output_size, name):
    """Build a controller cell from an object network."""
    from dps.utils.tf import ObjectNetwork
    network = ObjectNetwork(n_repeats=1, scope="collection_controller")
    return FeedforwardCell(network, output_size, name=name)
def build_controller(output_size, name):
    """Build the controller selected by cfg.controller_type ("arn" or "obj")."""
    builders = {
        "arn": build_attentional_relation_network,
        "obj": build_object_network_controller,
    }
    builder = builders.get(cfg.controller_type)
    if builder is None:
        raise Exception("Unknown controller_type: {}".format(cfg.controller_type))
    return builder(output_size, name)
def build_policy(env, **kwargs):
    """Build a policy whose action distribution matches the env's action space."""
    if cfg.discrete_actions:
        # (direction, magnitude): a pair of categorical distributions.
        action_selection = ProductDist(
            Softmax(8, one_hot=False), Softmax(3, one_hot=False))
    else:
        # (y, x, magnitude): continuous actions squashed into their valid ranges.
        action_selection = ProductDist(
            SigmoidNormal(-1, 1, explore=cfg.explore),
            SigmoidNormal(-1, 1, explore=cfg.explore),
            SigmoidNormal(0, 1, explore=cfg.explore),)
    return Policy(action_selection, env.obs_shape, **kwargs)
# alg config
# Network/controller settings layered on top of the env config above.
config.update(
    build_controller=build_controller,
    controller_type="obj",  # "obj" (object network) or "arn" (attentional relation net)
    d=128,
    # d=256,
    layer_norm=True,
    symmetric_op="max",
    use_mask=True,

    # For obj
    # build_on_input_network=lambda scope: MLP([128, 128], scope=scope),
    # build_on_object_network=lambda scope: MLP([128, 128], scope=scope),
    # build_on_output_network=lambda scope: MLP([128, 128, 128], scope=scope),

    build_on_input_network=lambda scope: MLP([128], scope=scope),
    build_on_object_network=lambda scope: MLP([128], scope=scope),
    build_on_output_network=lambda scope: MLP([128, 128], scope=scope),

    # For arn
    build_arn_network=lambda scope: MLP([128, 128], scope=scope),
    build_arn_object_network=lambda scope: MLP([128, 128], scope=scope),
    n_heads=1,

    exploration_schedule=1.0,
    val_exploration_schedule=1.0,
    build_policy=build_policy,
)
if __name__ == "__main__":
with config:
env = build_env().gym_env
agent = game.RandomAgent(env.action_space)
game.do_rollouts(
env, agent, render=True,
callback=lambda action, reward, **kwargs: print("Action: {}, | |
0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0],
[1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1],
[0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
[1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0],
[0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0],
[1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1],
[0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, | |
    def create_xpub_wallet(self, xpub):
        """Create a watching-only wallet from a master public key (xpub)."""
        account = BIP32_Account({'xpub':xpub})
        self.storage.put('seed_version', self.seed_version)
        self.add_master_public_key(self.root_name, xpub)
        self.add_account('0', account)
class BIP32_RD_Wallet(BIP32_Wallet):
    # Abstract base class for a BIP32 wallet with a self.root_derivation

    @classmethod
    def account_derivation(cls, account_id):
        """Return the BIP32 derivation path of *account_id* under the root."""
        # `cls` (not `self`) is conventional for classmethods; behavior unchanged.
        return cls.root_derivation + account_id

    @classmethod
    def address_derivation(cls, account_id, change, address_index):
        """Return the full derivation path of a single address."""
        account_derivation = cls.account_derivation(account_id)
        return "%s/%d/%d" % (account_derivation, change, address_index)

    def address_id(self, address):
        """Return the derivation path of *address* within this wallet."""
        acc_id, (change, address_index) = self.get_address_index(address)
        return self.address_derivation(acc_id, change, address_index)

    def add_xprv_from_seed(self, seed, name, password, passphrase=''):
        # we don't store the seed, only the master xpriv
        xprv, xpub = bip32_root(self.mnemonic_to_seed(seed, passphrase))
        xprv, xpub = bip32_private_derivation(xprv, "m/", self.root_derivation)
        self.add_master_public_key(name, xpub)
        self.add_master_private_key(name, xprv, password)

    def add_xpub_from_seed(self, seed, name):
        # store only master xpub (watching-only: private key is discarded)
        xprv, xpub = bip32_root(self.mnemonic_to_seed(seed, ''))
        xprv, xpub = bip32_private_derivation(xprv, "m/", self.root_derivation)
        self.add_master_public_key(name, xpub)

    def create_master_keys(self, password):
        """Derive and store the root master keys from the stored seed."""
        seed = self.get_seed(password)
        self.add_xprv_from_seed(seed, self.root_name, password)
class BIP32_HD_Wallet(BIP32_RD_Wallet):
    # Abstract base class for a BIP32 wallet that admits account creation
    def __init__(self, storage):
        BIP32_Wallet.__init__(self, storage)
        # Backwards-compatibility. Remove legacy "next_account2" and
        # drop unused master public key to avoid duplicate errors
        acc2 = storage.get('next_account2', None)
        if acc2:
            # acc2[0] is the pending account id; its pre-registered key
            # is removed and the cleaned map written back to storage.
            self.master_public_keys.pop(self.root_name + acc2[0] + "'", None)
            storage.put('next_account2', None)
            storage.put('master_public_keys', self.master_public_keys)
    def next_account_number(self):
        # Invariant: account ids are the contiguous decimal strings
        # '0' .. 'N-1', so the next id equals the current count.
        assert (set(self.accounts.keys()) ==
                set(['%d' % n for n in range(len(self.accounts))]))
        return len(self.accounts)
    def show_account(self, account_id):
        # An account is shown once it has been used or has a user label.
        return self.account_is_used(account_id) or account_id in self.labels
    def last_account_id(self):
        return '%d' % (self.next_account_number() - 1)
    def accounts_to_show(self):
        # The last account is shown only if named or used
        result = list(self.accounts.keys())
        last_id = self.last_account_id()
        if not self.show_account(last_id):
            result.remove(last_id)
        return result
    def can_create_accounts(self):
        # Requires the root private key; watching-only wallets cannot.
        return self.root_name in self.master_private_keys.keys()
    def permit_account_naming(self):
        return (self.can_create_accounts() and
                not self.show_account(self.last_account_id()))
    def create_hd_account(self, password):
        # First check the password is valid (this raises if it isn't).
        if self.can_change_password():
            self.check_password(password)
        assert self.next_account_number() == 0
        self.create_next_account(password, _('Main account'))
        self.create_next_account(password)
    def create_next_account(self, password, label=None):
        account_id = '%d' % self.next_account_number()
        derivation = self.account_derivation(account_id)
        root_name = self.root_derivation.split('/')[0] # NOT self.root_name!
        xpub, xprv = self.derive_xkeys(root_name, derivation, password)
        wallet_key = self.root_name + account_id + "'"
        self.add_master_public_key(wallet_key, xpub)
        if xprv:
            self.add_master_private_key(wallet_key, xprv, password)
        account = BIP32_Account({'xpub':xpub})
        self.add_account(account_id, account)
        if label:
            self.set_label(account_id, label)
        self.save_accounts()
    def account_is_used(self, account_id):
        return self.accounts[account_id].is_used(self)
    def accounts_all_used(self):
        return all(self.account_is_used(acc_id) for acc_id in self.accounts)
class BIP44_Wallet(BIP32_HD_Wallet):
    # Multi-account wallet following BIP44 paths (hardened coin type 5').
    root_derivation = "m/44'/5'/"
    wallet_type = 'bip44'
    @classmethod
    def account_derivation(self, account_id):
        # BIP44 accounts are hardened: m/44'/5'/<account>'
        return self.root_derivation + account_id + "'"
    def can_sign_xpubkey(self, x_pubkey):
        xpub, sequence = BIP32_Account.parse_xpubkey(x_pubkey)
        return xpub in self.master_public_keys.values()
    def can_create_accounts(self):
        return not self.is_watching_only()
    @staticmethod
    def normalize_passphrase(passphrase):
        # NFKD-normalise; None is treated as the empty passphrase.
        # NOTE: `unicode` => this module targets Python 2.
        return normalize('NFKD', unicode(passphrase or ''))
    @staticmethod
    def mnemonic_to_seed(mnemonic, passphrase):
        # See BIP39
        import pbkdf2, hashlib, hmac
        PBKDF2_ROUNDS = 2048
        # Collapse whitespace in the mnemonic, then stretch with
        # PBKDF2-HMAC-SHA512; 'mnemonic' + passphrase is the BIP39 salt.
        mnemonic = normalize('NFKD', ' '.join(mnemonic.split()))
        passphrase = BIP44_Wallet.normalize_passphrase(passphrase)
        return pbkdf2.PBKDF2(mnemonic, 'mnemonic' + passphrase,
                             iterations = PBKDF2_ROUNDS, macmodule = hmac,
                             digestmodule = hashlib.sha512).read(64)
    def derive_xkeys(self, root, derivation, password):
        # The `root` argument is ignored here; derivation is re-rooted at
        # self.root_name before deriving.
        root = self.root_name
        derivation = derivation.replace(self.root_derivation, root)
        x = self.master_private_keys.get(root)
        if x:
            root_xprv = pw_decode(x, password)
            xprv, xpub = bip32_private_derivation(root_xprv, root, derivation)
            return xpub, xprv
        else:
            # Watching-only: public derivation only, no xprv available.
            root_xpub = self.master_public_keys.get(root)
            xpub = bip32_public_derivation(root_xpub, root, derivation)
            return xpub, None
class NewWallet(BIP32_RD_Wallet, Mnemonic):
    """The standard single-account Electrum wallet."""
    root_derivation = "m/"
    wallet_type = 'standard'

    def create_main_account(self):
        # Account '0' wraps the master public key stored under "x/".
        self.add_account(
            '0', BIP32_Account({'xpub': self.master_public_keys.get("x/")}))

    def can_import(self):
        # Importing keys requires that we hold private keys ourselves.
        return not self.is_watching_only()
class Multisig_Wallet(BIP32_RD_Wallet, Mnemonic):
    """Generic m-of-n multisignature wallet."""
    root_name = "x1/"
    root_derivation = "m/"

    def __init__(self, storage):
        BIP32_Wallet.__init__(self, storage)
        self.wallet_type = storage.get('wallet_type')
        self.m, self.n = Wallet.multisig_type(self.wallet_type)

    def load_accounts(self):
        # Only account '0' exists for multisig wallets.  Older files stored
        # cosigner xpubs as 'xpub', 'xpub2', 'xpub3'; fold them into the
        # modern 'xpubs' list before building the account.
        self.accounts = {}
        stored = self.storage.get('accounts', {}).get('0')
        if not stored:
            return
        if stored.get('xpub3'):
            stored['xpubs'] = [stored['xpub'], stored['xpub2'], stored['xpub3']]
        elif stored.get('xpub2'):
            stored['xpubs'] = [stored['xpub'], stored['xpub2']]
        self.accounts = {'0': Multisig_Account(stored)}

    def create_main_account(self):
        keys = self.master_public_keys.values()
        self.add_account('0', Multisig_Account({'xpubs': keys, 'm': self.m}))

    def get_master_public_keys(self):
        return self.master_public_keys

    def get_action(self):
        # Walk the cosigner slots x1/ .. xn/; the first missing one tells
        # the UI what to do next.  Falls through to None when complete.
        for i in range(self.n):
            if self.master_public_keys.get("x%d/" % (i + 1)) is None:
                return 'create_seed' if i == 0 else 'add_cosigners'
        if not self.accounts:
            return 'create_main_account'

    def get_fingerprint(self):
        # Order-independent fingerprint over all cosigner xpubs.
        return ''.join(sorted(self.get_master_public_keys().values()))
class OldWallet(Deterministic_Wallet):
    # Legacy (pre-BIP32) Electrum wallet.  NOTE: relies on Python 2
    # str semantics, e.g. seed.decode('hex') below.
    wallet_type = 'old'
    def __init__(self, storage):
        Deterministic_Wallet.__init__(self, storage)
        self.gap_limit = storage.get('gap_limit', 5)
    def make_seed(self):
        import old_mnemonic
        # 128 bits of entropy encoded with the legacy word list.
        seed = random_seed(128)
        return ' '.join(old_mnemonic.mn_encode(seed))
    def format_seed(self, seed):
        import old_mnemonic
        # see if seed was entered as hex
        seed = seed.strip()
        if seed:
            try:
                seed.decode('hex')  # Python 2 hex-string validity check
                return OLD_SEED_VERSION, str(seed)
            except Exception:
                pass
        # Not hex: treat it as a legacy mnemonic phrase.
        words = seed.split()
        seed = old_mnemonic.mn_decode(words)
        if not seed:
            raise Exception("Invalid seed")
        return OLD_SEED_VERSION, seed
    def create_master_keys(self, password):
        seed = self.get_seed(password)
        mpk = OldAccount.mpk_from_seed(seed)
        self.storage.put('master_public_key', mpk)
    def get_master_public_key(self):
        return self.storage.get("master_public_key")
    def get_master_public_keys(self):
        return {'Main Account':self.get_master_public_key()}
    def create_main_account(self):
        mpk = self.storage.get("master_public_key")
        self.create_account(mpk)
    def create_account(self, mpk):
        # Legacy accounts track receiving (0) and change (1) chains.
        self.accounts['0'] = OldAccount({'mpk':mpk, 0:[], 1:[]})
        self.save_accounts()
    def create_watching_only_wallet(self, mpk):
        self.seed_version = OLD_SEED_VERSION
        self.storage.put('seed_version', self.seed_version)
        self.storage.put('master_public_key', mpk)
        self.create_account(mpk)
    def get_seed(self, password):
        # Decrypt and return the stored seed as a UTF-8 byte string.
        seed = pw_decode(self.seed, password).encode('utf8')
        return seed
    def check_password(self, password):
        # Raises if the decrypted seed does not match account '0'.
        seed = self.get_seed(password)
        self.accounts['0'].check_seed(seed)
    def get_mnemonic(self, password):
        import old_mnemonic
        s = self.get_seed(password)
        return ' '.join(old_mnemonic.mn_encode(s))
# Registry record describing one supported wallet flavour:
# category (UI grouping), type (storage tag), constructor (wallet class).
WalletType = namedtuple("WalletType", "category type constructor")
# former WalletFactory
class Wallet(object):
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
wallets = [ # category type constructor
WalletType('standard', 'old', OldWallet),
WalletType('standard', 'xpub', BIP32_Simple_Wallet),
WalletType('standard', 'standard', NewWallet),
WalletType('standard', 'imported', Imported_Wallet),
WalletType('multisig', '2of2', Multisig_Wallet),
WalletType('multisig', '2of3', Multisig_Wallet),
WalletType('bip44', 'bip44', BIP44_Wallet),
]
def __new__(self, storage):
    """Instantiate the concrete wallet class matching `storage`."""
    seed_version = storage.get('seed_version')
    if not seed_version:
        # Legacy files have no seed_version; a 128-hex-char master public
        # key identifies an old-format wallet.
        seed_version = OLD_SEED_VERSION if len(storage.get('master_public_key','')) == 128 else NEW_SEED_VERSION
    if seed_version not in [OLD_SEED_VERSION, NEW_SEED_VERSION]:
        msg = "Your wallet has an unsupported seed version."
        msg += '\n\nWallet file: %s' % os.path.abspath(storage.path)
        if seed_version in [5, 7, 8, 9, 10]:
            msg += "\n\nTo open this wallet, try 'git checkout seed_v%d'"%seed_version
        if seed_version == 6:
            # version 1.9.8 created v6 wallets when an incorrect seed was entered in the restore dialog
            msg += '\n\nThis file was created because of a bug in version 1.9.8.'
            if storage.get('master_public_keys') is None and storage.get('master_private_keys') is None and storage.get('imported_keys') is None:
                # pbkdf2 was not included with the binaries, and wallet creation aborted.
                msg += "\nIt does not contain any keys, and can safely be removed."
            else:
                # creation was complete if electrum was run from source
                msg += "\nPlease open this file with Electrum 1.9.8, and move your coins to a new wallet."
        raise BaseException(msg)
    wallet_type = storage.get('wallet_type')
    WalletClass = Wallet.wallet_class(wallet_type, seed_version)
    wallet = WalletClass(storage)
    # Convert hardware wallets restored with older versions of
    # Electrum to BIP44 wallets. A hardware wallet does not have
    # a seed and plugins do not need to handle having one.
    rwc = getattr(wallet, 'restore_wallet_class', None)
    if rwc and storage.get('seed', ''):
        storage.print_error("converting wallet type to " + rwc.wallet_type)
        storage.put('wallet_type', rwc.wallet_type)
        wallet = rwc(storage)
    return wallet
@staticmethod
def categories():
    """Return the category tag of every registered wallet kind."""
    result = []
    for registered in Wallet.wallets:
        result.append(registered.category)
    return result
@staticmethod
def register_plugin_wallet(category, type, constructor):
    # Lets plugins (e.g. hardware wallets) extend the wallet registry.
    Wallet.wallets.append(WalletType(category, type, constructor))
@staticmethod
def wallet_class(wallet_type, seed_version):
    """Map a stored wallet_type (and seed version) to a wallet class."""
    if not wallet_type:
        # No explicit type recorded: decide from the seed version alone.
        return OldWallet if seed_version == OLD_SEED_VERSION else NewWallet
    if Wallet.multisig_type(wallet_type):
        return Multisig_Wallet
    for registered in Wallet.wallets:
        if registered.type == wallet_type:
            return registered.constructor
    raise RuntimeError("Unknown wallet type: " + wallet_type)
@staticmethod
def is_seed(seed):
    # Accept both legacy and new-style mnemonic seeds.
    return is_old_seed(seed) or is_new_seed(seed)
@staticmethod
def is_mpk(text):
    # Master public key in either legacy hex or BIP32 xpub form.
    return Wallet.is_old_mpk(text) or Wallet.is_xpub(text)
@staticmethod
def is_old_mpk(mpk):
    """Return True iff `mpk` looks like a legacy master public key:
    a 128-character hex string."""
    try:
        int(mpk, 16)
    except (TypeError, ValueError):
        # Not parseable as hex (or not a string at all).  Narrowed from a
        # bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        return False
    return len(mpk) == 128
@staticmethod
def is_xpub(text):
    """Return True iff `text` parses as a BIP32 extended public key."""
    if text[0:4] != 'xpub':
        return False
    try:
        deserialize_xkey(text)
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; deserialize_xkey's exact exception
        # types are not visible from here, so Exception is the floor.
        return False
@staticmethod
def is_xprv(text):
    """Return True iff `text` parses as a BIP32 extended private key."""
    if text[0:4] != 'xprv':
        return False
    try:
        deserialize_xkey(text)
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return False
@staticmethod
def is_address(text):
    # True iff `text` is one or more whitespace-separated valid addresses.
    parts = text.split()
    return bool(parts) and all(bitcoin.is_address(x) for x in parts)
@staticmethod
def is_private_key(text):
    # True iff `text` is one or more whitespace-separated private keys.
    parts = text.split()
    return bool(parts) and all(bitcoin.is_private_key(x) for x in parts)
@staticmethod
def is_any(text):
    # Whether `text` is any recognised wallet-restoration input.
    return (Wallet.is_seed(text) or Wallet.is_old_mpk(text)
            or Wallet.is_xprv(text) or Wallet.is_xpub(text)
            or Wallet.is_address(text) or Wallet.is_private_key(text))
@staticmethod
def should_encrypt(text):
    # Secret material (seeds, xprvs, private keys) must be stored encrypted.
    return (Wallet.is_seed(text) or Wallet.is_xprv(text)
            or Wallet.is_private_key(text))
@staticmethod
def multisig_type(wallet_type):
    '''If wallet_type is mofn multi-sig, return [m, n],
    otherwise return None.'''
    # Raw string: "\d" in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, an error in future Python versions).
    match = re.match(r'(\d+)of(\d+)', wallet_type)
    if match:
        match = [int(x) for x in match.group(1, 2)]
    return match
@staticmethod
def from_seed(seed, password, storage):
    """Create and key a wallet of the class matching the seed format."""
    if is_old_seed(seed):
        klass = OldWallet
    elif is_new_seed(seed):
        klass = NewWallet
    else:
        # Previously fell through with `klass` unbound, raising an opaque
        # UnboundLocalError; fail with a clear message instead.
        raise BaseException('Invalid seed')
    w = klass(storage)
    w.add_seed(seed, password)
    w.create_master_keys(password)
    return w
@staticmethod
def from_address(text, storage):
    # Watching-only wallet from whitespace-separated addresses.
    w = Imported_Wallet(storage)
    for x in text.split():
        # No pubkey/privkey/txin_type known for bare addresses.
        w.accounts[IMPORTED_ACCOUNT].add(x, None, None, None)
    w.save_accounts()
    return w
@staticmethod
def from_private_key(text, password, storage):
    # Import whitespace-separated private keys into a fresh wallet,
    # encrypting them under `password`.
    w = Imported_Wallet(storage)
    w.update_password(None, password)
    for x in text.split():
        w.import_key(x, password)
    return w
@staticmethod
def from_old_mpk(mpk, storage):
    # Watching-only legacy wallet from a 128-hex-char master public key.
    w = OldWallet(storage)
    w.seed = ''
    w.create_watching_only_wallet(mpk)
    return w
@staticmethod
def from_xpub(xpub, storage):
    # Watching-only BIP32 wallet from an extended public key.
    w = BIP32_Simple_Wallet(storage)
    w.create_xpub_wallet(xpub)
    return w
@staticmethod
def from_xprv(xprv, password, storage):
    # Spending-capable BIP32 wallet from an extended private key,
    # encrypted under `password`.
    w = BIP32_Simple_Wallet(storage)
    w.create_xprv_wallet(xprv, password)
    return w
@staticmethod
| |
<filename>whoville/cloudbreak/apis/v3utils_api.py
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V3utilsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    # Use the supplied client if given; otherwise fall back to (and
    # lazily create) the shared client held by the global Configuration.
    config = Configuration()
    if api_client:
        self.api_client = api_client
    else:
        if not config.api_client:
            config.api_client = ApiClient()
        self.api_client = config.api_client
def check_client_version_v3(self, version, **kwargs):
    """
    checks the client version
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.check_client_version_v3(version, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str version: (required)
    :return: VersionCheckResult
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the (data, status, headers) tuple for the caller.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Async path: the _with_http_info variant returns the request thread.
        return self.check_client_version_v3_with_http_info(version, **kwargs)
    else:
        (data) = self.check_client_version_v3_with_http_info(version, **kwargs)
        return data
def check_client_version_v3_with_http_info(self, version, **kwargs):
    """
    checks the client version
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.check_client_version_v3_with_http_info(version, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str version: (required)
    :return: VersionCheckResult
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['version']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Reject any keyword argument not in the generated whitelist above.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method check_client_version_v3" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'version' is set
    if ('version' not in params) or (params['version'] is None):
        raise ValueError("Missing the required parameter `version` when calling `check_client_version_v3`")
    collection_formats = {}
    # `version` is substituted into the {version} path template below.
    path_params = {}
    if 'version' in params:
        path_params['version'] = params['version']
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/v3/utils/client/{version}', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='VersionCheckResult',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def create_rds_database_util_v3(self, **kwargs):
    """
    create a database for the service in the RDS if the connection could be created
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.create_rds_database_util_v3(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param RDSBuildRequest body:
    :param list[str] target:
    :return: RdsBuildResult
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the (data, status, headers) tuple for the caller.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Async path: the _with_http_info variant returns the request thread.
        return self.create_rds_database_util_v3_with_http_info(**kwargs)
    else:
        (data) = self.create_rds_database_util_v3_with_http_info(**kwargs)
        return data
def create_rds_database_util_v3_with_http_info(self, **kwargs):
    """
    create a database for the service in the RDS if the connection could be created
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.create_rds_database_util_v3_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param RDSBuildRequest body:
    :param list[str] target:
    :return: RdsBuildResult
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['body', 'target']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Reject any keyword argument not in the generated whitelist above.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_rds_database_util_v3" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    if 'target' in params:
        query_params.append(('target', params['target']))
        # 'multi' => repeat ?target=... once per list element.
        collection_formats['target'] = 'multi'
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/v3/utils/rds-database', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='RdsBuildResult',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_cloud_storage_matrix_v3(self, **kwargs):
    """
    returns supported cloud storage for stack version
    Define stack version at least at patch level eg. 2.6.0
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_cloud_storage_matrix_v3(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str stack_version:
    :return: list[CloudStorageSupportedResponse]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the (data, status, headers) tuple for the caller.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Async path: the _with_http_info variant returns the request thread.
        return self.get_cloud_storage_matrix_v3_with_http_info(**kwargs)
    else:
        (data) = self.get_cloud_storage_matrix_v3_with_http_info(**kwargs)
        return data
def get_cloud_storage_matrix_v3_with_http_info(self, **kwargs):
    """
    returns supported cloud storage for stack version
    Define stack version at least at patch level eg. 2.6.0
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_cloud_storage_matrix_v3_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str stack_version:
    :return: list[CloudStorageSupportedResponse]
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['stack_version']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Reject any keyword argument not in the generated whitelist above.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_cloud_storage_matrix_v3" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    if 'stack_version' in params:
        # Python snake_case param maps to the camelCase query key.
        query_params.append(('stackVersion', params['stack_version']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['tokenAuth']
    return self.api_client.call_api('/v3/utils/cloudstoragematrix', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='list[CloudStorageSupportedResponse]',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_stack_matrix_util_v3(self, **kwargs):
    """
    returns default ambari details for distinct HDP and HDF
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.get_stack_matrix_util_v3(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :return: StackMatrix
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the (data, status, headers) tuple for the caller.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Async path: the _with_http_info variant returns the request thread.
        return self.get_stack_matrix_util_v3_with_http_info(**kwargs)
    else:
        (data) = self.get_stack_matrix_util_v3_with_http_info(**kwargs)
        return data
def get_stack_matrix_util_v3_with_http_info(self, **kwargs):
"""
returns default ambari details for distinct HDP and HDF
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_stack_matrix_util_v3_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: StackMatrix
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in | |
return passwd.GetMyHomeDir()
return passwd.GetHomeDir(token)
def _EvalVarNum(self, var_num):
    # type: (int) -> value_t
    # Positional parameter $1, $2, ...; lookup is delegated to shell memory.
    assert var_num >= 0
    return self.mem.GetArgNum(var_num)
def _EvalSpecialVar(self, op_id, quoted):
    # type: (int, bool) -> Tuple[value_t, bool]
    """Returns (val, bool maybe_decay_array).

    op_id: an Id.VSub_* token id; quoted: whether inside double quotes.
    TODO: Should that boolean be part of the value?
    """
    # $@ is special -- it need to know whether it is in a double quoted
    # context.
    #
    # - If it's $@ in a double quoted context, return an ARRAY.
    # - If it's $@ in a normal context, return a STRING, which then will be
    #   subject to splitting.
    maybe_decay_array = False
    if op_id in (Id.VSub_At, Id.VSub_Star):
        argv = self.mem.GetArgv()
        val = value.MaybeStrArray(argv)  # type: value_t
        if op_id == Id.VSub_At:
            # "$@" evaluates to an array, $@ should be decayed
            maybe_decay_array = not quoted
        else:  # $* "$*" are both decayed
            maybe_decay_array = True
    elif op_id == Id.VSub_Hyphen:
        # $- : the current option flags string.
        val = value.Str(_GetDollarHyphen(self.exec_opts))
    else:
        val = self.mem.GetSpecialVar(op_id)
    return val, maybe_decay_array
def _ApplyTestOp(self,
                 val,  # type: value_t
                 op,  # type: suffix_op__Unary
                 quoted,  # type: bool
                 part_vals,  # type: Optional[List[part_value_t]]
                 var_name,  # type: str
                 var_index,  # type: a_index_t
                 blame_token,  # type: Token
                 ):
    # type: (...) -> bool
    """Handles the ${a:-} ${a:+} ${a:=} ${a:?} family of operators.

    Returns:
      Whether part_vals was mutated

      ${a:-} returns part_value[]
      ${a:+} returns part_value[]
      ${a:?error} returns error word?
      ${a:=} returns part_value[] but also needs self.mem for side effects.

      So I guess it should return part_value[], and then a flag for raising
      an error, and then a flag for assigning it?
      The original BracedVarSub will have the name.

    Example of needing multiple part_value[]

      echo X-${a:-'def'"ault"}-X

    We return two part values from the BracedVarSub.  Also consider:

      echo ${a:-x"$@"x}
    """
    # NOTE: Splicing part_values is necessary because of code like
    # ${undef:-'a b' c 'd # e'}.  Each part_value can have a different
    # do_glob/do_elide setting.

    # The ':' variants (e.g. :- vs -) also treat empty strings/arrays as
    # "falsey", not just undefined values.
    # TODO: Change this to a bitwise test?
    if op.op_id in (Id.VTest_ColonHyphen, Id.VTest_ColonEquals,
                    Id.VTest_ColonQMark, Id.VTest_ColonPlus):
        UP_val = val
        with tagswitch(val) as case:
            if case(value_e.Undef):
                is_falsey = True
            elif case(value_e.Str):
                val = cast(value__Str, UP_val)
                is_falsey = len(val.s) == 0
            elif case(value_e.MaybeStrArray):
                val = cast(value__MaybeStrArray, UP_val)
                is_falsey = len(val.strs) == 0
            else:
                raise NotImplementedError(val.tag_())
    else:
        is_falsey = val.tag_() == value_e.Undef
    #print('!!',id, is_falsey)
    if op.op_id in (Id.VTest_ColonHyphen, Id.VTest_Hyphen):
        if is_falsey:
            self._EvalWordToParts(op.arg_word, quoted, part_vals, is_subst=True)
            return True
        else:
            return False
    # Inverse of the above.
    elif op.op_id in (Id.VTest_ColonPlus, Id.VTest_Plus):
        if is_falsey:
            return False
        else:
            self._EvalWordToParts(op.arg_word, quoted, part_vals, is_subst=True)
            return True
    # Splice and assign
    elif op.op_id in (Id.VTest_ColonEquals, Id.VTest_Equals):
        if is_falsey:
            # Collect new part vals.
            assign_part_vals = []  # type: List[part_value_t]
            self._EvalWordToParts(op.arg_word, quoted, assign_part_vals,
                                  is_subst=True)
            # Append them to out param AND return them.
            part_vals.extend(assign_part_vals)
            if var_name is None:
                # TODO: error context
                e_die("Can't assign to special variable")
            else:
                # NOTE: This decays arrays too!  'set -o strict_array' could
                # avoid it.
                rhs_str = _DecayPartValuesToString(assign_part_vals,
                                                   self.splitter.GetJoinChar())
                if var_index is None:  # using None when no index
                    lval = lvalue.Named(var_name)  # type: lvalue_t
                else:
                    UP_var_index = var_index
                    with tagswitch(var_index) as case:
                        if case(a_index_e.Int):
                            var_index = cast(a_index__Int, UP_var_index)
                            lval = lvalue.Indexed(var_name, var_index.i)
                        elif case(a_index_e.Str):
                            var_index = cast(a_index__Str, UP_var_index)
                            lval = lvalue.Keyed(var_name, var_index.s)
                        else:
                            raise AssertionError()
                self.mem.SetVar(lval, value.Str(rhs_str), scope_e.Dynamic)
            return True
        else:
            return False
    elif op.op_id in (Id.VTest_ColonQMark, Id.VTest_QMark):
        if is_falsey:
            # The arg is the error message
            error_part_vals = []  # type: List[part_value_t]
            self._EvalWordToParts(op.arg_word, quoted, error_part_vals,
                                  is_subst=True)
            error_str = _DecayPartValuesToString(error_part_vals,
                                                 self.splitter.GetJoinChar())
            e_die("unset variable %r", error_str, token=blame_token)
        else:
            return False
    else:
        raise NotImplementedError(op.op_id)
def _EvalIndirectArrayExpansion(self, name, index):
    # type: (str, str) -> Optional[value_t]
    """Expands ${!ref} when $ref has the form `name[index]`.

    Args:
      name, index: arbitrary strings
    Returns:
      value, or None if invalid
    """
    if not match.IsValidVarName(name):
        return None
    val = self.mem.GetVar(name)
    UP_val = val
    with tagswitch(val) as case:
        if case(value_e.Undef):
            return value.Undef()
        elif case(value_e.Str):
            # A scalar string cannot be indexed as name[index].
            return None
        elif case(value_e.MaybeStrArray):
            val = cast(value__MaybeStrArray, UP_val)
            if index in ('@', '*'):
                # TODO: maybe_decay_array
                return value.MaybeStrArray(val.strs)
            try:
                index_num = int(index)
            except ValueError:
                return None
            try:
                return value.Str(val.strs[index_num])
            except IndexError:
                # Out-of-range index behaves like an unset variable.
                return value.Undef()
        elif case(value_e.AssocArray):
            val = cast(value__AssocArray, UP_val)
            if index in ('@', '*'):
                raise NotImplementedError()
            try:
                return value.Str(val.d[index])
            except KeyError:
                # Missing key behaves like an unset variable.
                return value.Undef()
        else:
            raise AssertionError()
def _ApplyPrefixOp(self, val, prefix_op, token):
    # type: (value_t, speck, Token) -> value_t
    """Handles the ${#x} (length) and ${!x} (indirection) prefix operators.

    Returns:
      value
    """
    assert val.tag != value_e.Undef
    op_id = prefix_op.id
    if op_id == Id.VSub_Pound:  # LENGTH
        UP_val = val
        with tagswitch(val) as case:
            if case(value_e.Str):
                val = cast(value__Str, UP_val)
                # NOTE: Whether bash counts bytes or chars is affected by
                # LANG environment variables.
                # Should we respect that, or another way to select?  set -o
                # count-bytes?
                # https://stackoverflow.com/questions/17368067/length-of-string-in-bash
                try:
                    length = string_ops.CountUtf8Chars(val.s)
                except error.Strict as e:
                    # Add this here so we don't have to add it so far down
                    # the stack.
                    # TODO: It's better to show BOTH this CODE and the actual
                    # DATA somehow.
                    e.span_id = token.span_id
                    if self.exec_opts.strict_word_eval():
                        raise
                    else:
                        # NOTE: Doesn't make the command exit with 1; it just
                        # returns a length of -1.
                        self.errfmt.PrettyPrintError(e, prefix='warning: ')
                        return value.Str('-1')
            elif case(value_e.MaybeStrArray):
                val = cast(value__MaybeStrArray, UP_val)
                # There can be empty placeholder values in the array.
                length = 0
                for s in val.strs:
                    if s is not None:
                        length += 1
            elif case(value_e.AssocArray):
                val = cast(value__AssocArray, UP_val)
                length = len(val.d)
            else:
                raise AssertionError()
        return value.Str(str(length))
    elif op_id == Id.VSub_Bang:  # ${!foo}, "indirect expansion"
        # NOTES:
        # - Could translate to eval('$' + name) or eval("\$$name")
        # - ${!array[@]} means something completely different.  TODO:
        #   implement that.
        # - It might make sense to suggest implementing this with associative
        #   arrays?
        UP_val = val
        with tagswitch(val) as case:
            if case(value_e.Str):
                val = cast(value__Str, UP_val)
                # plain variable name, like 'foo'
                if match.IsValidVarName(val.s):
                    return self.mem.GetVar(val.s)
                # positional argument, like '1'
                try:
                    return self.mem.GetArgNum(int(val.s))
                except ValueError:
                    pass
                if val.s in ('@', '*'):
                    # TODO: maybe_decay_array
                    return value.MaybeStrArray(self.mem.GetArgv())
                # note: case 6 in var-ref.test.sh passes because of this
                # otherwise an array reference, like 'arr[0]' or 'arr[xyz]'
                # or 'arr[@]'
                i = val.s.find('[')
                if i >= 0 and val.s[-1] == ']':
                    name = val.s[:i]
                    index = val.s[i+1:-1]
                    result = self._EvalIndirectArrayExpansion(name, index)
                    if result is not None:
                        return result
                # Note that bash doesn't consider this fatal.  It makes the
                # command exit with '1', but we don't have that ability yet?
                e_die('Bad indirect expansion: %r', val.s, token=token)
            elif case(value_e.MaybeStrArray):
                val = cast(value__MaybeStrArray, UP_val)
                # ${!arr} with an array value yields the list of set indices.
                # translation issue: tuple indices not supported in list
                # comprehensions
                #indices = [str(i) for i, s in enumerate(val.strs) if s is not None]
                indices = []  # type: List[str]
                for i, s in enumerate(val.strs):
                    if s is not None:
                        indices.append(str(i))
                return value.MaybeStrArray(indices)
            elif case(value_e.AssocArray):
                val = cast(value__AssocArray, UP_val)
                assert val.d is not None  # for MyPy, so it's not Optional[]
                return value.MaybeStrArray(val.d.keys())
            else:
                raise NotImplementedError(val.tag_())
    else:
        raise AssertionError(op_id)
  def _ApplyUnarySuffixOp(self, val, op):
    # type: (value_t, suffix_op__Unary) -> value_t
    """Apply a VOp1 suffix operator (e.g. # ## % %% ^ ^^ , ,,) to a value.

    Strings are transformed directly.  Arrays and associative arrays are
    transformed element-wise ("vectorized", like bash's ${a[@]#prefix});
    both produce a MaybeStrArray result (assoc-array keys are discarded,
    only the values are transformed).
    """
    # Undef should have been handled by the caller before reaching here.
    assert val.tag != value_e.Undef
    op_kind = consts.GetKind(op.op_id)
    if op_kind == Kind.VOp1:
      # NOTE: glob syntax is supported in ^ ^^ , ,, ! As well as % %% # ##.
      # The operator argument is evaluated as an fnmatch()-style pattern.
      arg_val = self.EvalWordToString(op.arg_word, quote_kind=quote_e.FnMatch)
      assert arg_val.tag == value_e.Str
      UP_val = val
      with tagswitch(val) as case:
        if case(value_e.Str):
          val = cast(value__Str, UP_val)
          s = string_ops.DoUnarySuffixOp(val.s, op, arg_val.s)
          #log('%r %r -> %r', val.s, arg_val.s, s)
          new_val = value.Str(s) # type: value_t
        elif case(value_e.MaybeStrArray):
          val = cast(value__MaybeStrArray, UP_val)
          # ${a[@]#prefix} is VECTORIZED on arrays. Oil should have this too.
          strs = [] # type: List[str]
          for s in val.strs:
            # None entries are unset array slots; they are dropped here.
            if s is not None:
              strs.append(string_ops.DoUnarySuffixOp(s, op, arg_val.s))
          new_val = value.MaybeStrArray(strs)
        elif case(value_e.AssocArray):
          val = cast(value__AssocArray, UP_val)
          # Transform each value of the assoc array; keys are not kept.
          strs = []
          for s in val.d.values():
            strs.append(string_ops.DoUnarySuffixOp(s, op, arg_val.s))
          new_val = value.MaybeStrArray(strs)
        else:
          raise AssertionError(val.tag_())
    else:
      # Only Kind.VOp1 operators are valid for this method.
      raise AssertionError(Kind_str(op_kind))
    return new_val
def _EvalDoubleQuoted(self,
parts, # type: List[word_part_t]
part_vals, # type: List[part_value_t]
):
# type: (...) -> None
"""DoubleQuoted -> part_value
Args:
part_vals: output param to append to.
"""
# Example of returning array:
# $ a=(1 2); b=(3); $ c=(4 5)
# $ argv "${a[@]}${b[@]}${c[@]}"
# ['1', '234', '5']
# Example of multiple | |
= ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.4*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
kkkp.process(cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
kkkp.process(catp, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Finally a set (with all patches) using the KKKCrossCorrelation class.
kkkc = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
print('CrossCorrelation:')
kkkc.process(catp, catp, catp)
for k1 in kkkc._all:
print(k1.ntri.ravel())
print(k1.zeta.ravel())
print(k1.varzeta.ravel())
np.testing.assert_allclose(k1.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(k1.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(k1.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkc.estimate_cov('jackknife')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkc.estimate_cov('sample')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkc.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkc.estimate_cov('bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
# All catalogs need to have the same number of patches
catq = treecorr.Catalog(x=x, y=y, k=k, npatch=2*npatch)
with assert_raises(RuntimeError):
kkkp.process(catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catp, catq, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catq, catp)
@timer
def test_ggg_jk():
# Test jackknife and other covariance estimates for ggg correlations.
if __name__ == '__main__':
# This setup takes about 590 sec to run.
nhalo = 5000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 160 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 50 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 13 sec to run.
nhalo = 500
nsource = 500
npatch = 8
tol_factor = 3
# I couldn't figure out a way to get reasonable S/N in the shear field. I thought doing
# discrete halos would give some significant 3pt shear pattern, at least for equilateral
# triangles, but the signal here is still consistent with zero. :(
# The point is the variance, which is still calculated ok, but I would have rathered
# have something with S/N > 0.
# For these tests, I set up the binning to just accumulate all roughly equilateral triangles
# in a small separation range. The binning always uses two bins for each to get + and - v
# bins. So this function averages these two values to produce 1 value for each gamma.
f = lambda g: np.array([np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)])
file_name = 'data/test_ggg_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_gggs = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng1)
# For some reason std(g2) is coming out about 1.5x larger than std(g1).
# Probably a sign of some error in the generate function, but I don't see it.
# For this purpose I think it doesn't really matter, but it's a bit odd.
print(run,': ',np.mean(g1),np.std(g1),np.mean(g2),np.std(g2))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1)
ggg.process(cat)
print(ggg.ntri.ravel())
print(f(ggg))
all_gggs.append(ggg)
all_ggg = np.array([f(ggg) for ggg in all_gggs])
mean_ggg = np.mean(all_ggg, axis=0)
var_ggg = np.var(all_ggg, axis=0)
np.savez(file_name, mean_ggg=mean_ggg, var_ggg=var_ggg)
data = np.load(file_name)
mean_ggg = data['mean_ggg']
var_ggg = data['var_ggg']
print('mean = ',mean_ggg)
print('var = ',var_ggg)
rng = np.random.RandomState(12345)
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
ggg.process(cat)
print(ggg.ntri.ravel())
print(ggg.gam0.ravel())
print(ggg.gam1.ravel())
print(ggg.gam2.ravel())
print(ggg.gam3.ravel())
gggp = ggg.copy()
catp = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
# Do the same thing with patches.
gggp.process(catp)
print('with patches:')
print(gggp.ntri.ravel())
print(gggp.vargam0.ravel())
print(gggp.vargam1.ravel())
print(gggp.vargam2.ravel())
print(gggp.vargam3.ravel())
print(gggp.gam0.ravel())
print(gggp.gam1.ravel())
print(gggp.gam2.ravel())
print(gggp.gam3.ravel())
np.testing.assert_allclose(gggp.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.vargam0, ggg.vargam0, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam1, ggg.vargam1, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam2, ggg.vargam2, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam3, ggg.vargam3, rtol=0.1 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
gggp.process(catp, catp, catp)
print(gggp.gam0.ravel())
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
# The separate patch/non-patch combinations aren't that interesting, so skip them
# for GGG unless running from main.
if __name__ == '__main__':
# Patch on 1 only:
print('with patches on 1 only:')
gggp.process(catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
gggp.process(cat, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
gggp.process(cat, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
gggp.process(catp, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
gggp.process(cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=1.0*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max | |
(ie. not logged) user can see a user's river.
"""
# request uA's river without loging in.
resp = self.client.get(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}))
self.assertEqual(302,resp.status_code)
self.assertRegex(
resp["Location"],
reverse('user_login')
+ "\\?next="
+ reverse("user_river_sieve",
kwargs={"owner_name":"uA"}))
def test_post_json_pick_item_out_of_sieve(self):
"""
Make sure posting an item as read will remove it from the sieve.
"""
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
# check presence of r1 reference
resp = self.client.get(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}))
items = resp.context["oldest_unread_references"]
num_ref_r1 = [r.reference.url for r in items].count("http://r1")
self.assertLessEqual(1,num_ref_r1)
# mark the first reference as read.
resp = self.client.post(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}),
json.dumps({"action":"read","references":["http://r1"]}),
content_type="application/json")
self.assertEqual(200,resp.status_code)
resp_dic = json.loads(resp.content)
self.assertEqual("read",resp_dic["action"])
self.assertEqual("success",resp_dic["status"])
self.assertLessEqual(num_ref_r1,resp_dic["count"])
# check absence of r1 reference
resp = self.client.get(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}))
items = resp.context["oldest_unread_references"]
self.assertEqual(0,[r.reference.url for r in items].count("http://r1"))
def test_post_json_pick_several_items_out_of_sieve(self):
"""
Make sure posting a list of items as read will remove them from the sieve.
"""
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
# check presence of r1 reference
resp = self.client.get(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}))
items = resp.context["oldest_unread_references"]
num_ref_r1 = [r.reference.url for r in items].count("http://r1")
self.assertLessEqual(1,num_ref_r1)
num_ref_r3 = [r.reference.url for r in items].count("http://r3")
self.assertLessEqual(1,num_ref_r3)
# mark the first reference as read.
resp = self.client.post(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}),
json.dumps({"action":"read",
"references":["http://r1","http://r3"]}),
content_type="application/json")
self.assertEqual(200,resp.status_code)
resp_dic = json.loads(resp.content)
self.assertEqual("read",resp_dic["action"])
self.assertEqual("success",resp_dic["status"])
self.assertLessEqual(num_ref_r1+num_ref_r3,resp_dic["count"])
# check absence of r1 reference
resp = self.client.get(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}))
items = resp.context["oldest_unread_references"]
self.assertEqual(0,[r.reference.url for r in items].count("http://r1"))
self.assertEqual(0,[r.reference.url for r in items].count("http://r3"))
def test_post_json_drop_sieve_content(self):
"""
Make sure posting an item as read will remove it from the sieve.
"""
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
# check presence of r1 reference
resp = self.client.get(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}))
items = resp.context["oldest_unread_references"]
num_refs = len(items)
self.assertGreaterEqual(num_refs, 1)
# mark the first reference as read.
resp = self.client.post(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}),
json.dumps({"action":"drop"}),
content_type="application/json")
self.assertEqual(200,resp.status_code)
resp_dic = json.loads(resp.content)
self.assertEqual("drop",resp_dic["action"])
self.assertEqual("success",resp_dic["status"])
self.assertLessEqual(num_refs,resp_dic["count"])
# check emptyness of sieve
resp = self.client.get(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}))
items = resp.context["oldest_unread_references"]
self.assertEqual(0,len(items))
def test_post_malformed_json_returns_error(self):
"""
Make sure when the json is malformed an error that is not a server error is returned.
"""
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
# mark a of uB reference as read.
resp = self.client.post(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}),
"action=read,references=(http://r1)",
content_type="application/json")
self.assertEqual(400,resp.status_code)
def test_post_json_for_non_owner_logged_user_is_forbidden(self):
"""
Make sure when the json is malformed an error that is not a server error is returned.
"""
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
# mark a of uB reference as read.
resp = self.client.post(reverse("user_river_sieve",
kwargs={"owner_name":"uB"}),
json.dumps({"action":"read","references":["http://r2"]}),
content_type="application/json")
self.assertEqual(403,resp.status_code)
def test_post_json_for_anonymous_redirects(self):
"""
Make sure an anonymous (ie. not logged) user can see a user's river.
"""
resp = self.client.post(reverse("user_river_sieve",
kwargs={"owner_name":"uA"}),
json.dumps({"action":"read","references":["http://r1"]}),
content_type="application/json")
self.assertEqual(302,resp.status_code)
class UserSourcesViewTest(TestCase):
    """Tests for the per-user sources view (HTML and OPML renderings).

    Fixture naming convention: reference titles encode the source type in
    their first character ('f' for feed-backed, 's' for plain) and an index
    digit in their second character, which the tests decode below.
    """
    def setUp(self):
        # Create 2 users and 3 feed sources (1 exclusive to each and a
        # shared one) and 3 non-feed sources.
        self.user1 = User.objects.create_user(username="uA",password="pA")
        user1_profile = UserProfile.objects.create(owner=self.user1)
        self.user2 = User.objects.create_user(username="uB",password="pB")
        user2_profile = UserProfile.objects.create(owner=self.user2)
        date = datetime.now(timezone.utc)
        # Feed-backed sources: f1 (uA only), f2 (uB only), f3 (shared).
        r1 = Reference.objects.create(url="http://mouf",title="f1",pub_date=date)
        f1 = WebFeed.objects.create(xmlURL="http://mouf/rss.xml",
                                    last_update_check=date,
                                    source=r1)
        r2 = Reference.objects.create(url="http://bla",title="f2",pub_date=date)
        f2 = WebFeed.objects.create(xmlURL="http://bla/rss.xml",
                                    last_update_check=date,
                                    source=r2)
        r3 = Reference.objects.create(url="http://greuh",title="f3",pub_date=date)
        f3 = WebFeed.objects.create(xmlURL="http://greuh/rss.xml",
                                    last_update_check=date,
                                    source=r3)
        user1_profile.web_feeds.add(f1)
        user1_profile.web_feeds.add(f3)
        user2_profile.web_feeds.add(f2)
        user2_profile.web_feeds.add(f3)
        user1_profile.sources.add(r1)
        user1_profile.sources.add(r3)
        user2_profile.sources.add(r2)
        user2_profile.sources.add(r3)
        # also add plain sources
        # s1 (uA, public), s2 (uB, public), s3 (shared, private for both).
        s1 = Reference.objects.create(url="http://s1",title="s1",pub_date=date)
        s2 = Reference.objects.create(url="http://s2",title="s2",pub_date=date)
        s3 = Reference.objects.create(url="http://s3",title="s3",pub_date=date)
        user1_profile.sources.add(s1)
        user1_profile.public_sources.add(s1)
        user1_profile.sources.add(s3)
        user2_profile.sources.add(s2)
        user2_profile.public_sources.add(s2)
        user2_profile.sources.add(s3)
    def test_get_html_for_owner_returns_separate_source_and_feed(self):
        """
        Make sure a user can see its sources in two categories.
        """
        # login as uA and make sure it succeeds
        self.assertTrue(self.client.login(username="uA",password="pA"))
        # request uA's river
        resp = self.client.get(reverse("user_river_sources",
                                       kwargs={"owner_name":"uA"}))
        self.assertEqual(200,resp.status_code)
        self.assertIn("sources.html",[t.name for t in resp.templates])
        self.assertIn("source_add_bookmarklet", resp.context)
        self.assertIn("tagged_web_feeds", resp.context)
        self.assertIn("other_sources", resp.context)
        # Decode the index digit (title[1]) and type char (title[0]) of
        # the non-feed sources: uA owns s1 and s3.
        items = resp.context["other_sources"]
        sourceNames = set([int(s.title[1]) for s in items])
        self.assertEqual(sourceNames,set((1,3)))
        sourceTypes = set([s.title[0] for s in items])
        self.assertEqual(set(("s",)),sourceTypes)
        # Same decoding for feed-backed sources: uA follows f1 and f3.
        feed_items = resp.context["tagged_web_feeds"]
        feedNames = set([int(s.source.title[1]) for s in feed_items])
        self.assertEqual(feedNames,set((1,3)))
        feedTypes = set([s.source.title[0] for s in feed_items])
        self.assertEqual(set(("f",)),feedTypes)
        # No tags were assigned in the fixture.
        feedTags = set([s.main_tag_name for s in feed_items])
        self.assertEqual(set(("",)),feedTags)
    def test_get_html_for_non_owner_logged_user_returns_public_source_only(self):
        """
        Make sure a logged in user can see another user's sources.
        """
        # login as uA and make sure it succeeds
        self.assertTrue(self.client.login(username="uA",password="pA"))
        # request uB's river
        resp = self.client.get(reverse("user_river_sources",
                                       kwargs={"owner_name":"uB"}))
        self.assertEqual(200,resp.status_code)
        self.assertIn("sources.html",[t.name for t in resp.templates])
        self.assertIn("source_add_bookmarklet", resp.context)
        self.assertIn("tagged_web_feeds", resp.context)
        self.assertIn("other_sources", resp.context)
        # Only uB's public plain source (s2) is visible to uA.
        items = resp.context["other_sources"]
        sourceNames = set([int(s.title[1]) for s in items])
        self.assertEqual(sourceNames,set((2,)))
        sourceTypes = set([s.title[0] for s in items])
        self.assertEqual(set(("s",)),sourceTypes)
        # All feeds being systematically public they should all be
        # visible (NB: in practice the app guarantees that a source
        # associated to a feed is always public which is not the case
        # here with s3)
        feed_items = resp.context["tagged_web_feeds"]
        feedNames = set([int(s.source.title[1]) for s in feed_items])
        self.assertEqual(feedNames,set((2,3)))
        feedTypes = set([s.source.title[0] for s in feed_items])
        self.assertEqual(set(("f",)),feedTypes)
    def test_get_html_for_anonymous_returns_all_sources(self):
        """
        Make sure an anonymous user can see users' sources.
        """
        # request uA's river (no login: anonymous access)
        resp = self.client.get(reverse("user_river_sources",
                                       kwargs={"owner_name":"uA"}))
        self.assertEqual(200,resp.status_code)
        self.assertIn("sources.html",[t.name for t in resp.templates])
        self.assertIn("source_add_bookmarklet", resp.context)
        self.assertIn("tagged_web_feeds", resp.context)
        self.assertIn("other_sources", resp.context)
        # Anonymous visitors only see uA's public plain source (s1).
        items = resp.context["other_sources"]
        sourceNames = set([int(s.title[1]) for s in items])
        self.assertEqual(sourceNames,set((1,)))
        sourceTypes = set([s.title[0] for s in items])
        self.assertEqual(set(("s",)),sourceTypes)
        # All feeds being systematically public they should all be
        # visible (NB: in practice the app guarantees that a source
        # associated to a feed is always public which is not the case
        # here with s3)
        feed_items = resp.context["tagged_web_feeds"]
        feedNames = set([int(s.source.title[1]) for s in feed_items])
        self.assertEqual(feedNames,set((1,3)))
        feedTypes = set([s.source.title[0] for s in feed_items])
        self.assertEqual(set(("f",)),feedTypes)
    def test_get_opml_for_anonymous_returns_all_sources(self):
        """
        Make sure an anonymous user can see users' sources as OPML.
        """
        # request uA's river in OPML form via the format query parameter
        resp = self.client.get(reverse("user_river_sources",
                                       kwargs={"owner_name":"uA"})+"?format=opml")
        self.assertEqual(200,resp.status_code)
        self.assertIn("sources_opml.xml",[t.name for t in resp.templates])
        self.assertIn("tagged_web_feeds", resp.context)
        # All feeds being systematically public they should all be
        # visible (NB: in practice the app guarantees that a source
        # associated to a feed is always public which is not the case
        # here with s3)
        feed_items = resp.context["tagged_web_feeds"]
        feedNames = set([int(s.source.title[1]) for s in feed_items])
        self.assertEqual(feedNames,set((1,3)))
        feedTypes = set([s.source.title[0] for s in feed_items])
        self.assertEqual(set(("f",)),feedTypes)
class ReferenceUserStatusModelTest(TestCase):
    """Unit tests for the ReferenceUserStatus model."""
    def setUp(self):
        self.date = datetime.now(timezone.utc)
        self.reference = Reference.objects.create(url="http://mouf",
                                                  title="glop",
                                                  pub_date=self.date)
        self.user = User.objects.create(username="name")
    def test_construction_defaults(self):
        """
        This tests just makes it possible to double check that a
        change in the default is voluntary.
        """
        main_src = Reference.objects.create(url="http://source",title="source",pub_date=self.date)
        status = ReferenceUserStatus.objects.create(reference=self.reference,
                                                    owner=self.user,
                                                    reference_pub_date=self.date,
                                                    main_source=main_src)
        # Both flags must default to False.
        self.assertFalse(status.has_been_read)
        self.assertFalse(status.has_been_saved)
    def test_disappear_when_reference_is_cleaned(self):
        """Deleting a Reference must also remove its user statuses."""
        source = self.reference
        doomed = Reference(url="http://source",title="other",pub_date=self.date)
        doomed.save()
        status = ReferenceUserStatus(reference=doomed,
                                     owner=self.user,
                                     reference_pub_date=self.date,
                                     main_source=source)
        status.save()
        # Querysets are lazy: each exists() call below re-hits the db.
        doomed_refs = Reference.objects.filter(title="other")
        statuses = ReferenceUserStatus.objects.filter(main_source=source)
        self.assertTrue(doomed_refs.exists())
        self.assertTrue(statuses.exists())
        doomed_refs.delete()
        self.assertFalse(doomed_refs.exists())
        self.assertFalse(statuses.exists())
class UserSourceItemViewTest(TestCase):
"""Test the single source view."""
    def setUp(self):
        # Fixture: uA owns one plain source and one feed-backed source;
        # uB owns nothing and is used to test access control.
        self.date = datetime.now(timezone.utc)
        # Plain (non-feed) source owned by uA.
        self.source = Reference.objects.create(
            url="http://mouf",
            title="a mouf",
            pub_date=self.date)
        self.user = User.objects.create_user(username="uA",
                                             password="pA")
        # Feed-backed source: a Reference plus its associated WebFeed.
        self.feed_source = Reference.objects.create(url="http://barf",
                                                    title="a barf",
                                                    pub_date=self.date)
        self.web_feed = WebFeed.objects.create(
            xmlURL="http://barf/bla.xml",
            last_update_check=self.date,
            source=self.feed_source)
        # Attach both sources and the feed to uA's profile.
        self.user_profile = UserProfile.objects.create(owner=self.user)
        self.user_profile.sources.add(self.source)
        self.user_profile.sources.add(self.feed_source)
        self.user_profile.web_feeds.add(self.web_feed)
        # Second user without any source, used for permission checks.
        self.other_user = User.objects.create_user(username="uB",
                                                   password="pB")
def change_request(self,username,source_url,optionsDict,expectedStatusCode=200):
"""
Send the request as a JSON loaded POST.
"""
url_code = build_safe_code_from_url(source_url)
resp = self.client.post(reverse("user_river_source_item",
kwargs={"owner_name": username,
"source_url_code": url_code}),
json.dumps(optionsDict),
content_type="application/json")
self.assertEqual(expectedStatusCode,resp.status_code)
return resp
def test_get_html_user_source(self):
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
url_code = build_safe_code_from_url(self.source.url)
resp = self.client.get(reverse("user_river_source_item",
kwargs={
"owner_name": "uA",
"source_url_code": url_code}))
self.assertEqual(200,resp.status_code)
self.assertIn("source_edit.html",[t.name for t in resp.templates])
self.assertIn("ref_form", resp.context)
self.assertIn("feed_forms", resp.context)
self.assertEqual(0, len(resp.context["feed_forms"]))
self.assertIn("ref_url", resp.context)
self.assertEqual(self.source.url, resp.context["ref_url"])
self.assertIn("ref_title", resp.context)
self.assertEqual(self.source.title, resp.context["ref_title"])
def test_get_html_other_user_source_is_forbidden(self):
self.assertTrue(self.client.login(username="uB",password="pB"))
url_code = build_safe_code_from_url(self.source.url)
resp = self.client.get(reverse("user_river_source_item",
kwargs={
"owner_name": "uA",
"source_url_code": url_code}))
self.assertEqual(403,resp.status_code)
def test_get_html_user_source_with_feed_has_feed_forms_filled(self):
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
url_code = build_safe_code_from_url(self.feed_source.url)
resp = self.client.get(reverse("user_river_source_item",
kwargs={
"owner_name": "uA",
"source_url_code": url_code}))
self.assertEqual(200,resp.status_code)
self.assertIn("feed_forms", resp.context)
self.assertEqual(1, len(resp.context["feed_forms"]))
self.assertEqual(self.web_feed.xmlURL,list(resp.context["feed_forms"].keys())[0])
def test_change_user_source_title_updates_title_in_db(self):
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
newTitle = self.source.title + "MOUF"
self.change_request("uA",self.source.url,
{"ref-title": newTitle,
"ref-description": "blah"}, 302)
self.assertEqual(newTitle, Reference.objects.get(url=self.source.url).title)
def test_change_user_source_title_updates_dont_mess_subscriptions(self):
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
formerFeedCount = self.user_profile.web_feeds.count()
self.change_request("uA",self.feed_source.url,
{"ref-title": self.feed_source.title+"MOUF",
"ref-description": "blah"}, 302)
self.assertEqual(formerFeedCount, self.user_profile.web_feeds.count())
def test_unsubscribe_from_feed(self):
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
self.assertEqual(1,self.user_profile.web_feeds.count())
self.change_request("uA",self.feed_source.url,
{"feed0-follow": False}, 302)
self.assertEqual(0, self.user_profile.web_feeds.count())
self.assertEqual(1, WebFeed.objects.filter(xmlURL=self.web_feed.xmlURL).count())
def test_subscribe_twice_does_not_add_twice_the_feed(self):
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
self.assertEqual(1,self.user_profile.web_feeds.count())
self.change_request("uA",self.feed_source.url,
{"feed0-follow": True}, 302)
self.assertEqual(1, self.user_profile.web_feeds.count())
self.change_request("uA",self.feed_source.url,
{"feed0-follow": True}, 302)
self.assertEqual(1, self.user_profile.web_feeds.count())
self.assertEqual(1, WebFeed.objects.filter(xmlURL=self.web_feed.xmlURL).count())
def test_collate_feed(self):
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
self.assertEqual(0, self.user_profile.collating_feeds.count())
self.change_request("uA",self.feed_source.url,
{"feed0-collate": True}, 302)
self.assertEqual(1, self.user_profile.collating_feeds.count())
self.assertEqual(1, WebFeedCollation.objects.filter(feed=self.web_feed).count())
def test_collate_twice_does_not_add_2_collations(self):
# login as uA and make sure it succeeds
self.assertTrue(self.client.login(username="uA",password="pA"))
self.assertEqual(0, self.user_profile.collating_feeds.count())
self.change_request("uA",self.feed_source.url,
{"feed0-collate": True}, 302)
self.assertEqual(1, self.user_profile.collating_feeds.count())
self.change_request("uA",self.feed_source.url,
{"feed0-collate": | |
to identify the main channel at each divergence:
• A 50-meter buffer polygon is drawn around each flowline feature. A flat end-cap is used, so that only areas perpendicular to the flowlines are included in each buffer.
• Zonal statistics for the lidar-based DEM values within each buffer polygon are computed using the `rasterstats python package <https://pythonhosted.org/rasterstats/>`_. The tenth percentile elevation is selected as a metric for discriminating between the main channel and minor distributaries. Lower elevation percentiles would be more likely to represent areas of overlap between the buffers for the main channel and minor distributaries (resulting in minor distributary values that are similar to the main channel), while higher elevation percentiles might miss the lowest parts of the main channel or even represent parts of the channel banks instead.
• At each divergence, the distributary with the lowest tenth percentile elevation is assumed to be the main channel.
In the MAP region, comparison of the sampled DEM values with the NHDPlus elevation attribute data revealed a high bias in many of the attribute values, especially in the vicinity of diversions. This may be a result of the upstream smoothing process described by McKay and others (2012, p 123) when it encounters distributaries of unequal values such as the example shown in Figure 5. To remedy this issue, the 10th percentile values obtained from the buffer zonal statistics were assigned to each flowline, and then smoothed in the downstream direction to ensure that no flowlines had negative (uphill) slopes.
Finally, routing connections to minor distributaries are removed, and arbolate sums recomputed for the entire stream network, with arbolate sums at minor distributaries starting at zero. In this way, the minor distributaries are treated like headwater streams in that they will only receive flow if the water table is greater than their assigned elevation, otherwise they are simulated as dry and are not part of the groundwater model solution. Similar to :func:`~sfrmaker.preprocessing.cull_flowlines`, the first ``asum_thresh`` km of minor distributaries are trimmed from the stream network.
If a shapefile is specified for the ``narwidth_shapefile`` argument, the :func:`~sfrmaker.preprocessing.sample_narwidth` function is called.
"""
# check that all the input files exist
files_list = [flowlines_file,
pfvaa_file,
pf_file,
elevslope_file,
]
if run_zonal_statistics:
files_list.append(demfile)
if narwidth_shapefile is not None:
if waterbody_shapefiles is None:
raise ValueError("NARWidth option ")
else:
if isinstance(waterbody_shapefiles, str):
waterbody_shapefiles = [waterbody_shapefiles]
files_list += waterbody_shapefiles
for f in files_list:
assert os.path.exists(f), "missing {}".format(f)
if known_connections is None:
known_connections = {}
if logger is None:
logger = Logger()
logger.log('Preprocessing Flowlines')
# read NHDPlus files into pandas dataframes
for f in [flowlines_file, pfvaa_file, pf_file, elevslope_file]:
logger.log_file_and_date_modified(f)
# get the flowline CRS, if geographic,
# verify that project_crs is specified
prjfile = os.path.splitext(flowlines_file)[0] + '.prj'
if os.path.exists(prjfile):
flowline_crs = get_shapefile_crs(prjfile)
else:
msg = ("{} not found; flowlines must have a valid projection file."
.format(prjfile))
logger.lraise(msg)
if project_epsg is not None and dest_crs is None:
project_crs = get_crs(epsg=project_epsg, crs=dest_crs)
if flowline_crs.is_geographic:
if project_crs is None or project_crs.is_geographic:
msg = ("project_epsg for a valid Projected CRS (i.e. in units of meters)\n"
" must be specified if flowlines are in a Geographic CRS\n"
"specified project_epsg: {}".format(project_epsg))
logger.lraise(msg)
# get bounds of flowlines
with fiona.open(flowlines_file) as src:
flowline_bounds = src.bounds
fl = shp2df(flowlines_file) # flowlines clipped to model area
pfvaa = shp2df(pfvaa_file)
pf = shp2df(pf_file)
elevslope = shp2df(elevslope_file)
# index dataframes by common-identifier numbers
pfvaa.index = pfvaa.ComID
pf.index = pf.FROMCOMID
elevslope.index = elevslope.COMID
fl.index = fl.COMID
# subset attribute tables to clipped flowlines
pfvaa = pfvaa.loc[fl.index]
pf = pf.loc[fl.index]
elevslope = elevslope.loc[fl.index]
# to_crs the flowlines if they are not in project_crs
if project_crs is not None and flowline_crs is not None and project_crs != flowline_crs:
fl['geometry'] = project(fl.geometry, flowline_crs, project_crs)
# option to reuse shapefile from previous run instead of re-running zonal statistics
# which can take an hour for large problems
if run_zonal_statistics:
assert Path(demfile).exists(), \
"If run_zonal_statistics=True (default), a demfile is needed."
# draw buffers
flbuffers = [g.buffer(buffersize_meters, cap_style=2) # 2 (flat cap) very important!
for g in fl.geometry]
# Create buffer around flowlines with flat cap, so that ends are flush with ends of lines
# compute zonal statistics on buffer
logger.log('Creating buffers and running zonal statistics')
logger.log_package_version('rasterstats')
logger.statement('buffersize: {} m'.format(buffersize_meters), log_time=False)
logger.log_file_and_date_modified(demfile, prefix='DEM file: ')
# if DEM has different crs, project buffer polygons to DEM crs
with rasterio.open(demfile) as src:
meta = src.meta
dem_crs = get_authority_crs(meta['crs'])
dem_res = src.res[0]
flbuffers_pr = flbuffers
if project_crs is not None and dem_crs != project_crs:
flbuffers_pr = project(flbuffers, project_crs, dem_crs)
# run zonal statistics on buffers
# this step takes at least ~ 20 min for the full 1-mi MERAS model
# with large cell sizes, count all cells that are touched by each buffer
# (not just the cell centers that are intersected)
all_touched = False
if buffersize_meters < dem_res:
all_touched = True
results = zonal_stats(flbuffers_pr,
demfile,
stats=['min', 'mean', 'std',
'percentile_1', 'percentile_10',
'percentile_20', 'percentile_80'],
all_touched=all_touched)
#results = {'mean': np.zeros(len(fl)),
# 'min': np.zeros(len(fl)),
# 'percentile_10': np.zeros(len(fl)),
# 'percentile_20': np.zeros(len(fl)),
# 'percentile_80': np.zeros(len(fl))}
df = pd.DataFrame(results)
# warn if there are more than 10% nan values
n_nan = df.isna().any(axis=1).sum()
pct_nan = n_nan/len(df)
if pct_nan > 0.1:
logger.warn("Warning: {} ({:.1%}) of sampled DEM values are NaNs. "
"Check the extent and resolution of {}".format(n_nan, pct_nan, demfile))
dem_units_to_output_units = convert_length_units(dem_length_units, output_length_units)
fl['mean'] = df['mean'].values * dem_units_to_output_units
fl['min'] = df['min'].values * dem_units_to_output_units
fl['std'] = df['std'].values * dem_units_to_output_units
fl['pct01'] = df.percentile_1.values * dem_units_to_output_units
fl['pct10'] = df.percentile_10.values * dem_units_to_output_units
fl['pct20'] = df.percentile_20.values * dem_units_to_output_units
fl['pct80'] = df.percentile_80.values * dem_units_to_output_units
fl['buffpoly'] = flbuffers
logger.log('Creating buffers and running zonal statistics')
# write a shapefile of the flowline buffers for GIS visualization
logger.statement('Writing shapefile of buffers used to determine distributary routing...')
flccb = fl.copy()
flccb['geometry'] = flccb.buffpoly
df2shp(flccb.drop('buffpoly', axis=1),
os.path.join(outfolder, 'flowlines_gt{:.0f}km_buffers.shp'.format(asum_thresh)),
index=False, epsg=project_epsg)
else:
assert Path(flowline_elevations_file).exists(), \
("If run_zonal_statistics=False a flowline_elevations_file produced by"
"a previous run of the sfrmaker.preprocessing.preprocess_nhdplus() "
"function is needed.")
flccb = shp2df(flowline_elevations_file)
flccb.index = flccb['COMID']
flccb['buffpoly'] = flccb['geometry']
merge_cols = [c for c in flccb.columns if c not in fl.columns]
fl = fl.join(flccb[merge_cols])
# cull COMIDS with invalid values
minelev = -10
logger.statement('Culling COMIDs with smoothed values < {} cm'.format(minelev))
badstrtop = (elevslope.MAXELEVSMO < minelev) | (elevslope.MINELEVSMO < minelev)
badstrtop_comids = elevslope.loc[badstrtop].COMID.values
badstrtop = [True if c in badstrtop_comids else False for c in fl.COMID]
flcc = fl.loc[~np.array(badstrtop)].copy()
# add some attributes from pfvaa file
flcc['Divergence'] = pfvaa.loc[flcc.index, 'Divergence']
flcc['LevelPathI'] = pfvaa.loc[flcc.index, 'LevelPathI']
flcc['nhd_asum'] = pfvaa.loc[flcc.index, 'ArbolateSu']
# dictionary with routing info by COMID
graph = make_graph(pf.FROMCOMID.values, pf.TOCOMID.values)
in_model = set(fl.COMID)
graph = {k: v for k, v in graph.items() if k in in_model}
# use the 10th percentile from zonal_statistics for setting end elevation of each flowline
# (hopefully distinguishes flowlines that run along channels vs.
# those perpendicular to channels that route across upland areas)
elevcol = 'pct10'
# use zonal statistics elevation to determine routing at divergences
# (many of these do not appear to be coded correctly in NHDPlus)
# route to the segment with the lowest 20th percentile elevation
logger.log(f'Determining routing at divergences using elevations sampled from {demfile}')
txt = 'Primary distributary determined from lowest {}th percentile '.format(elevcol[-2:]) +\
'elevation value among distributaries at the confluence.\n'
# ensure these connections between comids
# fromcomid: tocomid
txt += 'Pre-determined routing at divergences (known_connections):\n'
for k, v in known_connections.items():
txt += '{} --> {}\n'.format(k, v)
logger.statement(txt)
# dictionary of values for selecting main channel at diversions
valid_comids = set(flcc.index)
div_elevs = dict(zip(flcc.COMID, flcc[elevcol]))
tocomids = {}
diversionminorcomids = set()
for k, v in graph.items():
# limit distributaries to those still in the dataset
v = v.intersection(valid_comids)
# known connections have to be handled first
if k in known_connections.keys():
# primary dist.
tocomids[k] = known_connections[k]
# update minorcomids with minor distribs.
diversionminorcomids.update(v.difference({tocomids[k]}))
# comid routes to only one comid
# | |
arguments.
revision = int(request.args.get('revision',
ts.Machine.DEFAULT_BASELINE_REVISION))
field = fields.get(request.args.get('field', None), metric_fields[0])
# Get the list of all runs we might be interested in.
recent_runs = session.query(ts.Run) \
.filter(ts.Run.start_time > yesterday) \
.all()
# Aggregate the runs by machine.
recent_runs_by_machine = multidict.multidict()
for run in recent_runs:
recent_runs_by_machine[run.machine] = run
# Get a sorted list of recent machines.
recent_machines = sorted(recent_runs_by_machine.keys(),
key=lambda m: m.name)
# We use periods in our machine names. css does not like this
# since it uses periods to demark classes. Thus we convert periods
# in the names of our machines to dashes for use in css. It is
# also convenient for our computations in the jinja page to have
# access to
def get_machine_keys(m):
m.css_name = m.name.replace('.', '-')
return m
recent_machines = list(map(get_machine_keys, recent_machines))
# For each machine, build a table of the machine, the baseline run, and the
# most recent run. We also computed a list of all the runs we are reporting
# over.
machine_run_info = []
reported_run_ids = []
for machine in recent_machines:
runs = recent_runs_by_machine[machine]
# Get the baseline run for this machine.
baseline = machine.get_closest_previously_reported_run(session,
revision)
# Choose the "best" run to report on. We want the most recent one with
# the most recent order.
run = max(runs, key=lambda r: (r.order, r.start_time))
if baseline:
machine_run_info.append((baseline, run))
reported_run_ids.append(baseline.id)
reported_run_ids.append(run.id)
# Get the set all tests reported in the recent runs.
reported_tests = session.query(ts.Test.id, ts.Test.name).filter(
sqlalchemy.sql.exists('*', sqlalchemy.sql.and_(
ts.Sample.run_id.in_(reported_run_ids),
ts.Sample.test_id == ts.Test.id))).all()
# Load all of the runs we are interested in.
runinfo = lnt.server.reporting.analysis.RunInfo(session, ts,
reported_run_ids)
# Build the test matrix. This is a two dimensional table index by
# (machine-index, test-index), where each entry is the percent change.
test_table = []
for i, (test_id, test_name) in enumerate(reported_tests):
# Create the row, starting with the test name and worst entry.
row = [(test_id, test_name), None]
# Compute comparison results for each machine.
row.extend((runinfo.get_run_comparison_result(
run, baseline, test_id, field,
ts.Sample.get_hash_of_binary_field),
run.id)
for baseline, run in machine_run_info)
# Compute the worst cell value.
if len(row) > 2:
row[1] = max(cr.pct_delta
for cr, _ in row[2:])
test_table.append(row)
# Order the table by worst regression.
test_table.sort(key=lambda row: 0 if row[1] is None else row[1], reverse=True)
return render_template("v4_global_status.html",
tests=test_table,
machines=recent_machines,
fields=metric_fields,
selected_field=field,
selected_revision=revision,
**ts_data(ts))
@v4_route("/daily_report")
def v4_daily_report_overview():
    """Redirect to the daily report for the most recent submitted run."""
    session = request.session
    ts = request.get_testsuite()
    # Find the most recently started run, if any.
    latest = session.query(ts.Run) \
        .order_by(ts.Run.start_time.desc()).limit(1).first()
    # Use its start time when available, otherwise fall back to today.
    date = latest.start_time if latest else datetime.date.today()
    # Forward the remaining query arguments, dropping any explicit date.
    extra_args = request.args.copy()
    for key in ("year", "month", "day"):
        extra_args.pop(key, None)
    return v4_redirect(v4_url_for(".v4_daily_report",
                                  year=date.year, month=date.month,
                                  day=date.day, **extra_args))
@v4_route("/daily_report/<int:year>/<int:month>/<int:day>")
def v4_daily_report(year, month, day):
    """Render the daily report for the given date.

    Optional query arguments:
      num_days  -- number of days covered by the report (default 3)
      day_start -- hour at which a report day starts (default 16)
      filter-machine-regex -- restrict the report to matching machines
    """
    # Parse optional numeric arguments; a malformed value is a client
    # error (400), not an unhandled ValueError turning into a 500.
    try:
        num_days = int(request.args.get('num_days', 3))
        day_start = int(request.args.get('day_start', 16))
    except ValueError:
        return abort(400)
    filter_machine_regex = request.args.get('filter-machine-regex')
    ts = request.get_testsuite()
    # Create the report object.
    report = lnt.server.reporting.dailyreport.DailyReport(
        ts, year, month, day, num_days, day_start,
        filter_machine_regex=filter_machine_regex)
    # Build the report; DailyReport validates its arguments here.
    try:
        report.build(request.session)
    except ValueError:
        return abort(400)
    return render_template("v4_daily_report.html", report=report,
                           analysis=lnt.server.reporting.analysis,
                           **ts_data(ts))
###
# Cross Test-Suite V4 Views
def get_summary_config_path():
    """Return the path of the persisted summary-report configuration file."""
    temp_dir = current_app.old_config.tempDir
    return os.path.join(temp_dir, 'summary_report_config.json')
@db_route("/summary_report/edit", methods=('GET', 'POST'))
def v4_summary_report_ui():
    """Edit (GET) or save (POST) the summary report configuration."""
    # If this is a POST request, update the saved config.
    session = request.session
    if request.method == 'POST':
        # Parse the config data.
        config_data = request.form.get('config')
        config = flask.json.loads(config_data)
        # Write the updated config.
        with open(get_summary_config_path(), 'w') as f:
            flask.json.dump(config, f, indent=2)
        # Redirect to the summary report.
        return v4_redirect(db_url_for(".v4_summary_report"))
    config_path = get_summary_config_path()
    if os.path.exists(config_path):
        with open(config_path) as f:
            config = flask.json.load(f)
    else:
        # Default empty configuration when none has been saved yet.
        config = {
            "machine_names": [],
            "orders": [],
            "machine_patterns": [],
        }
    # Get the list of available test suites.
    testsuites = request.get_db().testsuite.values()
    # Gather the list of all run orders and all machines.
    def to_key(name):
        # Sort numeric-prefixed names numerically, others lexically.
        first = name.split('.', 1)[0]
        if first.isdigit():
            return (int(first), name)
        return (first, name)
    all_machines = set()
    all_orders = set()
    for ts in testsuites:
        for name, in session.query(ts.Machine.name):
            all_machines.add(name)
        for name, in session.query(ts.Order.llvm_project_revision):
            all_orders.add(name)
    all_machines = sorted(all_machines)
    all_orders = sorted(all_orders, key=to_key)
    # NOTE(review): `ts` below is the leaked loop variable from the loop
    # above; if the database has no test suites this raises NameError —
    # confirm whether an empty test-suite list is possible here.
    return render_template("v4_summary_report_ui.html",
                           config=config, all_machines=all_machines,
                           all_orders=all_orders, **ts_data(ts))
@v4_route("/latest_runs_report")
def v4_latest_runs_report():
    """Render a report over the most recently submitted runs.

    Optional query argument:
      num_runs -- how many runs to include (default 10)
    """
    ts = request.get_testsuite()
    # Parse the optional count; a malformed value is a client error
    # (400) rather than an unhandled ValueError turning into a 500.
    try:
        num_runs = int(request.args.get('num_runs', 10))
    except ValueError:
        return abort(400)
    report = lnt.server.reporting.latestrunsreport.LatestRunsReport(ts, num_runs)
    report.build(request.session)
    return render_template("v4_latest_runs_report.html", report=report,
                           analysis=lnt.server.reporting.analysis,
                           **ts_data(ts))
@db_route("/summary_report")
def v4_summary_report():
    """Render the summary report; with ?json=... return the raw data."""
    session = request.session
    # Load the summary report configuration.
    config_path = get_summary_config_path()
    if not os.path.exists(config_path):
        return render_template("error.html", message="""\
You must define a summary report configuration first.""")
    with open(config_path) as f:
        config = flask.json.load(f)
    # Create and build the report object.
    report = lnt.server.reporting.summaryreport.SummaryReport(
        request.get_db(), config['orders'], config['machine_names'],
        config['machine_patterns'])
    report.build(session)
    if bool(request.args.get('json')):
        # JSON variant: ship the plot ticks plus the normalized samples.
        data = [[header, samples.getvalue()]
                for header, samples in report.normalized_data_table.items()]
        return flask.jsonify(ticks=report.report_orders, data=data)
    return render_template("v4_summary_report.html", report=report)
@frontend.route('/rules')
def rules():
    """List every rule known to the rules manager."""
    return render_template("rules.html",
                           rules=lnt.server.db.rules_manager.DESCRIPTIONS)
@frontend.route('/log')
def log():
    """Display the server log file.

    Lines in the file look like: ``2017-07-21 15:02:15,143 ERROR: ...``
    """
    # The original code contained a stray no-op string literal with the
    # sample log line above; it has been folded into the docstring.
    with open(current_app.config['log_file_name'], 'r') as f:
        log_lines = f.readlines()
    return render_template("log.html", log_lines=log_lines)
@frontend.route('/debug')
def debug():
    # Test endpoint for exercising error handling: the assertion fails
    # (raising AssertionError) when the app runs in debug mode; otherwise
    # the view returns None, which Flask itself reports as an error.
    assert not current_app.debug
@frontend.route('/__health')
def health():
    """Our instance health. If queue is too long or we use too much mem,
    return 500. Monitor might reboot us for this."""
    import resource
    # Flag the instance as unhealthy above 1 GiB of peak RSS.
    mem_limit = 1024**3
    max_rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    if max_rss > mem_limit:
        return "Over memory " + str(max_rss) + ">" + str(mem_limit), 500
    return "Ok", 200
@v4_route("/search")
def v4_search():
    """Return JSON search results for runs matching the 'q' query string.

    Query arguments:
      q -- the search string (required)
      l -- maximum number of results (default 8)
      m -- default machine name to search within
    """
    # NOTE: an unused `_isint` helper that previously lived here was
    # dead code and has been removed.
    session = request.session
    ts = request.get_testsuite()
    query = request.args.get('q')
    l_arg = request.args.get('l', 8)
    default_machine = request.args.get('m', None)
    assert query
    results = lnt.server.db.search.search(session, ts, query,
                                          num_results=l_arg,
                                          default_machine=default_machine)
    # Answer as a JSON list of (label, run id) pairs.
    return json.dumps(
        [('%s #%s' % (r.machine.name, r.order.llvm_project_revision),
          r.id)
         for r in results])
class MatrixDataRequest(object):
    """One (machine, test, field) data series requested for the matrix view."""

    def __init__(self, machine, test, field):
        self.machine = machine
        self.test = test
        self.field = field
        # Populated later by the matrix view; initialize here so that
        # __repr__ does not raise AttributeError before samples exist.
        self.samples = None

    def __repr__(self):
        return "{}:{}({} samples)" \
            .format(self.machine.name,
                    self.test.name,
                    len(self.samples) if self.samples else "No")
# How much data to render in the Matrix view.
# Each entry is (sample limit as a string, human-readable label);
# '-1' means "no limit".
MATRIX_LIMITS = [
    ('12', 'Small'),
    ('50', 'Medium'),
    ('250', 'Large'),
    ('-1', 'All'),
]
class MatrixOptions(Form):
    """Options form for the matrix view (sample-count limit selector)."""
    limit = SelectField('Size', choices=MATRIX_LIMITS)
def baseline():
    # type: () -> Optional[testsuitedb.Baseline]
    """Get the baseline object from the user's current session baseline value
    or None if one is not defined.
    """
    session = request.session
    ts = request.get_testsuite()
    base_id = flask.session.get(baseline_key(ts.name))
    if not base_id:
        return None
    # Query.get() returns None when no row matches (it never raises
    # NoResultFound), so the previous try/except was dead code.
    return session.query(ts.Baseline).get(base_id)
@v4_route("/matrix", methods=['GET', 'POST'])
def v4_matrix():
"""A table view for Run sample data, because *some* people really
like to be able to see results textually.
request.args.limit limits the number of samples.
for each dataset to add, there will be a "plot.n=.m.b.f" where m is machine
ID, b is benchmark ID and f os field kind offset. "n" is used to unique
the paramters, and is ignored.
"""
session = request.session
ts = request.get_testsuite()
# Load the matrix request parameters.
form = MatrixOptions(request.form)
if request.method == 'POST':
post_limit = form.limit.data
else:
post_limit = MATRIX_LIMITS[0][0]
data_parameters = [] # type: List[MatrixDataRequest]
for name, value in request.args.items():
# plot.<unused>=<machine id>.<test id>.<field index>
if not name.startswith(str('plot.')):
continue
# Ignore the extra part of the key, it is unused.
machine_id_str, test_id_str, field_index_str = value.split('.')
try:
machine_id = int(machine_id_str)
test_id = int(test_id_str)
field_index = int(field_index_str)
except ValueError:
err_msg = "data {} was malformed. {} must be int.int.int"
return abort(400, err_msg.format(name, value))
if not (0 <= field_index < len(ts.sample_fields)):
return abort(404, "Invalid field index: {}".format(field_index))
try:
machine = session.query(ts.Machine) \
.filter(ts.Machine.id == machine_id) \
.one()
except NoResultFound:
return abort(404, "Invalid machine ID: {}".format(machine_id))
try:
test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
except NoResultFound:
return abort(404, "Invalid test ID: {}".format(test_id))
try:
field = ts.sample_fields[field_index]
except NoResultFound:
return abort(404, "Invalid field_index: {}".format(field_index))
valid_request = MatrixDataRequest(machine, test, field)
data_parameters.append(valid_request)
if not data_parameters:
abort(404, "Request requires some data arguments.")
# Feature: if all of the results are from the same machine, hide the name
# to make the headers more compact.
dedup = True
for r in data_parameters:
if r.machine.id != data_parameters[0].machine.id:
dedup = False
if dedup:
machine_name_common = data_parameters[0].machine.name
machine_id_common = data_parameters[0].machine.id
else:
machine_name_common = machine_id_common | |
# Copyright 2016 IBM, Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""etcd storage backend."""
from datetime import datetime
import etcd
from oslo_concurrency import lockutils
from oslo_log import log
from oslo_serialization import jsonutils as json
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from zun.common import exception
from zun.common.i18n import _
from zun.common import singleton
import zun.conf
from zun.db.etcd import models
LOG = log.getLogger(__name__)
CONF = zun.conf.CONF
def get_connection():
    """Build an EtcdAPI connection from the configured etcd endpoint."""
    return EtcdAPI(host=CONF.etcd.etcd_host, port=CONF.etcd.etcd_port)
def clean_all_data():
    """Wipe every piece of zun data stored in etcd."""
    get_connection().clean_all_zun_data()
def add_identity_filter(query, value):
    """Adds an identity filter to a query.

    Filters results by ID, if supplied value is a valid integer.
    Otherwise attempts to filter results by UUID.

    :param query: Initial query to add filter to.
    :param value: Value for filtering results by.
    :return: Modified query.
    """
    # Guard-clause style: try integer id first, then UUID, else reject.
    if strutils.is_int_like(value):
        return query.filter_by(id=value)
    if uuidutils.is_uuid_like(value):
        return query.filter_by(uuid=value)
    raise exception.InvalidIdentity(identity=value)
def translate_etcd_result(etcd_result, model_type):
    """Translate an etcd JSON payload into the matching etcd model.

    :param etcd_result: raw etcd read result whose ``value`` is JSON.
    :param model_type: one of 'container', 'zun_service', 'image',
                       'resource_class' or 'compute_node'.
    :returns: the instantiated model object.
    :raises: InvalidParameterValue for an unknown model_type; re-raises
             ValueError/TypeError from JSON decoding after logging.
    """
    model_classes = {
        'container': models.Container,
        'zun_service': models.ZunService,
        'image': models.Image,
        'resource_class': models.ResourceClass,
        'compute_node': models.ComputeNode,
    }
    try:
        data = json.loads(etcd_result.value)
        model_class = model_classes.get(model_type)
        if model_class is None:
            # Interpolate the value into the message; previously it was
            # passed as a second positional argument and never formatted.
            raise exception.InvalidParameterValue(
                _('The model_type value: %s is invalid.') % model_type)
        return model_class(data)
    except (ValueError, TypeError) as e:
        LOG.error("Error occurred while translating etcd result: %s",
                  six.text_type(e))
        raise
@six.add_metaclass(singleton.Singleton)
class EtcdAPI(object):
"""etcd API."""
    def __init__(self, host, port):
        # Single etcd client reused by all operations; the class is a
        # singleton (see the six.add_metaclass decorator on the class),
        # so one client serves the whole process.
        self.client = etcd.Client(host=host, port=port)
@lockutils.synchronized('etcd-client')
def clean_all_zun_data(self):
try:
for d in self.client.read('/').children:
if d.key in ('/containers',):
self.client.delete(d.key, recursive=True)
except etcd.EtcdKeyNotFound as e:
LOG.error('Error occurred while cleaning zun data: %s',
six.text_type(e))
raise
def _add_tenant_filters(self, context, filters):
filters = filters or {}
if context.is_admin and context.all_tenants:
return filters
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
return filters
def _filter_resources(self, resources, filters):
for c in list(resources):
for k, v in filters.items():
if c.get(k) != v:
resources.remove(c)
break
return resources
    def _process_list_result(self, res_list, limit=None, sort_key=None):
        """Sort and truncate a list of etcd model results.

        :param res_list: list of model objects to post-process.
        :param limit: maximum number of items to return (falsy for all).
        :param sort_key: attribute/key name used for an ascending sort.
        :return: the sorted (and possibly truncated) list.
        """
        if len(res_list) == 0:
            return []
        sorted_res_list = res_list
        if sort_key:
            # NOTE(review): validation uses hasattr() but the sort uses
            # .get(); these agree only if the model exposes the sort key
            # both as an attribute and as a mapping key — confirm.
            if not hasattr(res_list[0], sort_key):
                raise exception.InvalidParameterValue(
                    err='Container has no attribute: %s' % sort_key)
            sorted_res_list = sorted(res_list, key=lambda k: k.get(sort_key))
        if limit:
            sorted_res_list = sorted_res_list[0:limit]
        return sorted_res_list
    def list_containers(self, context, filters=None, limit=None,
                        marker=None, sort_key=None, sort_dir=None):
        """List containers visible to *context*, filtered and sorted.

        ``marker`` and ``sort_dir`` are accepted for API compatibility
        but are currently ignored by this etcd backend.
        """
        try:
            res = getattr(self.client.read('/containers'), 'children', None)
        except etcd.EtcdKeyNotFound:
            # Before the first container been created, path '/containers'
            # does not exist.
            return []
        except Exception as e:
            LOG.error(
                "Error occurred while reading from etcd server: %s",
                six.text_type(e))
            raise
        containers = []
        for c in res:
            # Skip nodes without a value (they carry no container data).
            if c.value is not None:
                containers.append(translate_etcd_result(c, 'container'))
        # Restrict results to the caller's tenant, then apply the
        # caller-supplied filters, sorting and limit.
        filters = self._add_tenant_filters(context, filters)
        filtered_containers = self._filter_resources(
            containers, filters)
        return self._process_list_result(filtered_containers,
                                         limit=limit, sort_key=sort_key)
    def _validate_unique_container_name(self, context, name):
        """Raise ContainerAlreadyExists if *name* is already taken.

        The scope of the uniqueness check is controlled by
        CONF.compute.unique_container_name_scope:
        unset      -> no check at all
        'project'  -> unique within the caller's project
        'global'   -> unique across all projects
        any other  -> no check
        """
        if not CONF.compute.unique_container_name_scope:
            return
        # NOTE(review): lowername is only used in the exception below;
        # the lookup filter uses the original-case name, so the check is
        # case-sensitive — confirm whether that is intended.
        lowername = name.lower()
        filters = {'name': name}
        if CONF.compute.unique_container_name_scope == 'project':
            filters['project_id'] = context.project_id
        elif CONF.compute.unique_container_name_scope == 'global':
            pass
        else:
            return
        try:
            containers = self.list_containers(context, filters=filters)
        except etcd.EtcdKeyNotFound:
            return
        except Exception as e:
            LOG.error('Error occurred while retrieving container: %s',
                      six.text_type(e))
            raise
        if len(containers) > 0:
            raise exception.ContainerAlreadyExists(field='name',
                                                   value=lowername)
@lockutils.synchronized('etcd_container')
def create_container(self, context, container_data):
# ensure defaults are present for new containers
if not container_data.get('uuid'):
container_data['uuid'] = uuidutils.generate_uuid()
if container_data.get('name'):
self._validate_unique_container_name(context,
container_data['name'])
container = models.Container(container_data)
try:
container.save()
except Exception:
raise
return container
    def get_container_by_uuid(self, context, container_uuid):
        """Fetch one container by UUID, scoped to the caller's tenant.

        Raises ContainerNotFound when the etcd key is absent or when the
        stored container is filtered out by the tenant filters.
        """
        try:
            res = self.client.read('/containers/' + container_uuid)
            container = translate_etcd_result(res, 'container')
            filtered_containers = self._filter_resources(
                [container], self._add_tenant_filters(context, {}))
            if len(filtered_containers) > 0:
                return filtered_containers[0]
            else:
                # NOTE(review): raised inside the try block, so this is also
                # caught by the broad handler below, logged, and re-raised
                # unchanged.
                raise exception.ContainerNotFound(container=container_uuid)
        except etcd.EtcdKeyNotFound:
            raise exception.ContainerNotFound(container=container_uuid)
        except Exception as e:
            LOG.error('Error occurred while retrieving container: %s',
                      six.text_type(e))
            raise
def get_container_by_name(self, context, container_name):
try:
filters = self._add_tenant_filters(
context, {'name': container_name})
containers = self.list_containers(context, filters=filters)
except etcd.EtcdKeyNotFound:
raise exception.ContainerNotFound(container=container_name)
except Exception as e:
LOG.error('Error occurred while retrieving container: %s',
six.text_type(e))
raise
if len(containers) > 1:
raise exception.Conflict('Multiple containers exist with same '
'name. Please use the container uuid '
'instead.')
elif len(containers) == 0:
raise exception.ContainerNotFound(container=container_name)
return containers[0]
    @lockutils.synchronized('etcd_container')
    def destroy_container(self, context, container_uuid):
        """Delete a container's etcd record.

        The lookup re-applies tenant filtering, so a foreign or missing
        container raises ContainerNotFound before any delete happens.
        """
        container = self.get_container_by_uuid(context, container_uuid)
        self.client.delete('/containers/' + container.uuid)
    @lockutils.synchronized('etcd_container')
    def update_container(self, context, container_uuid, values):
        """Merge *values* into an existing container record.

        The 'uuid' field is immutable; a new 'name' is checked against the
        configured uniqueness scope first.  Returns the updated container.
        """
        # NOTE(yuywz): Update would fail if any other client
        # write '/containers/$CONTAINER_UUID' in the meanwhile
        if 'uuid' in values:
            msg = _("Cannot overwrite UUID for an existing Container.")
            raise exception.InvalidParameterValue(err=msg)
        if 'name' in values:
            self._validate_unique_container_name(context, values['name'])
        try:
            # Resolve through get_container_by_uuid so tenant filtering
            # applies before the raw read-modify-write below.
            target_uuid = self.get_container_by_uuid(
                context, container_uuid).uuid
            target = self.client.read('/containers/' + target_uuid)
            target_value = json.loads(target.value)
            target_value.update(values)
            target.value = json.dump_as_bytes(target_value)
            self.client.update(target)
        except etcd.EtcdKeyNotFound:
            raise exception.ContainerNotFound(container=container_uuid)
        except Exception as e:
            LOG.error('Error occurred while updating container: %s',
                      six.text_type(e))
            raise
        return translate_etcd_result(target, 'container')
    @lockutils.synchronized('etcd_zunservice')
    def create_zun_service(self, values):
        """Register a new zun service record, stamping its creation time."""
        values['created_at'] = datetime.isoformat(timeutils.utcnow())
        zun_service = models.ZunService(values)
        zun_service.save()
        return zun_service
def list_zun_services(self, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
try:
res = getattr(self.client.read('/zun_services'), 'children', None)
except etcd.EtcdKeyNotFound:
LOG.error(
("Path '/zun_services' does not exist, seems etcd server "
"was not been initialized appropriately for Zun."))
raise
except Exception as e:
LOG.error(
"Error occurred while reading from etcd server: %s",
six.text_type(e))
raise
services = []
for c in res:
if c.value is not None:
services.append(translate_etcd_result(c, 'zun_service'))
if filters:
services = self._filter_resources(services, filters)
return self._process_list_result(
services, limit=limit, sort_key=sort_key)
def list_zun_services_by_binary(self, binary):
services = self.list_zun_services(filters={'binary': binary})
return self._process_list_result(services)
def get_zun_service(self, host, binary):
try:
service = None
res = self.client.read('/zun_services/' + host + '_' + binary)
service = translate_etcd_result(res, 'zun_service')
except etcd.EtcdKeyNotFound:
raise exception.ZunServiceNotFound(host=host, binary=binary)
except Exception as e:
LOG.error('Error occurred while retrieving zun service: %s',
six.text_type(e))
raise
finally:
return service
    @lockutils.synchronized('etcd_zunservice')
    def destroy_zun_service(self, host, binary):
        """Remove the service record keyed by '<host>_<binary>'.

        Raises ZunServiceNotFound when no such record exists.
        """
        try:
            self.client.delete('/zun_services/' + host + '_' + binary)
        except etcd.EtcdKeyNotFound:
            raise exception.ZunServiceNotFound(host=host, binary=binary)
        except Exception as e:
            LOG.error('Error occurred while destroying zun service: %s',
                      six.text_type(e))
            raise
    @lockutils.synchronized('etcd_zunservice')
    def update_zun_service(self, host, binary, values):
        """Merge *values* into a service record and stamp 'updated_at'.

        Raises ZunServiceNotFound when the record does not exist.
        """
        try:
            target = self.client.read('/zun_services/' + host + '_' + binary)
            target_value = json.loads(target.value)
            values['updated_at'] = datetime.isoformat(timeutils.utcnow())
            target_value.update(values)
            target.value = json.dump_as_bytes(target_value)
            self.client.update(target)
        except etcd.EtcdKeyNotFound:
            raise exception.ZunServiceNotFound(host=host, binary=binary)
        except Exception as e:
            LOG.error('Error occurred while updating service: %s',
                      six.text_type(e))
            raise
    @lockutils.synchronized('etcd_image')
    def pull_image(self, context, values):
        """Create an image record for repo:tag, generating a UUID if needed.

        Raises ImageAlreadyExists when a record for the same repo and tag
        is already visible to this tenant.  The surrounding lock serializes
        the check-then-create against other pull_image calls.
        """
        if not values.get('uuid'):
            values['uuid'] = uuidutils.generate_uuid()
        repo = values.get('repo')
        tag = values.get('tag')
        image = self.get_image_by_repo_and_tag(context, repo, tag)
        if image:
            raise exception.ImageAlreadyExists(repo=repo, tag=tag)
        image = models.Image(values)
        image.save()
        return image
    @lockutils.synchronized('etcd_image')
    def update_image(self, image_uuid, values):
        """Merge *values* into an existing image record.

        The 'uuid' field is immutable.  Returns the updated image.
        Raises ImageNotFound when the record does not exist.
        """
        if 'uuid' in values:
            msg = _('Cannot overwrite UUID for an existing image.')
            raise exception.InvalidParameterValue(err=msg)
        try:
            # Read-modify-write of the raw etcd node, serialized by the
            # process-local 'etcd_image' lock above.
            target = self.client.read('/images/' + image_uuid)
            target_value = json.loads(target.value)
            target_value.update(values)
            target.value = json.dump_as_bytes(target_value)
            self.client.update(target)
        except etcd.EtcdKeyNotFound:
            raise exception.ImageNotFound(image=image_uuid)
        except Exception as e:
            LOG.error('Error occurred while updating image: %s',
                      six.text_type(e))
            raise
        return translate_etcd_result(target, 'image')
def list_images(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
try:
res = getattr(self.client.read('/images'), 'children', None)
except etcd.EtcdKeyNotFound:
# Before the first image been pulled, path '/image' does
# not exist.
return []
except Exception as e:
LOG.error(
"Error occurred while reading from etcd server: %s",
six.text_type(e))
raise
images = []
for i in res:
if i.value is not None:
images.append(translate_etcd_result(i, 'image'))
filters = self._add_tenant_filters(context, filters)
filtered_images = self._filter_resources(images, filters)
return self._process_list_result(filtered_images,
limit=limit, sort_key=sort_key)
    def get_image_by_uuid(self, context, image_uuid):
        """Fetch one image by UUID, scoped to the caller's tenant.

        Raises ImageNotFound when the key is absent or when the stored
        image is filtered out by the tenant filters.
        """
        try:
            res = self.client.read('/images/' + image_uuid)
            image = translate_etcd_result(res, 'image')
            filtered_images = self._filter_resources(
                [image], self._add_tenant_filters(context, {}))
            if len(filtered_images) > 0:
                return filtered_images[0]
            else:
                # NOTE(review): raised inside the try block, so this is also
                # caught by the broad handler below, logged, and re-raised.
                raise exception.ImageNotFound(image=image_uuid)
        except etcd.EtcdKeyNotFound:
            raise exception.ImageNotFound(image=image_uuid)
        except Exception as e:
            LOG.error('Error occurred while retrieving image: %s',
                      six.text_type(e))
            raise
def get_image_by_repo_and_tag(self, context, repo, tag):
filters = {'repo': repo, 'tag': tag}
images = self.list_images(context, filters=filters)
if len(images) == 0:
return None
return images[0]
def list_resource_classes(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
try:
res = getattr(self.client.read('/resource_classes'),
'children', None)
except etcd.EtcdKeyNotFound:
return []
except Exception as e:
LOG.error(
'Error occurred while reading from etcd server: %s',
six.text_type(e))
raise
resource_classes = []
for r in res:
if r.value is not None:
resource_classes.append(
translate_etcd_result(r, 'resource_class'))
if filters:
resource_classes = self._filter_resources(
resource_classes, filters)
return self._process_list_result(
resource_classes, limit=limit, sort_key=sort_key)
    @lockutils.synchronized('etcd_resource_class')
    def create_resource_class(self, context, values):
        """Persist and return a new resource class record."""
        resource_class = models.ResourceClass(values)
        resource_class.save()
        return resource_class
def get_resource_class(self, context, ident):
if uuidutils.is_uuid_like(ident):
return self._get_resource_class_by_uuid(context, ident)
else:
return self._get_resource_class_by_name(context, ident)
def | |
<gh_stars>0
# -*- coding: utf-8 -*-
import json
import os
from datetime import datetime, timedelta
from urllib.parse import urlencode
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.test import RequestFactory
from django.urls import reverse
from django.utils.encoding import force_str
from django.utils.translation import trim_whitespace
from unittest import mock
import pytest
import responses
from pyquery import PyQuery as pq
from waffle.testutils import override_switch
from olympia import amo, core
from olympia.accounts.views import API_TOKEN_COOKIE
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon, AddonCategory, AddonUser
from olympia.amo.storage_utils import copy_stored_file
from olympia.amo.templatetags.jinja_helpers import (
format_date,
url as url_reverse,
urlparams,
)
from olympia.amo.tests import TestCase, addon_factory, user_factory, version_factory
from olympia.amo.tests.test_helpers import get_image_path
from olympia.api.models import SYMMETRIC_JWT_TYPE, APIKey, APIKeyConfirmation
from olympia.applications.models import AppVersion
from olympia.constants.promoted import RECOMMENDED
from olympia.devhub.decorators import dev_required
from olympia.devhub.models import BlogPost
from olympia.devhub.views import get_next_version_number
from olympia.files.models import FileUpload
from olympia.files.tests.test_models import UploadTest as BaseUploadTest
from olympia.ratings.models import Rating
from olympia.translations.models import Translation, delete_translation
from olympia.users.models import IPNetworkUserRestriction, UserProfile
from olympia.users.tests.test_views import UserViewBase
from olympia.versions.models import ApplicationsVersions, Version, VersionPreview
from olympia.zadmin.models import set_config
class HubTest(TestCase):
    """Base test case for Developer Hub views.

    Logs in as the fixture user (id 999) and seeds one add-on authored by
    somebody else where our user only holds a *deleted* author role, so it
    must never show up in the dashboards under test.
    """
    fixtures = ['base/addon_3615', 'base/users']
    def setUp(self):
        super(HubTest, self).setUp()
        self.url = reverse('devhub.index')
        assert self.client.login(email='<EMAIL>')
        assert self.client.get(self.url).status_code == 200
        self.user_profile = UserProfile.objects.get(id=999)
        not_their_addon = addon_factory(users=[user_factory()])
        AddonUser.unfiltered.create(
            addon=not_their_addon, user=self.user_profile, role=amo.AUTHOR_ROLE_DELETED
        )
    def clone_addon(self, num, addon_id=3615):
        # Create `num` add-ons mirroring the source add-on's type/status,
        # all owned by the logged-in user.  Returns the new Addon objects.
        addons = []
        source = Addon.objects.get(id=addon_id)
        for i in range(num):
            data = {
                'type': source.type,
                'status': source.status,
                'name': 'cloned-addon-%s-%s' % (addon_id, i),
                'users': [self.user_profile],
            }
            addons.append(addon_factory(**data))
        return addons
class TestDashboard(HubTest):
    """Tests for the devhub add-ons/themes dashboard listings."""
    def setUp(self):
        super(TestDashboard, self).setUp()
        self.url = reverse('devhub.addons')
        self.themes_url = reverse('devhub.themes')
        assert self.client.get(self.url).status_code == 200
        self.addon = Addon.objects.get(pk=3615)
        self.addon.addonuser_set.create(user=self.user_profile)
    def test_addons_layout(self):
        doc = pq(self.client.get(self.url).content)
        assert doc('title').text() == (
            'Manage My Submissions :: Developer Hub :: Add-ons for Firefox'
        )
        assert doc('.links-footer').length == 1
        assert doc('#copyright').length == 1
        assert doc('#footer-links .mobile-link').length == 0
    def get_action_links(self, addon_id):
        # Helper: text of each action link rendered for the given add-on row.
        response = self.client.get(self.url)
        doc = pq(response.content)
        selector = '.item[data-addonid="%s"] .item-actions li > a' % addon_id
        links = [a.text.strip() for a in doc(selector)]
        return links
    def test_no_addons(self):
        """Check that no add-ons are displayed for this user."""
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert doc('.item item').length == 0
    def test_addon_pagination(self):
        """Check that the correct info. is displayed for each add-on:
        namely, that add-ons are paginated at 10 items per page, and that
        when there is more than one page, the 'Sort by' header and pagination
        footer appear.
        """
        # Create 10 add-ons. We're going to make the existing one from setUp
        # a static theme which shouldn't show up as an addon in this list.
        addons = self.clone_addon(10)
        self.addon.update(type=amo.ADDON_STATICTHEME)
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert len(doc('.item .item-info')) == 10
        assert len(doc('.item .info.extension')) == 10
        assert doc('nav.paginator').length == 0
        for addon in addons:
            assert addon.get_icon_url(64) in doc('.item .info h3 a').html()
        # Create 5 add-ons -have to change self.addon back to clone extensions.
        self.addon.update(type=amo.ADDON_EXTENSION)
        self.clone_addon(5)
        self.addon.update(type=amo.ADDON_STATICTHEME)
        response = self.client.get(self.url, {'page': 2})
        doc = pq(response.content)
        assert len(doc('.item .item-info')) == 5
        assert doc('nav.paginator').length == 1
    def test_themes(self):
        """Check themes show on dashboard."""
        # Create 2 themes.
        staticthemes = []
        for x in range(2):
            addon = addon_factory(type=amo.ADDON_STATICTHEME, users=[self.user_profile])
            VersionPreview.objects.create(version=addon.current_version)
            staticthemes.append(addon)
        response = self.client.get(self.themes_url)
        doc = pq(response.content)
        assert len(doc('.item .item-info')) == 2
        assert len(doc('.item .info.statictheme')) == 2
        for addon in staticthemes:
            assert addon.current_previews[0].thumbnail_url in [
                img.attrib['src'] for img in doc('.info.statictheme h3 img')
            ]
    def test_show_hide_statistics_and_new_version_for_disabled(self):
        # Not disabled: show statistics and new version links.
        self.addon.update(disabled_by_user=False)
        links = self.get_action_links(self.addon.pk)
        assert 'Statistics' in links, 'Unexpected: %r' % links
        assert 'New Version' in links, 'Unexpected: %r' % links
        # Disabled (user): hide new version link.
        self.addon.update(disabled_by_user=True)
        links = self.get_action_links(self.addon.pk)
        assert 'New Version' not in links, 'Unexpected: %r' % links
        # Disabled (admin): hide statistics and new version links.
        self.addon.update(disabled_by_user=False, status=amo.STATUS_DISABLED)
        links = self.get_action_links(self.addon.pk)
        assert 'Statistics' not in links, 'Unexpected: %r' % links
        assert 'New Version' not in links, 'Unexpected: %r' % links
    def test_public_addon(self):
        assert self.addon.status == amo.STATUS_APPROVED
        doc = pq(self.client.get(self.url).content)
        item = doc('.item[data-addonid="%s"]' % self.addon.id)
        assert item.find('h3 a').attr('href') == self.addon.get_dev_url()
        assert item.find('p.downloads'), 'Expected weekly downloads'
        assert item.find('p.users'), 'Expected ADU'
        assert item.find('.item-details'), 'Expected item details'
        assert not item.find(
            'p.incomplete'
        ), 'Unexpected message about incomplete add-on'
        # NOTE(review): nothing is asserted after this deletion — presumably
        # it only checks the delete does not blow up; confirm intent.
        appver = self.addon.current_version.apps.all()[0]
        appver.delete()
    def test_dev_news(self):
        # Seven posts exist, but only the five most recent ones render.
        for i in range(7):
            bp = BlogPost(
                title='hi %s' % i, date_posted=datetime.now() - timedelta(days=i)
            )
            bp.save()
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert doc('.blog-posts').length == 1
        assert doc('.blog-posts li').length == 5
        assert doc('.blog-posts li a').eq(0).text() == 'hi 0'
        assert doc('.blog-posts li a').eq(4).text() == 'hi 4'
    def test_sort_created_filter(self):
        response = self.client.get(self.url + '?sort=created')
        doc = pq(response.content)
        assert doc('.item-details').length == 1
        elm = doc('.item-details .date-created')
        assert elm.length == 1
        assert elm.remove('strong').text() == (format_date(self.addon.created))
    def test_sort_updated_filter(self):
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert doc('.item-details').length == 1
        elm = doc('.item-details .date-updated')
        assert elm.length == 1
        assert elm.remove('strong').text() == (
            trim_whitespace(format_date(self.addon.last_updated))
        )
    def test_purely_unlisted_addon_are_not_shown_as_incomplete(self):
        self.make_addon_unlisted(self.addon)
        assert self.addon.has_complete_metadata()
        response = self.client.get(self.url)
        doc = pq(response.content)
        # It should not be considered incomplete despite having STATUS_NULL,
        # since it's purely unlisted.
        assert not doc('.incomplete')
        # Rest of the details should be shown, but not the AMO-specific stuff.
        assert not doc('.item-info')
        assert doc('.item-details')
    def test_mixed_versions_addon_with_incomplete_metadata(self):
        self.make_addon_unlisted(self.addon)
        version = version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED)
        version.update(license=None)
        self.addon.reload()
        assert not self.addon.has_complete_metadata()
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert doc('.incomplete').text() == (
            'This add-on is missing some required information before it can be'
            ' submitted for publication.'
        )
        assert doc('form.resume').attr('action') == (
            url_reverse('devhub.request-review', self.addon.slug)
        )
        assert doc('button.link').text() == 'Resume'
    def test_no_versions_addon(self):
        self.addon.current_version.delete()
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert doc('.incomplete').text() == ("This add-on doesn't have any versions.")
class TestUpdateCompatibility(TestCase):
    """Tests for the compat-update tooltips/endpoints on the dashboard."""
    fixtures = ['base/users', 'base/addon_3615']
    def setUp(self):
        super(TestUpdateCompatibility, self).setUp()
        assert self.client.login(email='<EMAIL>')
        self.url = reverse('devhub.addons')
        # These aren't realistic but work with existing tests and the 3615
        # addon
        self.create_appversion('android', '3.7a1pre')
        self.create_appversion('android', '4.0')
    def create_appversion(self, name, version):
        return AppVersion.objects.create(application=amo.APPS[name].id, version=version)
    def test_no_compat(self):
        # Dictionaries have no compat info, and the ajax endpoints 404.
        addon = Addon.objects.get(pk=3615)
        addon.update(type=amo.ADDON_DICT)
        self.client.logout()
        assert self.client.login(email='<EMAIL>')
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert not doc('.item[data-addonid="3615"] li.compat')
        response = self.client.get(
            reverse(
                'devhub.ajax.compat.update', args=[addon.slug, addon.current_version.id]
            )
        )
        assert response.status_code == 404
        response = self.client.get(
            reverse('devhub.ajax.compat.status', args=[addon.slug])
        )
        assert response.status_code == 404
    def test_compat(self):
        # The compat-update tooltip only appears for strict compatibility.
        addon = Addon.objects.get(pk=3615)
        response = self.client.get(self.url)
        doc = pq(response.content)
        cu = doc('.item[data-addonid="3615"] .tooltip.compat-update')
        assert not cu
        addon.current_version.files.update(strict_compatibility=True)
        response = self.client.get(self.url)
        doc = pq(response.content)
        cu = doc('.item[data-addonid="3615"] .tooltip.compat-update')
        assert cu
        update_url = reverse(
            'devhub.ajax.compat.update', args=[addon.slug, addon.current_version.id]
        )
        assert cu.attr('data-updateurl') == update_url
        status_url = reverse('devhub.ajax.compat.status', args=[addon.slug])
        selector = '.item[data-addonid="3615"] li.compat'
        assert doc(selector).attr('data-src') == status_url
        assert doc('.item[data-addonid="3615"] .compat-update-modal')
    def test_incompat_firefox(self):
        addon = Addon.objects.get(pk=3615)
        addon.current_version.files.update(strict_compatibility=True)
        versions = ApplicationsVersions.objects.all()[0]
        versions.max = AppVersion.objects.get(version='2.0')
        versions.save()
        doc = pq(self.client.get(self.url).content)
        assert doc('.item[data-addonid="3615"] .tooltip.compat-error')
    def test_incompat_android(self):
        addon = Addon.objects.get(pk=3615)
        addon.current_version.files.update(strict_compatibility=True)
        appver = AppVersion.objects.get(version='2.0')
        appver.update(application=amo.ANDROID.id)
        av = ApplicationsVersions.objects.all()[0]
        av.application = amo.ANDROID.id
        av.max = appver
        av.save()
        doc = pq(self.client.get(self.url).content)
        assert doc('.item[data-addonid="3615"] .tooltip.compat-error')
class TestDevRequired(TestCase):
    """Access-control tests for the dev_required view decorator."""
    fixtures = ['base/users', 'base/addon_3615']
    def setUp(self):
        super(TestDevRequired, self).setUp()
        self.addon = Addon.objects.get(id=3615)
        self.edit_page_url = self.addon.get_dev_url('edit')
        self.get_url = self.addon.get_dev_url('versions')
        self.post_url = self.addon.get_dev_url('delete')
        assert self.client.login(email='<EMAIL>')
        self.au = self.addon.addonuser_set.get(user__email='<EMAIL>')
        assert self.au.role == amo.AUTHOR_ROLE_OWNER
    def test_anon(self):
        # Anonymous users are bounced to login for both pages.
        self.client.logout()
        self.assertLoginRedirects(self.client.get(self.get_url), self.get_url)
        self.assertLoginRedirects(
            self.client.get(self.edit_page_url), self.edit_page_url
        )
    def test_dev_get(self):
        assert self.client.get(self.get_url).status_code == 200
        assert self.client.get(self.edit_page_url).status_code == 200
    def test_dev_post(self):
        self.assert3xx(self.client.post(self.post_url), self.get_url)
    def test_disabled_post_dev(self):
        # A regular developer cannot POST once the add-on is admin-disabled.
        self.addon.update(status=amo.STATUS_DISABLED)
        assert self.client.post(self.get_url).status_code == 403
    def test_disabled_post_admin(self):
        # An admin still can.
        self.addon.update(status=amo.STATUS_DISABLED)
        assert self.client.login(email='<EMAIL>')
        self.assert3xx(self.client.post(self.post_url), self.get_url)
class TestVersionStats(TestCase):
    """Tests for the per-version stats JSON endpoint."""
    fixtures = ['base/users', 'base/addon_3615']
    def setUp(self):
        super(TestVersionStats, self).setUp()
        assert self.client.login(email='<EMAIL>')
    def test_counts(self):
        # Ten ratings against the current version must be reflected in the
        # JSON payload, keyed by version id.
        addon = Addon.objects.get(id=3615)
        version = addon.current_version
        user = UserProfile.objects.get(email='<EMAIL>')
        for _ in range(10):
            Rating.objects.create(addon=addon, user=user, version=addon.current_version)
        url = reverse('devhub.versions.stats', args=[addon.slug])
        data = json.loads(force_str(self.client.get(url).content))
        exp = {
            str(version.id): {
                'reviews': 10,
                'files': 1,
                'version': version.version,
                'id': version.id,
            }
        }
        self.assertDictEqual(data, exp)
class TestDelete(TestCase):
fixtures = ['base/addon_3615']
    def setUp(self):
        # Lambdas re-query on each call so assertions always observe the
        # current database state after a delete.
        super(TestDelete, self).setUp()
        self.get_addon = lambda: Addon.objects.filter(id=3615)
        assert self.client.login(email='<EMAIL>')
        self.user = UserProfile.objects.get(email='<EMAIL>')
        self.get_url = lambda: self.get_addon()[0].get_dev_url('delete')
    def test_post_not(self):
        # Posting without the confirmation slug must not delete the add-on.
        response = self.client.post(self.get_url(), follow=True)
        assert pq(response.content)('.notification-box').text() == (
            'URL name was incorrect. Add-on was not deleted.'
        )
        assert self.get_addon().exists()
        self.assert3xx(response, self.get_addon()[0].get_dev_url('versions'))
    def test_post(self):
        # Posting the matching slug deletes the add-on.
        self.get_addon().get().update(slug='addon-slug')
        response = self.client.post(self.get_url(), {'slug': 'addon-slug'}, follow=True)
        assert pq(response.content)('.notification-box').text() == ('Add-on deleted.')
        assert not self.get_addon().exists()
        self.assert3xx(response, reverse('devhub.addons'))
    def test_post_wrong_slug(self):
        # A mismatched confirmation slug must leave the add-on untouched.
        self.get_addon().get().update(slug='addon-slug')
        response = self.client.post(self.get_url(), {'slug': 'theme-slug'}, follow=True)
        assert pq(response.content)('.notification-box').text() == (
            'URL name was incorrect. Add-on was not deleted.'
        )
        assert self.get_addon().exists()
        self.assert3xx(response, self.get_addon()[0].get_dev_url('versions'))
    def test_post_statictheme(self):
        # Static themes use the theme-specific message and redirect.
        theme = addon_factory(
            name='xpi name',
            type=amo.ADDON_STATICTHEME,
            slug='stheme-slug',
            users=[self.user],
        )
        response = self.client.post(
            theme.get_dev_url('delete'), {'slug': 'stheme-slug'}, follow=True
        )
        assert pq(response.content)('.notification-box').text() == ('Theme deleted.')
        assert not Addon.objects.filter(id=theme.id).exists()
        self.assert3xx(response, reverse('devhub.themes'))
def test_post_statictheme_wrong_slug(self):
theme = addon_factory(
name='xpi name',
type=amo.ADDON_STATICTHEME,
slug='stheme-slug',
users=[self.user],
)
response = self.client.post(
theme.get_dev_url('delete'), {'slug': 'foo-slug'}, follow=True
)
assert pq(response.content)('.notification-box').text() == (
'URL | |
'gaussian', 'gaussian2', etc. If False, only the segment
with exact name match gets a replacement.
Raises:
ValueError: If the argument can not be matched (either the argument
name does not match or the argument number is wrong).
ValueError: If the name can not be matched.
"""
# TODO: is there any reason to use tuples internally?
if replaceeverywhere:
basename = BluePrint._basename
name = basename(name)
nmlst = self._namelist
replacelist = [nm for nm in nmlst if basename(nm) == name]
else:
replacelist = [name]
# Validation
if name not in self._namelist:
raise ValueError('No segment of that name in blueprint.'
' Contains segments: {}'.format(self._namelist))
for name in replacelist:
position = self._namelist.index(name)
function = self._funlist[position]
sig = signature(function)
# Validation
if isinstance(arg, str):
if arg not in sig.parameters:
raise ValueError('No such argument of function '
'{}.'.format(function.__name__) +
'Has arguments '
'{}.'.format(sig.parameters.keys()))
# Each function has two 'secret' arguments, SR and dur
user_params = len(sig.parameters)-2
if isinstance(arg, int) and (arg not in range(user_params)):
raise ValueError('No argument {} '.format(arg) +
'of function {}.'.format(function.__name__) +
' Has {} '.format(user_params) +
'arguments.')
# allow the user to input single values instead of (val,)
no_of_args = len(self._argslist[position])
if not isinstance(value, tuple) and no_of_args == 1:
value = (value,)
if isinstance(arg, str):
for ii, param in enumerate(sig.parameters):
if arg == param:
arg = ii
break
# Mutating the immutable...
larg = list(self._argslist[position])
larg[arg] = value
self._argslist[position] = tuple(larg)
    def changeDuration(self, name, dur, replaceeverywhere=False):
        """
        Change the duration of one or more segments in the blueprint
        Args:
            name (str): The name of the segment in which to change duration
            dur (Union[float, int]): The new duration.
            replaceeverywhere (Optional[bool]): If True, the duration(s)
                is(are) overwritten in ALL segments where the name matches.
                E.g. 'gaussian1' will match 'gaussian', 'gaussian2',
                etc. If False, only the segment with exact name match
                gets a replacement.
        Raises:
            ValueError: If durations are not specified for the blueprint
            ValueError: If too many or too few durations are given.
            ValueError: If no segment matches the name.
            ValueError: If dur is not positive
            ValueError: If SR is given for the blueprint and dur is less than
                1/SR.
        """
        if (not(isinstance(dur, float)) and not(isinstance(dur, int))):
            raise ValueError('New duration must be an int or a float. '
                             'Received {}'.format(type(dur)))
        if replaceeverywhere:
            # Strip any trailing number so e.g. 'gaussian1' matches every
            # segment whose base name is 'gaussian'.
            basename = BluePrint._basename
            name = basename(name)
            nmlst = self._namelist
            replacelist = [nm for nm in nmlst if basename(nm) == name]
        else:
            replacelist = [name]
        # Validation
        if name not in self._namelist:
            raise ValueError('No segment of that name in blueprint.'
                             ' Contains segments: {}'.format(self._namelist))
        for name in replacelist:
            position = self._namelist.index(name)
            # Per-segment value checks, although `dur` never changes
            # inside the loop.
            if dur <= 0:
                raise ValueError('Duration must be strictly greater '
                                 'than zero.')
            if self.SR is not None:
                if dur*self.SR < 1:
                    raise ValueError('Duration too short! Must be at'
                                     ' least 1/sample rate.')
            self._durslist[position] = dur
    def setSR(self, SR):
        """
        Set the associated sample rate
        Args:
            SR (Union[int, float]): The sample rate in Sa/s.
        """
        # Stored privately; read back elsewhere in the class via self.SR.
        self._SR = SR
    def setSegmentMarker(self, name, specs, markerID):
        """
        Bind a marker to a specific segment.
        Args:
            name (str): Name of the segment
            specs (tuple): Marker specification tuple, (delay, duration),
                where the delay is relative to the segment start
            markerID (int): Which marker channel to output on. Must be 1 or 2.
        Raises:
            ValueError: If markerID is not 1 or 2.
            ValueError: If no segment has the given name (from list.index).
        """
        if markerID not in [1, 2]:
            raise ValueError('MarkerID must be either 1 or 2.'
                             ' Received {}.'.format(markerID))
        markerselect = {1: self._segmark1, 2: self._segmark2}
        position = self._namelist.index(name)
        # TODO: Do we need more than one bound marker per segment?
        markerselect[markerID][position] = specs
    def removeSegmentMarker(self, name: str, markerID: int) -> None:
        """
        Remove all bound markers from a specific segment
        Args:
            name (str): Name of the segment
            markerID (int): Which marker channel to remove from (1 or 2).
        Raises:
            ValueError: If markerID is not 1 or 2.
            KeyError: If no segment has the given name.
        """
        if markerID not in [1, 2]:
            raise ValueError('MarkerID must be either 1 or 2.'
                             ' Received {}.'.format(markerID))
        markerselect = {1: self._segmark1, 2: self._segmark2}
        try:
            position = self._namelist.index(name)
        except ValueError:
            raise KeyError('No segment named {} in this BluePrint.'
                           ''.format(name))
        # (0, 0) is the "no marker" sentinel used for new segments.
        markerselect[markerID][position] = (0, 0)
def copy(self):
"""
Returns a copy of the BluePrint
"""
# Needed because of input validation in __init__
namelist = [self._basename(name) for name in self._namelist.copy()]
return BluePrint(self._funlist.copy(),
self._argslist.copy(),
namelist,
self.marker1.copy(),
self.marker2.copy(),
self._segmark1.copy(),
self._segmark2.copy(),
self._SR,
self._durslist)
    def insertSegment(self, pos, func, args=(), dur=None, name=None,
                      durs=None):
        """
        Insert a segment into the bluePrint.
        Args:
            pos (int): The position at which to add the segment. Counts like
                a python list; 0 is first, -1 is last. Values below -1 are
                not allowed, though.
            func (function): Function describing the segment. Must have its
                duration as the last argument (unless its a special function).
            args (Optional[Tuple[Any]]): Tuple of arguments BESIDES duration.
                Default: ()
            dur (Optional[Union[int, float]]): The duration of the
                segment. Must be given UNLESS the segment is
                'waituntil' or 'ensureaverage_fixed_level'
            name Optional[str]: Name of the segment. If none is given,
                the segment will receive the name of its function,
                possibly with a number appended.
            durs: Deprecated alias for dur; passing both raises ValueError.
        Raises:
            ValueError: If the position is negative
            ValueError: If the name ends in a number
        """
        # Validation
        has_ensureavg = ('ensureaverage_fixed_level' in self._funlist or
                         'ensureaverage_fixed_dur' in self._funlist)
        if func == 'ensureaverage_fixed_level' and has_ensureavg:
            raise ValueError('Can not have more than one "ensureaverage"'
                             ' segment in a blueprint.')
        if durs is not None:
            warnings.warn('Deprecation warning: please specify "dur" rather '
                          'than "durs" when inserting a segment')
            if dur is None:
                dur = durs
            else:
                raise ValueError('You can not specify "durs" AND "dur"!')
        # Take care of 'waituntil'
        # allow users to input single values
        if not isinstance(args, tuple):
            args = (args,)
        if pos < -1:
            raise ValueError('Position must be strictly larger than -1')
        if name is None or name == '':
            # Special string segments ('waituntil') have no __name__.
            if func == 'waituntil':
                name = 'waituntil'
            else:
                name = func.__name__
        elif isinstance(name, str):
            if len(name) > 0:
                # Trailing digits are reserved for _make_names_unique.
                if name[-1].isdigit():
                    raise ValueError('Segment name must not end in a number')
        if pos == -1:
            self._namelist.append(name)
            self._namelist = self._make_names_unique(self._namelist)
            self._funlist.append(func)
            self._argslist.append(args)
            self._segmark1.append((0, 0))
            self._segmark2.append((0, 0))
            self._durslist.append(dur)
        else:
            self._namelist.insert(pos, name)
            self._namelist = self._make_names_unique(self._namelist)
            self._funlist.insert(pos, func)
            self._argslist.insert(pos, args)
            self._segmark1.insert(pos, (0, 0))
            self._segmark2.insert(pos, (0, 0))
            self._durslist.insert(pos, dur)
def removeSegment(self, name):
"""
Remove the specified segment from the blueprint.
Args:
name (str): The name of the segment to remove.
"""
try:
position = self._namelist.index(name)
except ValueError:
raise KeyError('No segment called {} in blueprint.'.format(name))
del self._funlist[position]
del self._argslist[position]
del self._namelist[position]
del self._segmark1[position]
del self._segmark2[position]
del self._durslist[position]
self._namelist = self._make_names_unique(self._namelist)
    @marked_for_deletion(replaced_by='broadbean.plotting.plotter')
    def plot(self, SR=None):
        """Deprecated no-op; use broadbean.plotting.plotter instead."""
        pass
def __add__(self, other):
"""
Add two BluePrints. The second argument is appended to the first
and a new BluePrint is returned.
Args:
other (BluePrint): A BluePrint instance
Returns:
BluePrint: A new blueprint.
Raises:
ValueError: If the input is not a BluePrint instance
"""
if not isinstance(other, BluePrint):
raise ValueError("""
BluePrint can only be added to another Blueprint.
Received an object of type {}
""".format(type(other)))
nl = [self._basename(name) for name in self._namelist]
nl += [self._basename(name) for name in other._namelist]
al = self._argslist + other._argslist
fl = self._funlist + other._funlist
m1 = self.marker1 + other.marker1
m2 = self.marker2 + other.marker2
sm1 = self._segmark1 + other._segmark1
sm2 = self._segmark2 + other._segmark2
dl = self._durslist + other._durslist
new_bp = BluePrint()
new_bp._namelist = new_bp._make_names_unique(nl.copy())
new_bp._funlist = fl.copy()
new_bp._argslist = al.copy()
new_bp.marker1 = m1.copy()
new_bp.marker2 = m2.copy()
new_bp._segmark1 = sm1.copy()
new_bp._segmark2 = sm2.copy()
new_bp._durslist = dl.copy()
if self.SR is not None:
new_bp.setSR(self.SR)
return new_bp
def __eq__(self, other):
"""
Compare two blueprints. They are the same iff all
lists are identical.
Args:
other (BluePrint): A BluePrint instance
Returns:
bool: whether the two blueprints are identical
Raises:
ValueError: If the input is not a BluePrint instance
"""
if not isinstance(other, BluePrint):
raise ValueError("""
Blueprint can only be compared to another
Blueprint.
Received an object of type {}
""".format(type(other)))
if not self._namelist == other._namelist:
return False
if not self._funlist == other._funlist:
return False
if not self._argslist == other._argslist:
return | |
, OOo ]
lisp . lisp_start_rloc_probe_timer ( 1 , II )
O0o0Ooo = { "type" : "itr-crypto-port" , "port" : iiI1iIiI }
lisp . lisp_write_to_dp_socket ( O0o0Ooo )
if 84 - 84: ooOoO0o + i11iIiiIii - OOooOOo * ooOoO0o
if 33 - 33: ooOoO0o % i1IIi - oO0o . O0 / O0
if 96 - 96: OoooooooOO + IiII * O0
if 86 - 86: Ii1I
if 29 - 29: iIii1I11I1II1 - OoO0O00 + I1IiiI % iIii1I11I1II1 % OOooOOo
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
if 84 - 84: IiII + I1ii11iIi11i + Ii1I + iII111i
if 62 - 62: i11iIiiIii + OoOoOO00 + i1IIi
if 69 - 69: OoOoOO00
if 63 - 63: OoO0O00 / OoOoOO00 * iIii1I11I1II1 . I1Ii111
if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
# Command-dispatch table for the RTR process: maps each config/show clause
# to [handler, {parameter-name: spec}].  Each spec is presumably
# [single-value-flag, allowed-value/min, allowed-value/max] — the handler
# functions are defined elsewhere in this (obfuscated) module.
OooOo = {
    "lisp xtr-parameters": [O00ooooo00, {
        "rloc-probing": [True, "yes", "no"],
        "nonce-echoing": [True, "yes", "no"],
        "data-plane-security": [True, "yes", "no"],
        "data-plane-logging": [True, "yes", "no"],
        "frame-logging": [True, "yes", "no"],
        "flow-logging": [True, "yes", "no"],
        "nat-traversal": [True, "yes", "no"],
        "checkpoint-map-cache": [True, "yes", "no"],
        "ipc-data-plane": [True, "yes", "no"],
        "decentralized-push-xtr": [True, "yes", "no"],
        "decentralized-pull-xtr-modulus": [True, 1, 0xff],
        "decentralized-pull-xtr-dns-suffix": [True],
        "register-reachable-rtrs": [True, "yes", "no"],
        "program-hardware": [True, "yes", "no"]}],
    "lisp interface": [lispconfig.lisp_interface_command, {
        "interface-name": [True],
        "device": [True],
        "instance-id": [True, 0, 0xffffffff],
        "dynamic-eid": [True],
        "dynamic-eid-device": [True],
        "lisp-nat": [True, "yes", "no"],
        "dynamic-eid-timeout": [True, 0, 0xff]}],
    "lisp map-resolver": [II1I1ii1ii11, {
        "mr-name": [True],
        "ms-name": [True],
        "dns-name": [True],
        "address": [True]}],
    "lisp map-cache": [lispconfig.lisp_map_cache_command, {
        "prefix": [],
        "mr-name": [True],
        "ms-name": [True],
        "instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "send-map-request": [True, "yes", "no"],
        "rloc": [],
        "rloc-record-name": [True],
        "rle-name": [True],
        "elp-name": [True],
        "address": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp rtr-map-cache": [lispconfig.lisp_map_cache_command, {
        "prefix": [],
        "instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "rloc": [],
        "rloc-record-name": [True],
        "rle-name": [True],
        "elp-name": [True],
        "address": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp explicit-locator-path": [lispconfig.lisp_elp_command, {
        "elp-name": [False],
        "elp-node": [],
        "address": [True],
        "probe": [True, "yes", "no"],
        "strict": [True, "yes", "no"],
        "eid": [True, "yes", "no"]}],
    "lisp replication-list-entry": [lispconfig.lisp_rle_command, {
        "rle-name": [False],
        "rle-node": [],
        "address": [True],
        "level": [True, 0, 255]}],
    "lisp json": [lispconfig.lisp_json_command, {
        "json-name": [False],
        "json-string": [False]}],
    "lisp database-mapping": [OooOooooOOoo0, {
        "prefix": [],
        "mr-name": [True],
        "ms-name": [True],
        "instance-id": [True, 0, 0xffffffff],
        "secondary-instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "dynamic-eid": [True, "yes", "no"],
        "signature-eid": [True, "yes", "no"],
        "rloc": [],
        "rloc-record-name": [True],
        "elp-name": [True],
        "geo-name": [True],
        "rle-name": [True],
        "json-name": [True],
        "address": [True],
        "interface": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp glean-mapping": [OOOoO0O0o, {
        "instance-id": [False],
        "eid-prefix": [True],
        "rloc-prefix": [True],
        "rloc-probe": [True, "yes", "no"]}],
    # Operational "show" commands take no parameters.
    "show rtr-rloc-probing": [IIIIiiII111, {}],
    "show rtr-keys": [I1i1iii, {}],
    "show rtr-map-cache": [oO, {}],
    "show rtr-map-cache-dns": [oo, {}]
}
if 67 - 67: Oo0Ooo / O0
if 88 - 88: OoOoOO00 - OOooOOo
if 63 - 63: IiII * OoooooooOO
if 19 - 19: IiII - o0oOOo0O0Ooo . iIii1I11I1II1 . OoOoOO00 / OOooOOo
if 87 - 87: OoOoOO00 - ooOoO0o - OOooOOo + Oo0Ooo % iIii1I11I1II1 / i11iIiiIii
if 12 - 12: ooOoO0o
def oOOO0ooOO(lisp_socket):
    """
    Receive one message on *lisp_socket*, decode it as a LISP trace
    packet and cache it as a NAT-trace entry on the RTR.

    Dead obfuscation filler ("if N - N: ...") removed; behavior unchanged.
    """
    # lisp_receive() presumably returns (opcode, source, port, packet);
    # only the last three are used here — TODO confirm against lisp.py.
    _, source, port, packet = lisp.lisp_receive(lisp_socket, False)

    trace = lisp.lisp_trace()
    if trace.decode(packet) == False:
        return

    # Remember where this trace came from so later trace replies can be
    # routed back through the NAT to the sender.
    trace.rtr_cache_nat_trace(source, port)
if 43 - 43: OoO0O00 % OoO0O00
if ( I11i11i1 ( ) == False ) :
lisp . lprint ( "lisp_rtr_startup() failed" )
lisp . lisp_print_banner ( "RTR abnormal exit" )
exit ( 1 )
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
Ii11iiI1 = [ II1Ii1iI1i , Oo0oO0oo0oO00 ,
i111I , oO0oIIII ]
oO0OOOoooO00o0o = [ II1Ii1iI1i ] * 3
if 10 - 10: Ii1I - i11iIiiIii . I1ii11iIi11i % i1IIi
while ( True ) :
try : OooOOOoOoo0O0 , O0OOOOo0 , OOooO0Oo00 = select . select ( Ii11iiI1 , [ ] , [ ] )
except : break
if 9 | |
length == 1:
primary.addPathsFrom(parts[piece[0]])
elif length > 1:
primary.addPathsFrom(parts[piece[0]])
for idx in range(1, length):
secondary.addPathsFrom(parts[piece[idx]])
data = [
primary,
secondary]
self.jewelrySets[key].append(data)
self.currentJewelry = {
'LEar': [
0,
0,
0],
'REar': [
0,
0,
0],
'LBrow': [
0,
0,
0],
'RBrow': [
0,
0,
0],
'Nose': [
0,
0,
0],
'Mouth': [
0,
0,
0],
'LHand': [
0,
0,
0],
'RHand': [
0,
0,
0] }
self.hairLODs = []
self.hairCutLODs = []
for item in hairList:
itemInfo = { }
for lod in [
'500',
'1000',
'2000']:
itemInfo[lod] = self.pirate.getLOD(lod).findAllMatches(item)
self.hairLODs.append(itemInfo)
for item in hairCutList:
itemInfo = { }
for lod in [
'500',
'1000',
'2000']:
itemInfo[lod] = self.pirate.getLOD(lod).findAllMatches(item)
self.hairCutLODs.append(itemInfo)
self.generateHairSets()
self.hair.stash()
def setBlendValue(self, val, attr):
self.pirate.setBlendValue(0.0, self.blendShapes[attr][0])
if len(self.blendShapes[attr]) > 1:
self.pirate.setBlendValue(0.0, self.blendShapes[attr][1])
if val >= 0.0:
if len(self.blendShapes[attr]) > 1:
blendName = self.blendShapes[attr][1]
else:
blendName = self.blendShapes[attr][0]
else:
blendName = self.blendShapes[attr][0]
val = -val
self.pirate.setBlendValue(val, blendName)
def setupBody(self, lodName = 2000):
geom = self.pirate.getGeomNode()
self.body = self.pirate.findAllMatches('**/body_*')
faceParts = []
for i in xrange(self.body.getNumPaths()):
if self.body[i].getName().find('master_face') >= 0:
faceParts.append(self.body[i])
for part in faceParts:
self.body.removePath(part)
if self.newAvatars:
self.stripTexture(self.body)
self.bodyPiecesToGroup = {
0: 0,
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
6: 0,
7: 0,
8: 2,
9: 1,
10: 2,
11: 1,
12: 2,
13: 1,
14: 2,
15: 1,
16: 0,
17: 0,
18: 0,
19: 0,
20: 0,
21: 0 }
self.groupsToBodyPieces = [
[
0,
1,
2,
3,
4,
5,
6,
7,
16,
17,
18,
19,
20,
21],
[
9,
11,
13,
15],
[
8,
10,
12,
14]]
layerBList = [
'**/body_neck*',
'**/body_torso_base',
'**/body_torso_back',
'**/body_torso_front',
'**/body_collar_sharp',
'**/body_collar_round',
'**/body_belt',
'**/body_waist',
'**/body_armpit_right',
'**/body_armpit_left',
'**/body_shoulder_right',
'**/body_shoulder_left',
'**/body_forearm_right',
'**/body_forearm_left',
'**/body_hand_right',
'**/body_hand_left',
'**/body_knee_right',
'**/body_knee_left',
'**/body_shin_right',
'**/body_shin_left',
'**/body_foot_right',
'**/body_foot_left']
self.layerBodyLODs = []
for part in layerBList:
bodyParts = { }
for lod in [
'2000',
'1000',
'500']:
bodyParts[lod] = self.pirate.getLOD(lod).find(part)
self.layerBodyLODs.append(bodyParts)
chest = NodePathCollection()
leftArm = NodePathCollection()
rightArm = NodePathCollection()
self.currentTattooZones = [
chest,
leftArm,
rightArm,
self.faces[0]]
for i in self.bodyPiecesToGroup.items():
self.currentTattooZones[i[1]].addPath(self.layerBodyLODs[i[0]]['2000'])
self.currentTattooZones[i[1]].addPath(self.layerBodyLODs[i[0]]['1000'])
self.currentTattooZones[i[1]].addPath(self.layerBodyLODs[i[0]]['500'])
self.bodys = []
self.bodyIdx = 0
self.bodys.append(geom.findAllMatches('**/body_neck*'))
self.bodys.append(geom.findAllMatches('**/body_torso_base'))
self.bodys.append(geom.findAllMatches('**/body_torso_back'))
self.bodys.append(geom.findAllMatches('**/body_torso_front'))
self.bodys.append(geom.findAllMatches('**/body_collar_sharp'))
self.bodys.append(geom.findAllMatches('**/body_collar_round'))
self.bodys.append(geom.findAllMatches('**/body_belt'))
self.bodys.append(geom.findAllMatches('**/body_waist'))
self.bodys.append(geom.findAllMatches('**/body_armpit_right'))
self.bodys.append(geom.findAllMatches('**/body_armpit_left'))
self.bodys.append(geom.findAllMatches('**/body_shoulder_right'))
self.bodys.append(geom.findAllMatches('**/body_shoulder_left'))
self.bodys.append(geom.findAllMatches('**/body_forearm_right'))
self.bodys.append(geom.findAllMatches('**/body_forearm_left'))
self.bodys.append(geom.findAllMatches('**/body_hand_right'))
self.bodys.append(geom.findAllMatches('**/body_hand_left'))
self.bodys.append(geom.findAllMatches('**/body_knee_right'))
self.bodys.append(geom.findAllMatches('**/body_knee_left'))
self.bodys.append(geom.findAllMatches('**/body_shin_right'))
self.bodys.append(geom.findAllMatches('**/body_shin_left'))
self.bodys.append(geom.findAllMatches('**/body_foot_right'))
self.bodys.append(geom.findAllMatches('**/body_foot_left'))
for part in self.bodys:
part.stash()
self.currentBody = NodePathCollection()
self.bodyTextures = loader.loadModel('models/misc/male_body.bam')
self.numBodys = len(body_textures[0])
self.bodyTextureIdx = self.pirate.style.getBodySkin()
self.lowLODSkinColor = VBase4(0.81000000000000005, 0.68999999999999995, 0.62, 1.0)
self.faceTextures = loader.loadModel('models/misc/male_face.bam')
self.faceTexturesSet = []
self.numFaces = len(face_textures)
self.faceTextureIdx = self.pirate.style.getHeadTexture()
self.numEyeColors = len(eye_iris_textures)
self.eyesColorIdx = self.pirate.style.getEyesColor()
self.skinColorIdx = self.pirate.style.getBodyColor()
self.hairColorIdx = self.pirate.style.getHairColor()
for shape in body_textures:
for texName in shape:
tex = self.bodyTextures.findTexture(texName)
if tex:
self.texDict[texName] = tex
continue
self.texDict[texName] = None
for texName in face_textures:
tex = self.bodyTextures.findTexture(texName)
if tex:
self.texDict[texName] = tex
continue
self.texDict[texName] = None
for texName in face_textures:
tex = self.faceTextures.findTexture(texName)
if tex:
self.faceTexturesSet.append(tex)
continue
self.notify.error('missing texture')
def setupClothing(self, lodName = '2000'):
geom = self.pirate.getGeomNode()
self.clothing = self.pirate.findAllMatches('**/clothing_*')
self.clothingsAcc = []
self.clothingAccIdx = 0
self.clothingsLayer1 = []
self.clothingsLayer2 = []
self.clothingsLayer3 = []
self.clothingsShirt = []
self.clothingShirtIdx = 0
self.clothingShirtTexture = 0
self.clothingsVest = []
self.clothingVestIdx = 0
self.clothingVestTexture = 0
self.clothingsCoat = []
self.clothingCoatIdx = 0
self.clothingCoatTexture = 0
self.clothingsPant = []
self.clothingPantIdx = 0
self.clothingPantTexture = 0
self.clothingsBelt = []
self.clothingBeltIdx = 0
self.clothingBeltTexture = 0
self.clothingsSock = []
self.clothingSockIdx = 0
self.clothingSockTexture = 0
self.clothingsShoe = []
self.clothingShoeIdx = 0
self.clothingShoeTexture = 0
self.clothingsHat = []
self.clothingHatIdx = 0
self.clothingHatTexture = 0
self.partLayer = { }
layer1List = [
'**/clothing_layer1_shirt_common_open_base',
'**/clothing_layer1_shirt_common_bottom_open_base',
'**/clothing_layer1_shirt_common_closed_base',
'**/clothing_layer1_shirt_common_closed_front',
'**/clothing_layer1_shirt_common_bottom_out_closed_base',
'**/clothing_layer1_shirt_common_bottom_out_closed_front',
'**/clothing_layer1_shirt_common_bottom_in_closed_base',
'**/clothing_layer1_shirt_common_bottom_in_closed_front',
'**/clothing_layer1_shirt_common_collar_square',
'**/clothing_layer1_shirt_common_collar_v_high1',
'**/clothing_layer1_shirt_common_collar_v_high2',
'**/clothing_layer1_shirt_common_collar_v_low',
'**/clothing_layer1_shirt_common_veryshort_sleeve',
'**/clothing_layer1_shirt_common_short_sleeve',
'**/clothing_layer1_shirt_common_long_straight_sleeve',
'**/clothing_layer1_shirt_common_long_puffy_sleeve',
'**/clothing_layer1_pant_tucked_*',
'**/clothing_layer1_pant_untucked_*',
'**/clothing_layer1_pant_shorts_*',
'**/clothing_layer1_pant_short_*',
'**/clothing_layer1_pant_navy*',
'**/clothing_layer1_pant_eitc*',
'**/clothing_layer1_shoe_none_*',
'**/clothing_layer1_shoe_boot_tall_*',
'**/clothing_layer1_shoe_boot_short_*',
'**/clothing_layer1_shoe_boot_cuff_*',
'**/clothing_layer1_shoe_navy*',
'**/clothing_layer1_shoe_india_navy*',
'**/clothing_layer1_shirt_apron',
'**/clothing_layer1_pant_apron',
'**/clothing_layer1_pant_apron_skirt',
'**/clothing_layer1_hat_captain;+s',
'**/clothing_layer1_hat_tricorn;+s',
'**/clothing_layer1_hat_navy;+s',
'**/clothing_layer1_hat_india_navy;+s',
'**/clothing_layer1_hat_admiral;+s',
'**/clothing_layer1_hat_bandanna_full;+s',
'**/clothing_layer1_hat_bandanna_reg*;+s',
'**/clothing_layer1_hat_band_beanie;+s',
'**/clothing_layer1_hat_barbossa;+s',
'**/clothing_layer1_hat_barbossa_feather;+s',
'**/clothing_layer1_hat_french;+s',
'**/clothing_layer1_hat_french_feather;+s',
'**/clothing_layer1_hat_spanish;+s',
'**/clothing_layer1_hat_spanish_feather;+s',
'**/clothing_layer1_hat_french_1;+s',
'**/clothing_layer1_hat_french_2;+s',
'**/clothing_layer1_hat_french_3;+s',
'**/clothing_layer1_hat_spanish_1;+s',
'**/clothing_layer1_hat_spanish_2;+s',
'**/clothing_layer1_hat_spanish_3;+s',
'**/clothing_layer1_hat_land_1;+s',
'**/clothing_layer1_hat_land_2;+s',
'**/clothing_layer1_hat_land_3;+s',
'**/clothing_layer1_hat_holiday;+s',
'**/clothing_layer1_hat_party_1;+s',
'**/clothing_layer1_hat_party_2;+s',
'**/clothing_layer1_hat_GM;+s',
'**/clothing_layer1_shirt_common_collar_v_high3']
self.layer1LODs = []
for item in layer1List:
itemInfo = { }
for lod in [
'500',
'1000',
'2000']:
itemInfo[lod] = self.pirate.getLOD(lod).findAllMatches(item)
self.layer1LODs.append(itemInfo)
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_open_base'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_bottom_open_base'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_closed_base'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_closed_front'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_bottom_out_closed_base'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_bottom_out_closed_front'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_bottom_in_closed_base'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_bottom_in_closed_front'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_collar_square'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_collar_v_high1'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_collar_v_high2'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_collar_v_low'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_veryshort_sleeve'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_short_sleeve'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_long_straight_sleeve'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_long_puffy_sleeve'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_pant_tucked_*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_pant_untucked_*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_pant_shorts_*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_pant_short_*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_pant_navy*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_pant_eitc*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shoe_none_*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shoe_boot_tall_*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shoe_boot_short_*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shoe_boot_cuff_*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shoe_navy*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shoe_india_navy*'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_apron'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_pant_apron'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_pant_apron_skirt'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_captain;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_tricorn;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_navy;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_india_navy;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_admiral;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_bandanna_full;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_bandanna_reg;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_band_beanie;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_barbossa;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_barbossa_feather;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_french;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_french_feather;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_spanish;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_spanish_feather;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_french_1;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_french_2;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_french_3;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_spanish_1;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_spanish_2;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_spanish_3;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_land_1;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_land_2;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_land_3;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_holiday;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_party_1;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_party_2;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_hat_GM;+s'))
self.clothingsLayer1.append(geom.findAllMatches('**/clothing_layer1_shirt_common_collar_v_high3'))
self.partLayer['SHIRT'] = self.clothingsLayer1
self.partLayer['PANT'] = self.clothingsLayer1
self.partLayer['SHOE'] = self.clothingsLayer1
self.partLayer['HAT'] = self.clothingsLayer1
self.clothesTextures = loader.loadModel('models/misc/male_clothes.bam')
self.clothingsShirt.append([
[]])
self.clothingsShirt.append([
[
2,
3,
4,
5,
9],
-1,
-2,
-3,
-4,
-5])
self.clothingsShirt.append([
[
2,
3,
4,
5,
9],
-1,
-2,
-3,
-4,
-5])
self.clothingsShirt.append([
[
2,
3,
4,
5,
10,
13],
-1,
-2,
-3,
-4,
-5,
-8,
-9,
-10,
-11])
self.clothingsShirt.append([
[
2,
3,
4,
5,
9,
13],
-1,
-2,
-3,
-4,
-5,
-8,
-9,
-10,
-11])
self.clothingsShirt.append([
[
0,
1,
13],
-1,
-2,
-8,
-9,
-10,
-11])
self.clothingsShirt.append([
[
2,
3,
4,
5,
11,
15],
-1,
-2,
-3,
-8,
-9,
-10,
-11,
-12,
-13])
self.clothingsShirt.append([
[
2,
3,
4,
5,
9,
14],
-1,
-2,
-3,
-4,
-5,
-8,
-9,
-10,
-11,
-12,
-13])
self.clothingsShirt.append([
[
0,
1,
14],
-1,
-2,
-8,
-9,
-10,
-11,
-12,
-13])
self.clothingsShirt.append([
[
28],
-1,
-2,
-3,
-4,
-8,
-9,
-10,
-11])
self.clothingsShirt.append([
[
2,
3,
4,
5,
9,
14],
-1,
-2,
-3,
-4,
-5,
-8,
-9,
-10,
-11,
-12,
-13])
self.clothingsShirt.append([
[
2,
3,
4,
5,
10,
15],
-1,
-2,
-3,
-4,
-5,
-6,
-8,
-9,
-10,
-11,
-12,
-13])
self.clothingsShirt.append([
[
2,
3,
4,
5,
15,
58],
-1,
-2,
-3,
-4,
-5,
-6,
-7,
-8,
-9,
-10,
-11,
-12,
-13])
self.clothingsHat.append([
[]])
self.clothingsHat.append([
[
31]])
self.clothingsHat.append([
[
32]])
self.clothingsHat.append([
[
33]])
self.clothingsHat.append([
[
34]])
self.clothingsHat.append([
[
35]])
self.clothingsHat.append([
[
36]])
self.clothingsHat.append([
[
37]])
self.clothingsHat.append([
[
38]])
self.clothingsHat.append([
[
39,
40]])
self.clothingsHat.append([
[
41,
42]])
self.clothingsHat.append([
[
43,
44]])
self.clothingsHat.append([
[
45]])
self.clothingsHat.append([
[
46]])
self.clothingsHat.append([
[
47]])
self.clothingsHat.append([
[
48]])
self.clothingsHat.append([
[
49]])
self.clothingsHat.append([
[
50]])
self.clothingsHat.append([
[
51]])
self.clothingsHat.append([
[
52]])
self.clothingsHat.append([
[
53]])
self.clothingsHat.append([
[
54]])
self.clothingsHat.append([
[
55]])
self.clothingsHat.append([
[
56]])
self.clothingsHat.append([
[
57]])
self.clothingsPant.append([
[
16],
-6,
-7,
-16,
-17])
self.clothingsPant.append([
[
17],
-6,
-7,
-16,
-17])
self.clothingsPant.append([
[
18],
-6,
-7])
self.clothingsPant.append([
[
19],
-6,
-7])
self.clothingsPant.append([
[
20],
-6,
-7,
-16,
-17,
-18,
-19,
-20,
-21])
self.clothingsPant.append([
[
21],
-6,
-7,
-16,
-17,
-18,
-19,
-20,
-21])
self.clothingsPant.append([
[
30,
29],
-6,
-7,
-16,
-17,
-18,
-19])
self.clothingsShoe.append([
[
22]])
self.clothingsShoe.append([
[
23],
-18,
-19,
-20,
-21])
self.clothingsShoe.append([
[
24],
-20,
-21])
self.clothingsShoe.append([
[
26],
-18,
-19,
-20,
-21])
self.clothingsShoe.append([
[
27],
-18,
-19,
-20,
-21])
self.clothingsShoe.append([
[
25],
-20,
-21,
-18,
-19])
layer2List = [
'**/clothing_layer2_vest_none*',
'**/clothing_layer2_vest_open*',
'**/clothing_layer2_vest_closed*',
'**/clothing_layer2_vest_long_closed*',
'**/clothing_layer2_belt_none*',
'**/clothing_layer2_belt_sash_reg_base',
'**/clothing_layer2_belt_sash_reg_front',
'**/clothing_layer2_belt_oval',
'**/clothing_layer2_belt_buckle_oval',
'**/clothing_layer2_belt_square',
'**/clothing_layer2_belt_buckle_square']
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_vest_none*'))
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_vest_open*'))
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_vest_closed*'))
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_vest_long_closed*'))
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_belt_none*'))
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_belt_sash_reg_base'))
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_belt_sash_reg_front'))
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_belt_oval'))
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_belt_buckle_oval'))
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_belt_square'))
self.clothingsLayer2.append(geom.findAllMatches('**/clothing_layer2_belt_buckle_square'))
self.partLayer['VEST'] = self.clothingsLayer2
self.partLayer['BELT'] = self.clothingsLayer2
self.clothingsVest.append([
[
0]])
self.clothingsVest.append([
[
1],
-1,
-2])
self.clothingsVest.append([
[
2],
-1,
-2,
-3])
self.clothingsVest.append([
[
3],
-1,
-2,
-3])
self.clothingsBelt.append([
[
4]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
7,
8]])
self.clothingsBelt.append([
[
7,
8]])
self.clothingsBelt.append([
[
9,
10]])
self.clothingsBelt.append([
[
7,
8]])
self.clothingsBelt.append([
[
7,
8]])
self.clothingsBelt.append([
[
7,
8]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
7,
8]])
self.clothingsBelt.append([
[
7,
8]])
self.clothingsBelt.append([
[
9,
10]])
self.clothingsBelt.append([
[
9,
10]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
9,
10]])
self.clothingsBelt.append([
[
9,
10]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
5,
6]])
self.clothingsBelt.append([
[
9,
10]])
self.clothingsBelt.append([
[
9,
10]])
self.clothingsBelt.append([
[
9,
10]])
self.clothingsBelt.append([
[
9,
10]])
self.clothingsLayer3.append(geom.findAllMatches('**/clothing_layer3_coat_none*'))
self.clothingsLayer3.append(geom.findAllMatches('**/clothing_layer3_coat_long*'))
self.clothingsLayer3.append(geom.findAllMatches('**/clothing_layer3_coat_short*'))
self.clothingsLayer3.append(geom.findAllMatches('**/clothing_layer3_coat_navy*'))
self.clothingsLayer3.append(geom.findAllMatches('**/clothing_layer3_coat_eitc*'))
layer3List = [
'**/clothing_layer3_coat_none*',
'**/clothing_layer3_coat_long*',
'**/clothing_layer3_coat_short*',
'**/clothing_layer3_coat_navy*',
'**/clothing_layer3_coat_eitc*']
if base.config.GetBool('want-gen-pics-buttons'):
self.clothesByType = {
'SHIRT': self.clothingsLayer1[:16] + | |
partition_hint=100, count_downsamplings=POPS,
impose_high_af_cutoff_here=not impose_high_af_cutoff_upfront)
ht = ht.join(possible_ht, 'outer')
return ht
def build_models(coverage_ht: hl.Table, trimers: bool = False, weighted: bool = False, half_cutoff = False,
                 ) -> Tuple[Tuple[float, float], Dict[str, Tuple[float, float]]]:
    """
    Fit the plateau (per-population) and coverage mutational models.

    The well-covered portion of the table (exome coverage at or above the
    cutoff) drives the plateau models; the remaining non-zero-coverage
    rows are binned by log10(coverage) to fit the coverage model.

    Returns a tuple of (coverage_model, plateau_models).
    """
    grouping = ['context', 'ref', 'alt', 'methylation_level', 'mu_snp']
    # Optionally halve the high-coverage cutoff (sensitivity analysis).
    cutoff = (HIGH_COVERAGE_CUTOFF / half_cutoff) if half_cutoff else HIGH_COVERAGE_CUTOFF

    well_covered = coverage_ht.filter(coverage_ht.exome_coverage >= cutoff)

    aggregations = {
        'observed_variants': hl.agg.sum(well_covered.variant_count),
        'possible_variants': hl.agg.sum(well_covered.possible_variants),
    }
    for pop in POPS:
        aggregations[f'observed_{pop}'] = hl.agg.array_sum(well_covered[f'downsampling_counts_{pop}'])

    grouped = well_covered.group_by(*grouping).aggregate(**aggregations)
    grouped = annotate_variant_types(grouped, not trimers)
    plateau_models = build_plateau_models_pop(grouped, weighted=weighted)

    # Global observed/expected scaling factor from the well-covered rows.
    scale_factor = well_covered.aggregate(
        hl.agg.sum(well_covered.variant_count) /
        hl.agg.sum(well_covered.possible_variants * well_covered.mu_snp))

    poorly_covered = coverage_ht.filter((coverage_ht.exome_coverage < cutoff) &
                                        (coverage_ht.exome_coverage > 0))
    binned = poorly_covered.group_by(
        log_coverage=hl.log10(poorly_covered.exome_coverage)
    ).aggregate(
        low_coverage_obs_exp=hl.agg.sum(poorly_covered.variant_count) /
        (scale_factor * hl.agg.sum(poorly_covered.possible_variants * poorly_covered.mu_snp)))
    coverage_model = build_coverage_model(binned)
    # TODO: consider weighting here as well
    return coverage_model, plateau_models
def add_most_severe_csq_to_tc_within_ht(t):
    """
    Tag every VEP transcript consequence with its most severe consequence.

    Works on either a Table or a MatrixTable (row annotation).
    """
    updated_vep = t.vep.annotate(
        transcript_consequences=t.vep.transcript_consequences.map(
            add_most_severe_consequence_to_consequence))
    if isinstance(t, hl.MatrixTable):
        return t.annotate_rows(vep=updated_vep)
    return t.annotate(vep=updated_vep)
def take_one_annotation_from_tc_within_ht(t):
    """
    Keep only the first VEP transcript consequence on each row.

    Works on either a Table or a MatrixTable (row annotation).
    """
    updated_vep = t.vep.annotate(
        transcript_consequences=t.vep.transcript_consequences[0])
    if isinstance(t, hl.MatrixTable):
        return t.annotate_rows(vep=updated_vep)
    return t.annotate(vep=updated_vep)
def get_proportion_observed(exome_ht: hl.Table, context_ht: hl.Table, mutation_ht: hl.Table,
                            plateau_models: Dict[str, Tuple[float, float]], coverage_model: Tuple[float, float],
                            recompute_possible: bool = False, remove_from_denominator: bool = True,
                            custom_model: str = None, dataset: str = 'gnomad',
                            impose_high_af_cutoff_upfront: bool = True, half_cutoff = False) -> hl.Table:
    """Aggregate observed and expected variant counts per constraint grouping.

    Reshapes the exome (observed) and context (possible) tables according to
    ``custom_model``, computes expected counts from the plateau and coverage
    models, writes intermediate tables under ``root``, and returns a table
    keyed by the grouping fields with ``obs_exp`` attached.

    :param exome_ht: observed variants, with ``freq`` and ``pass_filters``.
    :param context_ht: all possible variants, with ``exome_coverage``.
    :param mutation_ht: per-context mutation rates (``mu_snp``).
    :param plateau_models: high-coverage models: ``total`` plus one entry per POPS element.
    :param coverage_model: (intercept, slope) of the low-coverage correction.
    :param recompute_possible: recompute and rewrite the possible-variants table.
    :param remove_from_denominator: drop context sites whose exome row fails ``keep_criteria``.
    :param custom_model: 'syn_canonical', 'worst_csq', 'tx_annotation',
        'distance_to_splice', or None for the default per-transcript model.
    :param dataset: key into ``freq_index_dict`` selecting the frequency entry.
    :param impose_high_af_cutoff_upfront: apply the AF <= 0.001 cutoff during filtering.
    :param half_cutoff: if truthy, divide the high-coverage cutoff by this value.
    """
    exome_ht = add_most_severe_csq_to_tc_within_ht(exome_ht)
    context_ht = add_most_severe_csq_to_tc_within_ht(context_ht)
    # Shape both tables so each row is one (variant, annotation-unit) pair.
    if custom_model == 'syn_canonical':
        context_ht = take_one_annotation_from_tc_within_ht(fast_filter_vep(context_ht))
        context_ht = context_ht.transmute(transcript_consequences=context_ht.vep.transcript_consequences)
        exome_ht = take_one_annotation_from_tc_within_ht(fast_filter_vep(exome_ht))
        exome_ht = exome_ht.transmute(transcript_consequences=exome_ht.vep.transcript_consequences)
    elif custom_model == 'worst_csq':
        context_ht = process_consequences(context_ht)
        context_ht = context_ht.transmute(worst_csq_by_gene=context_ht.vep.worst_csq_by_gene)
        context_ht = context_ht.explode(context_ht.worst_csq_by_gene)
        exome_ht = process_consequences(exome_ht)
        exome_ht = exome_ht.transmute(worst_csq_by_gene=exome_ht.vep.worst_csq_by_gene)
        exome_ht = exome_ht.explode(exome_ht.worst_csq_by_gene)
    elif custom_model == 'tx_annotation':
        tx_ht = load_tx_expression_data()
        context_ht = context_ht.annotate(**tx_ht[context_ht.key])
        context_ht = context_ht.explode(context_ht.tx_annotation)
        tx_ht = load_tx_expression_data(context=False)
        exome_ht = exome_ht.annotate(**tx_ht[exome_ht.key])
        exome_ht = exome_ht.explode(exome_ht.tx_annotation)
    elif custom_model == 'distance_to_splice':
        context_ht = annotate_distance_to_splice(context_ht)
        exome_ht = annotate_distance_to_splice(exome_ht)
        # TODO:
        # context_ht = context_ht.explode(context_ht.tx_annotation)
        # exome_ht = exome_ht.explode(exome_ht.tx_annotation)
    else:
        # Default model: one row per transcript consequence.
        context_ht = context_ht.transmute(transcript_consequences=context_ht.vep.transcript_consequences)
        context_ht = context_ht.explode(context_ht.transcript_consequences)
        exome_ht = exome_ht.transmute(transcript_consequences=exome_ht.vep.transcript_consequences)
        exome_ht = exome_ht.explode(exome_ht.transcript_consequences)
    context_ht, _ = annotate_constraint_groupings(context_ht, custom_model=custom_model)
    exome_ht, grouping = annotate_constraint_groupings(exome_ht, custom_model=custom_model)
    context_ht = context_ht.filter(hl.is_defined(context_ht.exome_coverage)).select(
        'context', 'ref', 'alt', 'methylation_level', *grouping)
    exome_ht = exome_ht.select(
        'context', 'ref', 'alt', 'methylation_level', 'freq', 'pass_filters', *grouping)
    af_cutoff = 0.001
    freq_index = exome_ht.freq_index_dict.collect()[0][dataset]
    def keep_criteria(ht):
        # Keep observed variants that are present (AC > 0), pass filters,
        # and have nonzero coverage; optionally also below the AF cutoff.
        crit = (ht.freq[freq_index].AC > 0) & ht.pass_filters & (ht.coverage > 0)
        if impose_high_af_cutoff_upfront:
            crit &= (ht.freq[freq_index].AF <= af_cutoff)
        return crit
    exome_join = exome_ht[context_ht.key]
    if remove_from_denominator:
        # Drop context sites whose exome counterpart exists but fails the
        # criteria, keeping numerator and denominator consistent.
        context_ht = context_ht.filter(hl.is_missing(exome_join) | keep_criteria(exome_join))
    exome_ht = exome_ht.filter(keep_criteria(exome_ht))
    possible_file = f'{root}/model/possible_data/possible_transcript_pop_{custom_model}.ht'
    if recompute_possible:
        # Count possible variants per grouping and derive expected counts
        # from the plateau (mutation-rate) and coverage models.
        ht = count_variants(context_ht, additional_grouping=grouping, partition_hint=2000, force_grouping=True)
        ht = annotate_with_mu(ht, mutation_ht)
        ht = ht.transmute(possible_variants=ht.variant_count)
        ht = annotate_variant_types(ht.annotate(mu_agg=ht.mu_snp * ht.possible_variants))
        model = hl.literal(plateau_models.total)[ht.cpg]
        cov_cutoff = (HIGH_COVERAGE_CUTOFF / half_cutoff) if half_cutoff else HIGH_COVERAGE_CUTOFF
        ann_expr = {
            'adjusted_mutation_rate': ht.mu_agg * model[1] + model[0],
            'coverage_correction': hl.case()
                .when(ht.coverage == 0, 0)
                .when(ht.coverage >= cov_cutoff, 1)
                .default(coverage_model[1] * hl.log10(ht.coverage) + coverage_model[0])
        }
        for pop in POPS:
            # Per-population plateau models are arrays (one per downsampling).
            pop_model = hl.literal(plateau_models[pop])
            slopes = hl.map(lambda f: f[ht.cpg][1], pop_model)
            intercepts = hl.map(lambda f: f[ht.cpg][0], pop_model)
            ann_expr[f'adjusted_mutation_rate_{pop}'] = ht.mu_agg * slopes + intercepts
        ht = ht.annotate(**ann_expr)
        ann_expr = {
            'expected_variants': ht.adjusted_mutation_rate * ht.coverage_correction,
            'mu': ht.mu_agg * ht.coverage_correction
        }
        for pop in POPS:
            ann_expr[f'expected_variants_{pop}'] = ht[f'adjusted_mutation_rate_{pop}'] * ht.coverage_correction
        ht = ht.annotate(**ann_expr)
        ht.write(possible_file, True)
    possible_variants_ht = hl.read_table(possible_file)
    ht = count_variants(exome_ht, additional_grouping=grouping, partition_hint=2000, force_grouping=True,
                        count_downsamplings=POPS, impose_high_af_cutoff_here=not impose_high_af_cutoff_upfront)
    ht = ht.join(possible_variants_ht, 'outer')
    ht.write(f'{root}/model/possible_data/all_data_transcript_pop_{custom_model}.ht', True)
    ht = hl.read_table(f'{root}/model/possible_data/all_data_transcript_pop_{custom_model}.ht')
    # Sum out the coverage dimension: group by the remaining fields only.
    grouping.remove('coverage')
    agg_expr = {
        'variant_count': hl.agg.sum(ht.variant_count),
        'adjusted_mutation_rate': hl.agg.sum(ht.adjusted_mutation_rate),
        'possible_variants': hl.agg.sum(ht.possible_variants),
        'expected_variants': hl.agg.sum(ht.expected_variants),
        'mu': hl.agg.sum(ht.mu)
    }
    for pop in POPS:
        agg_expr[f'adjusted_mutation_rate_{pop}'] = hl.agg.array_sum(ht[f'adjusted_mutation_rate_{pop}'])
        agg_expr[f'expected_variants_{pop}'] = hl.agg.array_sum(ht[f'expected_variants_{pop}'])
        agg_expr[f'downsampling_counts_{pop}'] = hl.agg.array_sum(ht[f'downsampling_counts_{pop}'])
    ht = ht.group_by(*grouping).partition_hint(1000).aggregate(**agg_expr)
    return ht.annotate(obs_exp=ht.variant_count / ht.expected_variants)
def finalize_dataset(po_ht: hl.Table, keys: Tuple[str] = ('gene', 'transcript', 'canonical'),
                     n_partitions: int = 1000) -> hl.Table:
    """Collapse the proportion-observed table into one row per ``keys``.

    Builds per-key observed/expected/mu/possible summaries for classic LoF,
    LOFTEE HC (with and without OS), missense, probably-damaging missense
    and synonymous variants, then attaches o/e confidence intervals and
    z-scores.
    """
    # This function aggregates over genes in all cases, as XG spans PAR and non-PAR X
    po_ht = po_ht.repartition(n_partitions).persist()
    # Getting classic LoF annotations (no LOFTEE)
    classic_lof_annotations = hl.literal({'stop_gained', 'splice_donor_variant', 'splice_acceptor_variant'})
    lof_ht_classic = po_ht.filter(classic_lof_annotations.contains(po_ht.annotation) &
                                  ((po_ht.modifier == 'HC') | (po_ht.modifier == 'LC')))
    lof_ht_classic = collapse_lof_ht(lof_ht_classic, keys, False)
    lof_ht_classic = lof_ht_classic.rename({x: f'{x}_classic' for x in list(lof_ht_classic.row_value)})
    # Getting all LoF annotations (LOFTEE HC + OS)
    lof_ht_classic_hc = po_ht.filter((po_ht.modifier == 'HC') | (po_ht.modifier == 'OS'))
    lof_ht_classic_hc = collapse_lof_ht(lof_ht_classic_hc, keys, False)
    lof_ht_classic_hc = lof_ht_classic_hc.rename({x: f'{x}_with_os' for x in list(lof_ht_classic_hc.row_value)})
    # Getting all LoF annotations (LOFTEE HC)
    lof_ht = po_ht.filter(po_ht.modifier == 'HC')
    lof_ht = collapse_lof_ht(lof_ht, keys, False)
    # Missense: totals plus per-population downsampling sums.
    mis_ht = po_ht.filter(po_ht.annotation == 'missense_variant')
    agg_expr = {
        'obs_mis': hl.agg.sum(mis_ht.variant_count),
        'exp_mis': hl.agg.sum(mis_ht.expected_variants),
        'oe_mis': hl.agg.sum(mis_ht.variant_count) / hl.agg.sum(mis_ht.expected_variants),
        'mu_mis': hl.agg.sum(mis_ht.mu),
        'possible_mis': hl.agg.sum(mis_ht.possible_variants)
    }
    for pop in POPS:
        agg_expr[f'exp_mis_{pop}'] = hl.agg.array_sum(mis_ht[f'expected_variants_{pop}'])
        agg_expr[f'obs_mis_{pop}'] = hl.agg.array_sum(mis_ht[f'downsampling_counts_{pop}'])
    mis_ht = mis_ht.group_by(*keys).aggregate(**agg_expr)
    # Probably-damaging missense (PolyPhen modifier).
    pphen_mis_ht = po_ht.filter(po_ht.modifier == 'probably_damaging')
    pphen_mis_ht = pphen_mis_ht.group_by(*keys).aggregate(obs_mis_pphen=hl.agg.sum(pphen_mis_ht.variant_count),
                                                          exp_mis_pphen=hl.agg.sum(pphen_mis_ht.expected_variants),
                                                          oe_mis_pphen=hl.agg.sum(pphen_mis_ht.variant_count) / hl.agg.sum(pphen_mis_ht.expected_variants),
                                                          possible_mis_pphen=hl.agg.sum(pphen_mis_ht.possible_variants))
    # Synonymous: same aggregation shape as missense.
    syn_ht = po_ht.filter(po_ht.annotation == 'synonymous_variant').key_by(*keys)
    agg_expr = {
        'obs_syn': hl.agg.sum(syn_ht.variant_count),
        'exp_syn': hl.agg.sum(syn_ht.expected_variants),
        'oe_syn': hl.agg.sum(syn_ht.variant_count) / hl.agg.sum(syn_ht.expected_variants),
        'mu_syn': hl.agg.sum(syn_ht.mu),
        'possible_syn': hl.agg.sum(syn_ht.possible_variants)
    }
    for pop in POPS:
        agg_expr[f'exp_syn_{pop}'] = hl.agg.array_sum(syn_ht[f'expected_variants_{pop}'])
        agg_expr[f'obs_syn_{pop}'] = hl.agg.array_sum(syn_ht[f'downsampling_counts_{pop}'])
    syn_ht = syn_ht.group_by(*keys).aggregate(**agg_expr)
    # Join all summaries onto the classic-LoF table, then add o/e CIs.
    ht = lof_ht_classic.annotate(**mis_ht[lof_ht_classic.key], **pphen_mis_ht[lof_ht_classic.key],
                                 **syn_ht[lof_ht_classic.key], **lof_ht[lof_ht_classic.key],
                                 **lof_ht_classic_hc[lof_ht_classic.key])
    syn_cis = oe_confidence_interval(ht, ht.obs_syn, ht.exp_syn, prefix='oe_syn')
    mis_cis = oe_confidence_interval(ht, ht.obs_mis, ht.exp_mis, prefix='oe_mis')
    lof_cis = oe_confidence_interval(ht, ht.obs_lof, ht.exp_lof, prefix='oe_lof')
    ht = ht.annotate(**syn_cis[ht.key], **mis_cis[ht.key], **lof_cis[ht.key])
    return calculate_all_z_scores(ht)  # .annotate(**oe_confidence_interval(ht, ht.obs_lof, ht.exp_lof)[ht.key])
def collapse_lof_ht(lof_ht: hl.Table, keys: Tuple[str], calculate_pop_pLI: bool = False) -> hl.Table:
    """Aggregate a LoF-filtered proportion-observed table to one row per ``keys``.

    Sums observed/expected/mu/possible counts (totals and per-population
    downsampling arrays), drops rows with zero expected LoF, optionally
    computes per-population pLI across downsamplings, and attaches
    pLI/pRec/pNull plus ``oe_lof``.
    """
    agg_expr = {
        'obs_lof': hl.agg.sum(lof_ht.variant_count),
        'mu_lof': hl.agg.sum(lof_ht.mu),
        'possible_lof': hl.agg.sum(lof_ht.possible_variants),
        'exp_lof': hl.agg.sum(lof_ht.expected_variants)
    }
    for pop in POPS:
        agg_expr[f'exp_lof_{pop}'] = hl.agg.array_sum(lof_ht[f'expected_variants_{pop}'])
        agg_expr[f'obs_lof_{pop}'] = hl.agg.array_sum(lof_ht[f'downsampling_counts_{pop}'])
    lof_ht = lof_ht.group_by(*keys).aggregate(**agg_expr).persist()
    # pLI and oe_lof are undefined when nothing is expected.
    lof_ht = lof_ht.filter(lof_ht.exp_lof > 0)
    if calculate_pop_pLI:
        pop_lengths = get_all_pop_lengths(lof_ht, 'obs_lof_')
        print(pop_lengths)
        for pop_length, pop in pop_lengths:
            print(f'Calculating pLI for {pop}...')
            plis = []
            # NOTE(review): downsampling indices below 8 are skipped here;
            # the rationale for the cutoff is not visible in this file — confirm.
            for i in range(8, pop_length):
                print(i)
                ht = lof_ht.filter(lof_ht[f'exp_lof_{pop}'][i] > 0)
                pli_ht = pLI(ht, ht[f'obs_lof_{pop}'][i], ht[f'exp_lof_{pop}'][i])
                plis.append(pli_ht[lof_ht.key])
            lof_ht = lof_ht.annotate(**{
                f'pLI_{pop}': [pli.pLI for pli in plis],
                f'pRec_{pop}': [pli.pRec for pli in plis],
                f'pNull_{pop}': [pli.pNull for pli in plis],
            })
    return lof_ht.annotate(
        **pLI(lof_ht, lof_ht.obs_lof, lof_ht.exp_lof)[lof_ht.key],
        oe_lof=lof_ht.obs_lof / lof_ht.exp_lof).key_by(*keys)
def annotate_constraint_groupings(ht: Union[hl.Table, hl.MatrixTable],
                                  custom_model: str = None) -> Tuple[Union[hl.Table, hl.MatrixTable], List[str]]:
    """
    HT must be exploded against whatever axis

    Need to add `'coverage': ht.exome_coverage` here (which will get corrected out later)

    Adds the grouping fields used by downstream aggregation (annotation,
    modifier, gene, coverage, plus model-specific extras) and returns the
    annotated table together with the list of grouping field names.
    """
    if custom_model == 'worst_csq':
        groupings = {
            'annotation': ht.worst_csq_by_gene.most_severe_consequence,
            # LOFTEE classification when present, else PolyPhen prediction.
            'modifier': hl.case()
                .when(hl.is_defined(ht.worst_csq_by_gene.lof),
                      ht.worst_csq_by_gene.lof)
                .when(hl.is_defined(ht.worst_csq_by_gene.polyphen_prediction),
                      ht.worst_csq_by_gene.polyphen_prediction)
                .default('None'),
            'gene': ht.worst_csq_by_gene.gene_symbol,
            'coverage': ht.exome_coverage
        }
    elif custom_model == 'tx_annotation':
        groupings = {
            'annotation': ht.tx_annotation.csq,
            'modifier': ht.tx_annotation.lof,
            'gene': ht.tx_annotation.symbol,
            # Bucket mean expression into high (>=0.9) / medium (>0.1) /
            # low (defined) / missing.
            'expressed': hl.case(missing_false=True).when(
                ht.tx_annotation.mean_expression >= 0.9, 'high').when(
                ht.tx_annotation.mean_expression > 0.1, 'medium').when(
                hl.is_defined(ht.tx_annotation.mean_expression), 'low').default('missing'),
            'coverage': ht.exome_coverage
        }
    else:
        groupings = {
            'annotation': ht.transcript_consequences.most_severe_consequence,
            'modifier': hl.case()
                .when(hl.is_defined(ht.transcript_consequences.lof),
                      ht.transcript_consequences.lof)
                .when(hl.is_defined(ht.transcript_consequences.polyphen_prediction),
                      ht.transcript_consequences.polyphen_prediction)
                .default('None'),
            'transcript': ht.transcript_consequences.transcript_id,
            'gene': ht.transcript_consequences.gene_symbol,
            'canonical': hl.or_else(ht.transcript_consequences.canonical == 1, False),
            'coverage': ht.exome_coverage
        }
    # NOTE(review): this assigns the entire transcript_consequences struct
    # rather than a distance field, and the model name checked here is
    # 'splice_region' while the caller branch is named 'distance_to_splice' —
    # both look suspicious; confirm the intended field and name.
    if custom_model == 'splice_region':
        groupings['distance_splice'] = ht.transcript_consequences
    ht = ht.annotate(**groupings) if isinstance(ht, hl.Table) else ht.annotate_rows(**groupings)
    return ht, list(groupings.keys())
# Model building
def build_coverage_model(coverage_ht: hl.Table) -> (float, float):
    """Fit the low-coverage correction model.

    Regresses the observed/expected ratio on log10 coverage and returns the
    fitted (intercept, slope) pair.
    """
    regression = hl.agg.linreg(coverage_ht.low_coverage_obs_exp,
                               [1, coverage_ht.log_coverage])
    return tuple(coverage_ht.aggregate(regression).beta)
def build_plateau_models(ht: hl.Table, weighted: bool = False) -> Dict[str, Tuple[float, float]]:
    """Calibrate the high-coverage (plateau) model.

    Returns, keyed by CpG status, the (intercept, slope) of a linear fit of
    proportion observed against mutation rate, optionally weighted by the
    number of possible variants.
    """
    # TODO: try square weighting
    prop_observed = ht.observed_variants / ht.possible_variants
    ht = ht.annotate(high_coverage_proportion_observed=prop_observed)
    lin_fit = hl.agg.linreg(ht.high_coverage_proportion_observed, [1, ht.mu_snp],
                            weight=ht.possible_variants if weighted else None)
    return ht.aggregate(hl.agg.group_by(ht.cpg, lin_fit).map_values(lambda fit: fit.beta))
def build_plateau_models_pop(ht: hl.Table, weighted: bool = False) -> Dict[str, Tuple[float, float]]:
    """
    Calibrates high coverage model (returns intercept and slope)

    Fits, per CpG status, proportion observed against mu_snp: a list of
    models per population (one per downsampling index) plus a single
    'total' model, optionally weighted by the number of possible variants.
    """
    pop_lengths = get_all_pop_lengths(ht)
    agg_expr = {
        # One linear fit per downsampling index i for each population.
        pop: [hl.agg.group_by(ht.cpg,
                              hl.agg.linreg(ht[f'observed_{pop}'][i] / ht.possible_variants, [1, ht.mu_snp],
                                            weight=ht.possible_variants if weighted else None)
                              ).map_values(lambda x: x.beta) for i in range(length)]
        for length, pop in pop_lengths
    }
    agg_expr['total'] = hl.agg.group_by(ht.cpg,
                                        hl.agg.linreg(ht.observed_variants / ht.possible_variants, [1, ht.mu_snp],
                                                      weight=ht.possible_variants if weighted else None)
                                        ).map_values(lambda x: x.beta)
    return ht.aggregate(hl.struct(**agg_expr))
def get_all_pop_lengths(ht, prefix: str = 'observed_', pops: List[str] = POPS, skip_assertion: bool = False):
    """Return ``[(n_downsamplings, pop), ...]`` for each population array field.

    Uses the minimum array length observed per population; unless
    ``skip_assertion`` is set, verifies that every row's arrays have exactly
    that length.
    """
    min_lengths = ht.aggregate([hl.agg.min(hl.len(ht[f'{prefix}{pop}'])) for pop in pops])
    pop_lengths = list(zip(min_lengths, pops))
    print('Found: ', pop_lengths)
    if not skip_assertion:
        length_checks = [hl.len(ht[f'{prefix}{pop}']) == length for length, pop in pop_lengths]
        assert ht.all(hl.all(lambda f: f, length_checks))
    return pop_lengths
def get_downsamplings(ht):
    """Return (index, downsampling_size) pairs from the table's ``freq_meta``.

    Keeps only entries with group 'adj' and pop 'global' that define a
    'downsampling' key; the size is coerced to int.
    """
    meta = ht.freq_meta.collect()[0]
    pairs = []
    for idx, entry in enumerate(meta):
        if entry.get('group') != 'adj' or entry.get('pop') != 'global':
            continue
        size = entry.get('downsampling')
        if size is not None:
            pairs.append((idx, int(size)))
    return pairs
# Plotting
def old_new_compare(source, axis_type='log'):
    """Build a bokeh scatter comparing old vs. new mutation rates.

    Tooltips are attached only for the columns actually present in
    ``source.data``.
    """
    fig = figure(title="Mutation rate comparison", x_axis_type=axis_type,
                 y_axis_type=axis_type, tools=TOOLS)
    fig.xaxis.axis_label = 'Old mutation rate'
    fig.yaxis.axis_label = 'New mutation rate'
    fig.scatter(x='old_mu_snp', y='mu_snp', fill_color='colors', line_color='colors',
                legend='variant_type', source=source)
    candidate_cols = ('context', 'ref', 'alt', 'methylation_level', 'old_mu_snp', 'mu_snp')
    available = list(source.data)
    fig.select_one(HoverTool).tooltips = [(col, f'@{col}') for col in candidate_cols
                                          if col in available]
    fig.legend.location = "top_left"
    return fig
def pLI(ht: hl.Table, obs: hl.expr.Int32Expression, exp: hl.expr.Float32Expression) -> hl.Table:
last_pi = {'Null': 0, 'Rec': 0, 'LI': 0}
pi = {'Null': 1 / 3, 'Rec': 1 / | |
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import os
import sys
import subprocess
import string
import argparse
from time import gmtime, strftime
# Script version banner, printed at startup for traceability.
version = 'v2.0'
print('createImageSeq {}'.format(version))
print('python {}'.format(sys.version))
# True when running under Python 3 (the script supports both 2 and 3).
python3 = sys.version_info > (3, 0)
# Folder-name prefix used when exiftool cannot determine a capture date.
nodate_prefix = 'nodate_'
# Set True to bypass argparse and use the hard-coded Namespace below.
stand_alone_testing = False
def conv_command(fileName, fps, hor, vert, targetDirName, start_given, stop_given):
    """Build the avconv/ffmpeg argument list that extracts an image sequence.

    Extracts the clip between start_given and stop_given seconds at the
    requested frame rate and size. Relies on the module globals movieDir,
    sequenceParentDir and libavpath.
    """
    source_path = os.path.join(movieDir, fileName)
    target_pattern = os.path.join(sequenceParentDir, targetDirName, 'image-%05d.png')
    frame_size = str(hor) + 'x' + str(vert)
    clip_length = stop_given - start_given
    return [
        libavpath,
        '-loglevel', 'quiet',
        '-i', source_path,
        '-r', str(fps),
        '-s', frame_size,
        '-ss', str(start_given),
        '-t', str(clip_length),
        '-f', 'image2',
        target_pattern,
    ]
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Locate ``cmd`` on the search path and return its full path, or None.

    Behaves like Python 3's shutil.which (and also runs on 2.7): honours the
    given access ``mode``, checks PATHEXT suffixes on Windows, and resolves a
    cmd containing a directory component directly instead of searching PATH.
    """

    def _usable(candidate, access_mode):
        # A hit must exist, be accessible with the requested mode, and not be
        # a directory (directories can pass os.access on Windows).
        return (os.path.exists(candidate) and os.access(candidate, access_mode)
                and not os.path.isdir(candidate))

    # A cmd with a directory part (including relative ones like ./script) is
    # checked directly, never looked up on PATH.
    if os.path.dirname(cmd):
        return cmd if _usable(cmd, mode) else None

    if path is None:
        path = os.environ.get('PATH', os.defpath)
    if not path:
        return None
    search_dirs = path.split(os.pathsep)

    if sys.platform == u"win32":
        # The current directory takes precedence on Windows.
        if os.curdir not in search_dirs:
            search_dirs.insert(0, os.curdir)
        # PATHEXT lists the executable suffixes to try; if cmd already ends
        # with one (e.g. "python.exe"), only that exact name is tested.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            candidates = [cmd]
        else:
            candidates = [cmd + ext for ext in pathext]
    else:
        # POSIX has no PATHEXT equivalent; try cmd exactly as given.
        candidates = [cmd]

    visited = set()
    for directory in search_dirs:
        normalized = os.path.normcase(directory)
        if normalized in visited:
            continue
        visited.add(normalized)
        for candidate in candidates:
            full_path = os.path.join(directory, candidate)
            if _usable(full_path, mode):
                return full_path
    return None
# Command-line interface. All values arrive as strings; the literal string
# 'NULL' is the caller's convention for "not provided" (log files are named
# 'trackdem_*', so the caller is presumably the trackdem wrapper — confirm).
parser = argparse.ArgumentParser()
parser.add_argument('-moviepath')
parser.add_argument('-imagepath')
parser.add_argument('-libavpath')
parser.add_argument('-exiftoolpath')
parser.add_argument('-x')
parser.add_argument('-y')
parser.add_argument('-fps')
parser.add_argument('-nsec')
parser.add_argument('-start')
parser.add_argument('-stop')
# One or more movie file extensions, e.g. -ext mts avi
parser.add_argument('-ext', nargs='*')
parser.add_argument('-verbose')
parser.add_argument('-logfile')
# Either synthesize an args object for interactive testing, or parse argv.
if stand_alone_testing:
    from argparse import Namespace
    args = Namespace()
    args.moviepath = 'Movies'
    args.imagepath = 'ImageSequences'
    args.libavpath = 'avconv'
    args.exiftoolpath = 'exiftool'
    args.x = '1920'
    args.y = '1080'
    args.fps = '15'
    args.nsec = '2'
    args.start = 'NULL'
    args.stop = 'NULL'
    args.ext = ['mts', 'avi']
    args.verbose = 'TRUE'
    args.logfile = 'TRUE'
else:
    args = parser.parse_args()
# Decide whether to mirror progress into a timestamped log file.
if args.logfile not in ['FALSE', 'TRUE']:
    # BUG FIX: the original printed args.verbose in this message about the
    # logfile option.
    print('Unknown value %s for logfile. Converting to FALSE' % args.logfile)
    logtofile = False
else:
    logtofile = args.logfile == 'TRUE'
if logtofile:
    timeofday = gmtime()
    logfilename = os.path.join(os.getcwd(), 'trackdem_' + strftime("%Y-%m-%d_%H-%M-%S", timeofday) + '.log')
    # NOTE: the log file handle stays open for the lifetime of the script.
    logfile = open(logfilename, 'w')
    logfile.write('CreateImageSeq ' + version + ' log created on ' + strftime("%Y-%m-%d %H:%M:%S", timeofday) + '\n\n')
    print('Writing to log file ' + logfilename)
if logtofile:
    logfile.write('args = %s\n\n' % str(args))
# process arguments
libavpath = args.libavpath
exiftoolpath = args.exiftoolpath
if logtofile:
    # BUG FIX: the original logged the literal string 'libavpath' instead of
    # the configured command.
    logfile.write('Calling "which" with command: "%s"\n\n' % libavpath)
avconv = which(libavpath)
if logtofile:
    logfile.write('Calling "which" with command: "%s"\n\n' % exiftoolpath)
exiftool = which(exiftoolpath)
# avconv is mandatory: without it no conversion can happen.
if avconv:
    if logtofile:
        logfile.write('Using avconv found at %s\n\n' % avconv)
else:
    if logtofile:
        logfile.write('Cannot find avconv executable, exiting\n')
    print('Cannot find avconv executable, specify with "libavpath="')
    print('Cannot continue, aborting execution')
    exit(1)
# exiftool is optional: without it, folder names and durations use defaults.
if exiftool:
    if logtofile:
        # BUG FIX: the original re-ran which('exiftool') here, which can
        # disagree with the path actually used when exiftoolpath is custom.
        logfile.write('Using exiftool found at %s\n\n' % exiftool)
else:
    if logtofile:
        logfile.write('Cannnot find exiftool executable, falling back to defaults for folder names and duration\n\n')
    print('Cannot find exiftool executable, specify with "exiftoolpath="')
    print('Falling back to defaults for folder names and duration')


def _required_arg(value, name):
    # Mandatory string argument: the wrapper's 'NULL' ("not set") aborts.
    if value == 'NULL':
        if logtofile:
            logfile.write('%s must not be NULL, exiting\n' % name)
        print('%s must not be NULL. Stop' % name)
        exit(1)
    return value


def _optional_float(value):
    # Optional numeric argument: 'NULL' means "not given" -> None.
    return None if value == 'NULL' else float(value)


moviepath = _required_arg(args.moviepath, 'moviepath')
imagepath = _required_arg(args.imagepath, 'imagepath')
x = _required_arg(args.x, 'x')
y = _required_arg(args.y, 'y')
fps = _required_arg(args.fps, 'fps')
nsec = _optional_float(args.nsec)
start_given = _optional_float(args.start)
stop_given = _optional_float(args.stop)
if args.ext == ['NULL']:
    if logtofile:
        logfile.write('ext must not be NULL, exiting\n')
    print('ext must not be NULL. Stop')
    exit(1)
else:
    # Extensions are matched case-insensitively later.
    extensions = [e.lower() for e in args.ext]
if args.verbose not in ['FALSE', 'TRUE']:
    if logtofile:
        # BUG FIX: the original wrote a literal '%s' to the log (the
        # formatting argument was missing).
        logfile.write('Unknown value %s for verbose. Converting to FALSE\n\n' % args.verbose)
    print('Unknown value %s for verbose. Converting to FALSE' % args.verbose)
    verbose = False
else:
    verbose = args.verbose == 'TRUE'
# NOTE(review): these truthiness checks treat an explicit 0.0 the same as
# "not given" (so does the main loop below) — kept for compatibility.
if start_given and stop_given and not start_given < stop_given:
    if logtofile:
        logfile.write('Error: start (%.2f) must be smaller than stop (%.2f), exiting\n' % (start_given, stop_given))
    print('Error: start (%.2f) must be smaller than stop (%.2f). Stop.' % (start_given, stop_given))
    exit(1)
if start_given and start_given < 0:
    if logtofile:
        # BUG FIX: the original applied '%' arguments to a message with no
        # placeholders, raising TypeError on this error path.
        logfile.write('Error: start must not be negative, exiting\n')
    print('Error: start must not be negative. Stop.')
    exit(1)
if verbose:
    # NOTE(review): banner says 'createImageSec' (sic) — typo in the
    # user-facing string, left unchanged here.
    print('createImageSec %s' % version)
# Resolve user-supplied paths once; all later code uses absolute paths.
movieDir = os.path.abspath(moviepath)
sequenceParentDir = os.path.abspath(imagepath)
if logtofile:
    logfile.write('Running script with values: localdir: %s, moviedir: ./%s, sequencedir: ./%s, x: %s, y: %s, fps: %s, nsec: %s, start_given: %s, stop_given: %s, extensions: %s\n\n' % (
        os.getcwd(), moviepath, imagepath, x, y, fps, nsec, start_given, stop_given, extensions
    ))
def getDuration(filePath):
    """Return the duration (seconds) of the video at ``filePath`` via exiftool.

    Returns None when exiftool is unavailable or its output cannot be
    obtained/parsed; failures are logged when logging is enabled.
    """
    if not exiftool:
        return None
    try:
        # BUG FIX: the original passed open('/dev/null') (read mode, leaked
        # file handle, non-portable path); open os.devnull for writing and
        # close it deterministically.
        with open(os.devnull, 'w') as devnull:
            durationRawFirst = subprocess.check_output([
                exiftool,
                '-api', 'LargeFileSupport',
                '-n',
                '-s3',
                '-duration',
                filePath
            ], stderr=devnull).split()[0]
        # check_output returns bytes on Python 3, str on Python 2.
        if python3:
            duration = float(durationRawFirst.decode())
        else:
            duration = float(str(durationRawFirst))
    except (subprocess.CalledProcessError, IndexError, ValueError) as e:
        # IndexError/ValueError cover empty or non-numeric exiftool output,
        # which previously crashed the whole script instead of falling back.
        if logtofile:
            logfile.write('Error in determining duration for file %s: %s\n\n' % (filePath, e))
        duration = None
    return duration
def getTargetDirNamePrefix(fileName):
    """Derive a 'YYYYMMDD_' folder-name prefix from the movie's EXIF date.

    Uses exiftool's DateTimeOriginal tag; falls back to ``nodate_prefix``
    when exiftool is unavailable, fails, or returns anything that is not an
    8-digit date.
    """
    if logtofile:
        logfile.write('Entering getTargetDirNamePrefix with fileName = "%s"\n\n' % fileName)
    if not exiftool:
        return nodate_prefix
    # Pre-initialize so the except branch can always log it.
    # BUG FIX: the original referenced this name in the except clause even
    # though it may never have been assigned (NameError masking the error).
    targetDirNamePrefixRaw = ''
    try:
        # BUG FIX: the original passed open('/dev/null') (read mode, leaked
        # handle, non-portable); use os.devnull opened for writing.
        with open(os.devnull, 'w') as devnull:
            targetDirNamePrefixFromExif = subprocess.check_output([
                exiftool,
                '-api', 'LargeFileSupport',
                '-DateTimeOriginal',
                '-T',
                os.path.join(movieDir, fileName)
            ], stderr=devnull)
        if python3:
            targetDirNamePrefixFromExif = targetDirNamePrefixFromExif.decode()
        targetDirNamePrefixRaw = str(targetDirNamePrefixFromExif).strip().strip('-')
        # Note: '-' is sometimes returned by exiftool if DateTimeOriginal field empty
        if logtofile:
            logfile.write('Got targetDirNamePrefixRaw = "%s"\n\n' % targetDirNamePrefixRaw)
        if targetDirNamePrefixRaw == '':
            targetDirNamePrefix = nodate_prefix
        else:
            # First token is the date, e.g. '2020:01:31'; drop the colons.
            targetDirNamePrefixFirst = targetDirNamePrefixRaw.split()[0]
            targetDirNamePrefix = targetDirNamePrefixFirst.replace(':', '') + '_'
    except subprocess.CalledProcessError as e:
        if logtofile:
            logfile.write('Error in determining date for file %s: %s\n\n' % (os.path.join(movieDir, fileName), e))
            logfile.write('Got raw prefix: "%s"\n\n' % targetDirNamePrefixRaw)
        targetDirNamePrefix = nodate_prefix
    # BUG FIX: the original test `not len(...) == 8 and ....isdigit()` could
    # never be true (precedence bug, and the trailing '_' defeats isdigit());
    # require exactly 8 digits followed by '_', else fall back.
    if not (len(targetDirNamePrefix) == 9 and targetDirNamePrefix[:8].isdigit()):
        targetDirNamePrefix = nodate_prefix
    return targetDirNamePrefix
# Collect the movies to convert: plain files in movieDir whose extension
# (case-insensitive, without the dot) is in the requested list.
movieNames = []
if logtofile:
    logfile.write('Found the following entries in moviedir (%s):\n\n+++\n\n' % movieDir)
for fileName in os.listdir(movieDir):
    if logtofile:
        logfile.write('%s\n' % fileName)
    movieName, movieExtension = os.path.splitext(fileName)
    if not (os.path.isfile(os.path.join(movieDir, fileName))
            and movieExtension[1:].lower() in extensions):
        if logtofile:
            logfile.write('File %s has the wrong name or is a directory, skipping file\n\n' % fileName)
    else:
        movieNames.append(fileName)
if logtofile:
    logfile.write('\n+++\n\n')
# Create the output parent folder on first run.
if not os.path.exists(sequenceParentDir):
    os.mkdir(sequenceParentDir)
# An explicit start+stop fixes the clip exactly, so nsec would be redundant.
if start_given and stop_given and nsec:
    if logtofile:
        logfile.write('both start and stop given: nsec ignored\n\n')
    if verbose:
        print('Warning - both start and stop given: nsec ignored.')
for fileName in movieNames:
movieName, movieExtension = os.path.splitext(fileName)
targetDirNamePrefix = getTargetDirNamePrefix(fileName)
targetDirName = targetDirNamePrefix + fileName
if os.path.exists(os.path.join(sequenceParentDir, targetDirName)):
if logtofile:
logfile.write('Folder %s already exists, file %s skipped\n\n' % (targetDirName, fileName))
if verbose:
print('Folder %s already exists, file %s skipped' % (targetDirName, fileName))
else:
if targetDirNamePrefix == nodate_prefix:
if logtofile:
logfile.write('File %s: exiftool cannot determine date\n\n' % fileName)
if verbose:
print('File %s: exiftool cannot determine date' % fileName)
if start_given:
filestart = start_given
else:
filestart = 0
if stop_given:
filestop = stop_given
else:
if python3:
filestop = sys.maxsize
else:
filestop = sys.maxint
duration = getDuration(os.path.join(movieDir, fileName))
if logtofile:
logfile.write('Duration %s found for file %s\n\n' % (duration, fileName))
if not duration:
if logtofile:
logfile.write('File %s: exiftool cannot determine length\n\n' % fileName)
if verbose:
print('File %s: exiftool cannot determine length.' % fileName)
if start_given and stop_given:
pass
elif start_given:
if nsec:
filestop = filestart + nsec
elif stop_given:
if nsec:
filestart = filestop - nsec
else:
if nsec:
if duration:
filestart = duration/2 - nsec/2
filestop = duration/2 + nsec/2
else:
filestop = nsec
if duration and not filestart < duration:
if logtofile:
logfile.write('File %s: Start (%.2f) should be smaller dan duration (%.2f sec). Skipping video\n\n' % (fileName, filestart, duration))
if verbose:
print('File %s: Start (%.2f) should be smaller dan duration (%.2f sec). Skipping video.' % (fileName, filestart, duration))
continue
filestart = max(filestart, 0)
if duration and filestop > duration:
if logtofile:
logfile.write('File %s: Requested end past end of file, setting end to %.2f\n\n' % (fileName, duration))
if verbose:
print('File %s: Requested end past end of file, setting end to %.2f' % (fileName, duration))
filestop = min(filestop, duration)
if filestop - filestart < nsec:
if logtofile:
logfile.write( 'File %s: Cannot convert requested duration (%.2f sec), converting %.2f sec\n\n' % (fileName, nsec, filestop-filestart))
if verbose:
print('File %s: Cannot convert requested duration (%.2f sec), converting %.2f sec.' % (fileName, nsec, filestop-filestart))
command = conv_command(fileName, fps, x, y, targetDirName, filestart, filestop)
if logtofile:
logfile.write('Command: %s\n\n' % command)
os.mkdir(os.path.join(sequenceParentDir, targetDirName))
try:
subprocess.call(command)
if duration:
if logtofile:
logfile.write('File %s: Converting with start: %.2f, stop: %.2f to folder %s\n\n' % (fileName, filestart, filestop, targetDirName))
if verbose:
print('File %s: Converting with start: %.2f, stop: %.2f to folder | |
a.sampling_interval,
sampling_rate=2 * a.sampling_rate)
npt.assert_equal(ts.Frequency(b.sampling_rate),
ts.Frequency(2 * a.sampling_rate))
npt.assert_equal(b.sampling_interval,
ts.TimeArray(0.5 * a.sampling_rate))
b = ts.UniformTime(duration=10,
sampling_interval=a.sampling_interval)
npt.assert_equal(b.sampling_rate, a.sampling_rate)
b = ts.UniformTime(duration=10,
sampling_rate=a.sampling_rate)
npt.assert_equal(b.sampling_interval, a.sampling_interval)
# make sure the t0 ando other attribute is copied
a = ts.UniformTime(length=1, sampling_rate=1)
b = a.copy()
npt.assert_equal(b.duration, a.duration)
npt.assert_equal(b.sampling_rate, a.sampling_rate)
npt.assert_equal(b.sampling_interval, a.sampling_interval)
npt.assert_equal(b.t0, a.t0)
def test_UniformTime_repr():
    """
    Doctests pinning UniformTime's repr, time-unit conversion and
    sampling-rate/interval propagation.

    >>> time1 = ts.UniformTime(sampling_rate=1000,time_unit='ms',length=3)
    >>> time1.sampling_rate
    1000.0 Hz
    >>> time1
    UniformTime([ 0., 1., 2.], time_unit='ms')
    >>> time2= ts.UniformTime(sampling_rate=1000,time_unit='s',length=3)
    >>> time2.sampling_rate
    1000.0 Hz
    >>> time2
    UniformTime([ 0. , 0.001, 0.002], time_unit='s')
    >>> a = ts.UniformTime(length=5,sampling_rate=1,time_unit='ms')
    >>> b = ts.UniformTime(a)
    >>> b
    UniformTime([ 0., 1000., 2000., 3000., 4000.], time_unit='ms')
    >>> a
    UniformTime([ 0., 1000., 2000., 3000., 4000.], time_unit='ms')
    >>> b = ts.UniformTime(a,time_unit='s')
    >>> b
    UniformTime([ 0., 1., 2., 3., 4.], time_unit='s')
    >>> a = ts.UniformTime(length=1,sampling_rate=2)
    >>> b = ts.UniformTime(length=10,sampling_interval=a.sampling_interval)
    >>> b.sampling_rate
    2.0 Hz
    """
def test_Frequency():
    """Check that Frequency.to_period matches the unit conversion table."""
    conversion = ts.time_unit_conversion
    for unit in ['ns', 'ms', 's', None]:
        period_unit = ts.Frequency(1, time_unit=unit).to_period()
        npt.assert_equal(period_unit, conversion[unit])
        period_khz = ts.Frequency(1000, time_unit=unit).to_period()
        npt.assert_equal(period_khz, conversion[unit] / 1000)
        period_mhz = ts.Frequency(0.001, time_unit=unit).to_period()
        npt.assert_equal(period_mhz, conversion[unit] * 1000)
def test_TimeSeries():
    """Testing the initialization of the uniform time series object

    Covers equivalent construction paths (duration / sampling_interval /
    sampling_rate / explicit UniformTime), error cases, and elementwise
    arithmetic on TimeSeries objects.
    """
    #Test initialization with duration:
    tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], duration=10)
    tseries2 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_interval=1)
    npt.assert_equal(tseries1.time, tseries2.time)
    #downsampling:
    t1 = ts.UniformTime(length=8, sampling_rate=2)
    #duration is the same, but we're downsampling to 1Hz
    tseries1 = ts.TimeSeries(data=[1, 2, 3, 4], time=t1, sampling_rate=1)
    #If you didn't explicitely provide the rate you want to downsample to, that
    #is an error:
    # NOTE(review): a dict is passed as the positional data argument here and
    # below — presumably meant keyword args (data=..., time=t1); any
    # wrong-length first argument raises, so the check still passes. Confirm.
    with pytest.raises(ValueError) as e_info:
        ts.TimeSeries(dict(data=[1, 2, 3, 4], time=t1))
    tseries2 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1)
    tseries3 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1000,
                             time_unit='ms')
    #you can specify the sampling_rate or the sampling_interval, to the same
    #effect, where specificying the sampling_interval is in the units of that
    #time-series:
    tseries4 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_interval=1,
                             time_unit='ms')
    npt.assert_equal(tseries4.time, tseries3.time)
    #The units you use shouldn't matter - time is time:
    tseries6 = ts.TimeSeries(data=[1, 2, 3, 4],
                             sampling_interval=0.001,
                             time_unit='s')
    npt.assert_equal(tseries6.time, tseries3.time)
    #And this too - perverse, but should be possible:
    tseries5 = ts.TimeSeries(data=[1, 2, 3, 4],
                             sampling_interval=ts.TimeArray(0.001,
                                                           time_unit='s'),
                             time_unit='ms')
    npt.assert_equal(tseries5.time, tseries3.time)
    #initializing with a UniformTime object:
    t = ts.UniformTime(length=3, sampling_rate=3)
    data = [1, 2, 3]
    tseries7 = ts.TimeSeries(data=data, time=t)
    npt.assert_equal(tseries7.data, data)
    data = [1, 2, 3, 4]
    #If the data is not the right length, that should throw an error:
    with pytest.raises(ValueError) as e_info:
        ts.TimeSeries(dict(data=data, time=t))
    # test basic arithmetics wiht TimeSeries
    tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_rate=1)
    tseries2 = tseries1 + 1
    npt.assert_equal(tseries1.data + 1, tseries2.data)
    npt.assert_equal(tseries1.time, tseries2.time)
    tseries2 -= 1
    npt.assert_equal(tseries1.data, tseries2.data)
    npt.assert_equal(tseries1.time, tseries2.time)
    tseries2 = tseries1 * 2
    npt.assert_equal(tseries1.data * 2, tseries2.data)
    npt.assert_equal(tseries1.time, tseries2.time)
    tseries2 = tseries2 / 2
    npt.assert_equal(tseries1.data, tseries2.data)
    npt.assert_equal(tseries1.time, tseries2.time)
    # Elementwise arithmetic between two n-dimensional TimeSeries.
    tseries_nd1 = ts.TimeSeries(np.random.randn(3, 100), sampling_rate=1)
    tseries_nd2 = ts.TimeSeries(np.random.randn(3, 100), sampling_rate=1)
    npt.assert_equal((tseries_nd1 + tseries_nd2).data,
                     tseries_nd1.data + tseries_nd2.data)
    npt.assert_equal((tseries_nd1 - tseries_nd2).data,
                     tseries_nd1.data - tseries_nd2.data)
    npt.assert_equal((tseries_nd1 * tseries_nd2).data,
                     tseries_nd1.data * tseries_nd2.data)
    npt.assert_equal((tseries_nd1 / tseries_nd2).data,
                     tseries_nd1.data / tseries_nd2.data)
def test_TimeSeries_repr():
    # This "test" consists solely of doctests (collected by the doctest
    # runner); the function body itself is intentionally empty. The doctest
    # text below is behavioral and must not be edited.
    """
    >>> t=ts.UniformTime(length=3,sampling_rate=3)
    >>> tseries1 = ts.TimeSeries(data=[3,5,8],time=t)
    >>> t.sampling_rate
    3.0 Hz
    >>> tseries1.sampling_rate
    3.0 Hz
    >>> tseries1 = ts.TimeSeries(data=[3,5,8],sampling_rate=3)
    >>> tseries1.time
    UniformTime([ 0.    ,  0.3333,  0.6667], time_unit='s')
    >>> tseries1.sampling_rate
    3.0 Hz
    >>> tseries1.sampling_interval
    0.333333333333 s
    >>> a = ts.UniformTime(length=1,sampling_rate=2)
    >>> b = ts.TimeSeries(data=[1,2,3],sampling_interval=a.sampling_interval)
    >>> b.sampling_rate
    2.0 Hz
    >>> a = ts.UniformTime(length=1,sampling_rate=1)
    >>> b = ts.TimeSeries(data=[1,2,3],sampling_interval=a.sampling_interval)
    >>> b.sampling_rate
    1.0 Hz
    """
def test_Epochs():
    """Test Epochs: durations, slicing time/timeseries objects with epochs."""
    tms = ts.TimeArray(data=list(range(100)), time_unit='ms')
    tmin = ts.TimeArray(data=list(range(100)), time_unit='m')
    tsec = ts.TimeArray(data=list(range(100)), time_unit='s')
    utms = ts.UniformTime(length=100, sampling_interval=1, time_unit='ms')
    utmin = ts.UniformTime(length=100, sampling_interval=1, time_unit='m')
    utsec = ts.UniformTime(length=100, sampling_interval=1, time_unit='s')
    tsms = ts.TimeSeries(data=list(range(100)), sampling_interval=1, time_unit='ms')
    tsmin = ts.TimeSeries(data=list(range(100)), sampling_interval=1, time_unit='m')
    tssec = ts.TimeSeries(data=list(range(100)), sampling_interval=1, time_unit='s')
    # one millisecond epoch
    e1ms = ts.Epochs(0, 1, time_unit='ms')
    e09ms = ts.Epochs(0.1, 1, time_unit='ms')
    msg = "Seems like a problem with copy=False in TimeArray constructor."
    npt.assert_equal(e1ms.duration, ts.TimeArray(1, time_unit='ms'), msg)
    # one day
    e1d = ts.Epochs(0, 1, time_unit='D')
    npt.assert_equal(e1d.duration, ts.TimeArray(1, time_unit='D'), msg)
    e1ms_ar = ts.Epochs([0, 0], [1, 1], time_unit='ms')
    for t in [tms, tmin, tsec, utms, utmin, utsec]:
        # the sample time arrays are all at least 1ms long, so this should
        # return a timearray that has exactly one time point in it
        npt.assert_equal(len(t.during(e1ms)), 1)
        # this time epoch should not contain any point
        npt.assert_equal(len(t.during(e09ms)), 0)
        # make sure, slicing doesn't change the class
        npt.assert_equal(type(t), type(t.during(e1ms)))
    for t in [tsms, tsmin, tssec]:
        # the sample time series are all at least 1ms long, so this should
        # return a timeseries that has exactly one time point in it
        npt.assert_equal(len(t.during(e1ms)), 1)
        # make sure, slicing doesn't change the class
        npt.assert_equal(type(t), type(t.during(e1ms)))
        # same thing but now there's an array of epochs
        e2 = ts.Epochs([0, 10], [10, 20], time_unit=t.time_unit)
        # make sure, slicing doesn't change the class for array of epochs
        npt.assert_equal(type(t), type(t.during(e2)))
        # Indexing with an array of epochs (all of which are the same length)
        npt.assert_equal(t[e2].data.shape, (2, 10))
        npt.assert_equal(len(t.during(e2)), 10)
        npt.assert_equal(t[e2].data.ndim, 2)
        # check the data at some timepoints (a dimension was added)
        npt.assert_equal(t[e2][0], (0, 10))
        npt.assert_equal(t[e2][1], (1, 11))
        # check the data for each epoch
        npt.assert_equal(t[e2].data[0], list(range(10)))
        npt.assert_equal(t[e2].data[1], list(range(10, 20)))
        npt.assert_equal(t[e2].duration, e2[0].duration)
        # slice with Epochs of different length (not supported for timeseries,
        # raise error, though future jagged array implementation could go here)
        ejag = ts.Epochs([0, 10], [10, 40], time_unit=t.time_unit)
        # next line is the same as t[ejag]
        with pytest.raises(ValueError) as e_info:
            t.__getitem__(ejag)
        # if an epoch lies entirely between samples in the timeseries,
        # return an empty array
        eshort = ts.Epochs(2.5, 2.7, time_unit=t.time_unit)
        npt.assert_equal(len(t[eshort].data), 0)
        e1ms_outofrange = ts.Epochs(200, 300, time_unit=t.time_unit)
        # assert that with the epoch moved outside of the time range of our
        # data, slicing with the epoch now yields an empty array
        with pytest.raises(ValueError) as e_info:
            # BUG FIX: this call used to be t.during(dict(e=...)), passing a
            # dict as the epoch (an automated nose.assert_raises ->
            # pytest.raises conversion artifact); the kwarg form exercises
            # the intended out-of-range error.
            t.during(e=e1ms_outofrange)
        # the sample timeseries are all shorter than a day, so this should
        # raise an error (instead of padding, or returning a shorter than
        # expected array.
        with pytest.raises(ValueError) as e_info:
            # BUG FIX: same dict(...)-wrapping conversion artifact as above.
            t.during(e=e1d)
def test_basic_slicing():
    """Slicing a TimeArray with an Epochs clips to the overlap with the data."""
    timeline = ts.TimeArray(list(range(4)))
    # An epoch [0.5, n + 0.5) covers exactly n of the integer time points.
    for n_points in range(3):
        epoch = ts.Epochs(.5, n_points + .5)
        npt.assert_equal(len(timeline[epoch]), n_points)
    # epoch starts before timeseries: only the first point is lost
    npt.assert_equal(len(timeline[ts.Epochs(-1, 3)]), len(timeline) - 1)
    # epoch ends after timeseries
    npt.assert_equal(len(timeline[ts.Epochs(.5, 5)]), len(timeline) - 1)
    # epoch starts before and ends after timeseries: everything is kept
    npt.assert_equal(len(timeline[ts.Epochs(-1, 100)]), len(timeline))
    # epoch entirely outside the time range selects nothing
    outside = ts.Epochs(20, 100)
    npt.assert_equal(len(timeline[outside]), 0)
def test_Events():
    """Exercise Events: construction, data/index access, and slicing forms."""
    # time has to be one-dimensional
    with pytest.raises(ValueError) as e_info:
        ts.Events(np.zeros((2, 2)))
    t = ts.TimeArray([1, 2, 3], time_unit='ms')
    # per-event data arrays and two index arrays used throughout the loop
    x = [1, 2, 3]
    y = [2, 4, 6]
    z = [10., 20., 30.]
    i0 = [0, 0, 1]
    i1 = [0, 1, 2]
    # Repeat the whole battery for several target units (None keeps t's unit)
    for unit in [None, 's', 'ns', 'D']:
        # events with data
        ev1 = ts.Events(t, time_unit=unit, i=x, j=y, k=z)
        # events with indices
        ev2 = ts.Events(t, time_unit=unit, indices=[i0, i1])
        # events with indices and labels
        ev3 = ts.Events(t, time_unit=unit, labels=['trial', 'other'],
                        indices=[i0, i1])
        # Note that the length of indices and labels has to be identical:
        with pytest.raises(ValueError) as e_info:
            ts.Events(t, time_unit=unit,
                      labels=['trial', 'other'], indices=[i0])  # only one of
                                                                # the indices!
        # make sure the time is retained
        npt.assert_equal(ev1.time, t)
        npt.assert_equal(ev2.time, t)
        # make sure time unit is correct
        if unit is not None:
            npt.assert_equal(ev1.time_unit, unit)
            npt.assert_equal(ev2.time_unit, unit)
        else:
            # no explicit unit requested: inherit from the time array
            npt.assert_equal(ev1.time_unit, t.time_unit)
            npt.assert_equal(ev2.time_unit, t.time_unit)
        # make sure we can extract data
        npt.assert_equal(ev1.data['i'], x)
        npt.assert_equal(ev1.data['j'], y)
        npt.assert_equal(ev1.data['k'], z)
        # make sure we can get the indices by label
        npt.assert_equal(ev3.index.trial, i0)
        npt.assert_equal(ev3.index.other, i1)
        # make sure we can get the indices by position
        npt.assert_equal(ev2.index.i0, i0)
        npt.assert_equal(ev2.index.i1, i1)
        # make sure slicing works
        # one_event = ts.Events(t[[0]],time_unit=unit,i=[x[0]],j=[y[0]],k=[z[0]])
        # regular indexing
        npt.assert_equal(ev1[0].data['i'], x[0])
        npt.assert_equal(ev1[0:2].data['i'], x[0:2])
        # indexing w/ time (a float selects by time value, not position)
        npt.assert_equal(ev1[0.].data['i'], x[0])
        # indexing w/ epoch
        ep = ts.Epochs(start=0, stop=1.5, time_unit='ms')
        npt.assert_equal(ev1[ep].data['i'], x[0])
        # fancy indexing (w/ boolean mask)
        npt.assert_equal(ev1[ev3.index.trial == 0].data['j'], y[0:2])
        # len() function is implemented and working
        assert len(t) == len(ev1) == len(ev2) == len(ev3)
def test_Events_scalar():
t = ts.TimeArray(1, time_unit='ms')
i, j = 4, 5
ev = ts.Events(t, i=i, j=j)
# The semantics of scalar indexing into events are such that the returned
# value is always a new Events object (the mental model | |
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation of data model for SVC monitor
"""
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common.vnc_db import DBBase
from cfgm_common import svc_info
class DBBaseSM(DBBase):
    """Common base class for all service-monitor (SM) config DB objects."""
    # Module name by default; concrete subclasses override obj_type with the
    # API object type they cache (e.g. 'loadbalancer').
    obj_type = __name__

    def evaluate(self):
        # Implement in the derived class
        pass
class LoadbalancerSM(DBBaseSM):
    """Cache entry for a 'loadbalancer' config object.

    Mirrors the API-server object and pushes add/update/delete events to
    the loadbalancer agent via the manager.
    """
    _dict = {}
    obj_type = 'loadbalancer'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        # single refs hold a peer uuid (or None); multiple refs hold a set
        self.virtual_machine_interface = None
        self.service_instance = None
        self.loadbalancer_listeners = set()
        # last config pushed to the loadbalancer agent (set in add())
        self.last_sent = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        # Refresh cached fields from obj; read from the API server when no
        # dict is supplied.
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.display_name = obj.get('display_name', None)
        self.parent_uuid = obj['parent_uuid']
        self.id_perms = obj.get('id_perms', None)
        self.params = obj.get('loadbalancer_properties', None)
        self.provider = obj.get('loadbalancer_provider', None)
        self.update_single_ref('virtual_machine_interface', obj)
        self.update_single_ref('service_instance', obj)
        self.update_multiple_refs('loadbalancer_listener', obj)
    # end update

    def add(self):
        # Push the current state to the agent and remember what was sent.
        self.last_sent = \
            self._manager.loadbalancer_agent.loadbalancer_add(self)
    # end add

    def evaluate(self):
        self.add()
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.loadbalancer_agent.delete_loadbalancer(obj)
        # clear refs (and backrefs on the peers) before dropping the entry
        obj.update_single_ref('virtual_machine_interface', {})
        obj.update_single_ref('service_instance', {})
        obj.update_multiple_refs('loadbalancer_listener', {})
        del cls._dict[uuid]
    # end delete
# end class LoadbalancerSM
class LoadbalancerListenerSM(DBBaseSM):
    """Cache entry for a 'loadbalancer_listener' config object."""
    _dict = {}
    obj_type = 'loadbalancer_listener'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.loadbalancer = None
        self.loadbalancer_pool = None
        self.last_sent = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields, reading from the API server if needed."""
        obj = self.read_obj(self.uuid) if obj is None else obj
        self.name = obj['fq_name'][-1]
        self.display_name = obj.get('display_name')
        self.parent_uuid = obj['parent_uuid']
        self.id_perms = obj.get('id_perms')
        self.params = obj.get('loadbalancer_listener_properties')
        for ref_field in ('loadbalancer', 'loadbalancer_pool'):
            self.update_single_ref(ref_field, obj)
    # end update

    def add(self):
        # notify the agent and remember the last pushed config
        self.last_sent = self._manager.loadbalancer_agent.listener_add(self)
    # end add

    def evaluate(self):
        self.add()
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        listener = cls._dict.get(uuid)
        if listener is None:
            return
        cls._manager.loadbalancer_agent.delete_listener(listener)
        for ref_field in ('loadbalancer', 'loadbalancer_pool'):
            listener.update_single_ref(ref_field, {})
        del cls._dict[uuid]
    # end delete
# end class LoadbalancerListenerSM
class LoadbalancerPoolSM(DBBaseSM):
    """Cache entry for a 'loadbalancer_pool' config object.

    The pool is the aggregation point: add() pushes the pool itself, its
    members and its virtual IP to the loadbalancer agent.
    """
    _dict = {}
    obj_type = 'loadbalancer_pool'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.members = set()
        self.loadbalancer_healthmonitors = set()
        self.service_instance = None
        self.virtual_machine_interface = None
        self.virtual_ip = None
        self.loadbalancer_listener = None
        self.loadbalancer_id = None
        # last config pushed to the agent (set in add())
        self.last_sent = None
        self.custom_attributes = []
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        # Refresh cached fields from obj; read from the API server when no
        # dict is supplied.
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.params = obj.get('loadbalancer_pool_properties', None)
        self.provider = obj.get('loadbalancer_pool_provider', None)
        kvpairs = obj.get('loadbalancer_pool_custom_attributes', None)
        if kvpairs:
            self.custom_attributes = kvpairs.get('key_value_pair', [])
        # children (members) are listed inline in the object dict
        self.members = set([lm['uuid']
                            for lm in obj.get('loadbalancer_members', [])])
        self.id_perms = obj.get('id_perms', None)
        self.parent_uuid = obj['parent_uuid']
        self.display_name = obj.get('display_name', None)
        self.update_single_ref('service_instance', obj)
        self.update_single_ref('virtual_ip', obj)
        self.update_single_ref('loadbalancer_listener', obj)
        self.update_single_ref('virtual_machine_interface', obj)
        self.update_multiple_refs('loadbalancer_healthmonitor', obj)
    # end update

    def add(self):
        # Resolve the owning loadbalancer through the listener, if any.
        if self.loadbalancer_listener:
            ll_obj = LoadbalancerListenerSM.get(self.loadbalancer_listener)
            # BUG FIX: the listener may not be in the cache yet; the
            # previous unconditional attribute access could raise
            # AttributeError on None (the member/VIP paths below already
            # guard the same way).
            if ll_obj:
                self.loadbalancer_id = ll_obj.loadbalancer
        self.last_sent = \
            self._manager.loadbalancer_agent.loadbalancer_pool_add(self)
        # push every cached member of this pool
        for member in self.members:
            member_obj = LoadbalancerMemberSM.get(member)
            if member_obj:
                member_obj.last_sent = \
                    self._manager.loadbalancer_agent.loadbalancer_member_add(
                        member_obj)
        # and the virtual IP, when present
        if self.virtual_ip:
            vip_obj = VirtualIpSM.get(self.virtual_ip)
            if vip_obj:
                vip_obj.last_sent = \
                    self._manager.loadbalancer_agent.virtual_ip_add(vip_obj)
    # end add

    def evaluate(self):
        self.add()
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.loadbalancer_agent.delete_loadbalancer_pool(obj)
        # clear refs (and backrefs on the peers) before dropping the entry
        obj.update_single_ref('service_instance', {})
        obj.update_single_ref('virtual_ip', {})
        obj.update_single_ref('loadbalancer_listener', {})
        obj.update_single_ref('virtual_machine_interface', {})
        obj.update_multiple_refs('loadbalancer_healthmonitor', {})
        del cls._dict[uuid]
    # end delete
# end class LoadbalancerPoolSM
class LoadbalancerMemberSM(DBBaseSM):
    """Cache entry for a 'loadbalancer_member' config object.

    Members are children of a loadbalancer pool; on creation the member
    registers itself in its parent pool's member set.
    """
    _dict = {}
    obj_type = 'loadbalancer_member'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.loadbalancer_pool = {}
        # last config pushed to the agent (set by LoadbalancerPoolSM.add())
        self.last_sent = None
        self.update(obj_dict)
        if self.loadbalancer_pool:
            parent = LoadbalancerPoolSM.get(self.loadbalancer_pool)
            # BUG FIX: the parent pool may not be in the cache yet;
            # delete() already guards this lookup, but the previous code
            # here dereferenced the result unconditionally and could raise
            # AttributeError on None.
            if parent:
                parent.members.add(self.uuid)
    # end __init__

    def update(self, obj=None):
        # Refresh cached fields; read from the API server when no dict given.
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.params = obj.get('loadbalancer_member_properties', None)
        self.loadbalancer_pool = self.get_parent_uuid(obj)
        self.id_perms = obj.get('id_perms', None)
    # end update

    def evaluate(self):
        # Members are pushed to the agent by their pool's add(); nothing to
        # do here.
        pass
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        cls._manager.loadbalancer_agent.delete_loadbalancer_member(obj)
        # deregister from the parent pool's member set, if it is cached
        if obj.loadbalancer_pool:
            parent = LoadbalancerPoolSM.get(obj.loadbalancer_pool)
            if parent:
                parent.members.discard(obj.uuid)
        del cls._dict[uuid]
    # end delete
# end class LoadbalancerMemberSM
class VirtualIpSM(DBBaseSM):
    """Cache entry for a 'virtual_ip' config object."""
    _dict = {}
    obj_type = 'virtual_ip'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.virtual_machine_interface = None
        self.loadbalancer_pool = None
        self.last_sent = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields, reading from the API server if needed."""
        obj = self.read_obj(self.uuid) if obj is None else obj
        self.name = obj['fq_name'][-1]
        self.params = obj.get('virtual_ip_properties')
        for ref_field in ('virtual_machine_interface', 'loadbalancer_pool'):
            self.update_single_ref(ref_field, obj)
        self.id_perms = obj.get('id_perms')
        self.parent_uuid = obj['parent_uuid']
        self.display_name = obj.get('display_name')
    # end update

    def evaluate(self):
        # Virtual IPs are pushed to the agent via LoadbalancerPoolSM.add();
        # nothing to do here.
        pass
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        vip = cls._dict.get(uuid)
        if vip is None:
            return
        cls._manager.loadbalancer_agent.delete_virtual_ip(vip)
        for ref_field in ('virtual_machine_interface', 'loadbalancer_pool'):
            vip.update_single_ref(ref_field, {})
        del cls._dict[uuid]
    # end delete
# end class VirtualIpSM
class HealthMonitorSM(DBBaseSM):
    """Cache entry for a 'loadbalancer_healthmonitor' config object."""
    _dict = {}
    obj_type = 'loadbalancer_healthmonitor'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.loadbalancer_pools = set()
        # last config pushed to the agent (set in update())
        self.last_sent = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.params = obj.get('loadbalancer_healthmonitor_properties', None)
        self.update_multiple_refs('loadbalancer_pool', obj)
        self.id_perms = obj.get('id_perms', None)
        self.parent_uuid = obj['parent_uuid']
        self.display_name = obj.get('display_name', None)
        # Unlike the other LB classes here, the agent is notified from
        # update() itself rather than from an add()/evaluate() pair.
        self.last_sent = self._manager.loadbalancer_agent.update_hm(self)
    # end update

    def evaluate(self):
        pass
    # end evaluate

    @classmethod
    def delete(cls, uuid):
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        # NOTE(review): there is no agent delete call here, unlike the
        # other LB classes — presumably the agent notices via pool updates;
        # confirm against the loadbalancer agent implementation.
        obj.update_multiple_refs('loadbalancer_pool', {})
        del cls._dict[uuid]
    # end delete
# end class HealthMonitorSM
class VirtualMachineSM(DBBaseSM):
    """Cache entry for a 'virtual_machine' config object."""
    _dict = {}
    obj_type = 'virtual_machine'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.service_instance = None
        # service_id keeps the last known SI uuid even after the ref is
        # dropped (used by evaluate() to detect orphaned VMs)
        self.service_id = None
        self.virtual_router = None
        self.virtual_machine_interfaces = set()
        self.virtualization_type = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        self.update_single_ref('service_instance', obj)
        self.update_single_ref('virtual_router', obj)
        self.update_multiple_refs('virtual_machine_interface', obj)
        if self.service_instance:
            self.service_id = self.service_instance
        self.display_name = obj.get('display_name', None)
        if self.display_name is None:
            return
        # The display name appears to encode SI metadata as five
        # '__'-separated parts ending in <index>__<virtualization-type> —
        # assumed from the parsing below; TODO confirm against the writer.
        display_list = self.display_name.split('__')
        if self.service_instance:
            if len(display_list) == 5:
                self.virtualization_type = display_list[-1]
                self.proj_fq_name = display_list[0:2]
                # instance index in the SI is 1-based in the name
                self.index = int(display_list[-2]) - 1
            else:
                # unknown instance index
                self.index = -1
    # end update

    @classmethod
    def delete(cls, uuid):
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.update_single_ref('service_instance', {})
        obj.update_single_ref('virtual_router', {})
        obj.update_multiple_refs('virtual_machine_interface', {})
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        # A VM that once belonged to a service instance but whose SI ref is
        # now gone is an orphan; ask the manager to clean it up.
        if self.service_id and not self.service_instance:
            self._manager.delete_service_instance(self)
# end VirtualMachineSM
class VirtualRouterSM(DBBaseSM):
    """Cache entry for a 'virtual_router' (compute node agent) object."""
    _dict = {}
    obj_type = 'virtual_router'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.agent_state = False
        self.agent_down_count = 0
        self.virtual_machines = set()
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields, reading from the API server if needed."""
        obj = self.read_obj(self.uuid) if obj is None else obj
        self.fq_name = obj['fq_name']
        self.name = self.fq_name[-1]
        self.update_multiple_refs('virtual_machine', obj)
    # end update

    @classmethod
    def delete(cls, uuid):
        router = cls._dict.get(uuid)
        if router is None:
            return
        router.update_multiple_refs('virtual_machine', {})
        del cls._dict[uuid]
    # end delete

    def set_agent_state(self, up):
        """Track agent liveness; mark down after every third missed report."""
        if not up:
            self.agent_down_count += 1
            if self.agent_down_count % 3 == 0:
                self.agent_state = False
            return
        self.agent_down_count = 0
        self.agent_state = True

    def set_netns_version(self, netns_version):
        self.netns_version = netns_version
# end VirtualRouterSM
class VirtualMachineInterfaceSM(DBBaseSM):
    """Cache entry for a 'virtual_machine_interface' (port) config object."""
    _dict = {}
    obj_type = 'virtual_machine_interface'

    def __init__(self, uuid, obj_dict=None):
        self.uuid = uuid
        self.params = None
        self.if_type = None
        self.virtual_ip = None
        self.loadbalancer = None
        self.virtual_network = None
        self.virtual_machine = None
        self.loadbalancer_pool = None
        self.logical_interface = None
        self.instance_ips = set()
        self.floating_ips = set()
        self.interface_route_tables = set()
        self.service_health_checks = set()
        self.security_groups = set()
        self.service_instance = None
        self.instance_id = None
        self.physical_interface = None
        self.port_tuple = None
        self.fat_flow_ports = set()
        # allowed-address-pairs list (flattened in update())
        self.aaps = None
        obj_dict = self.update(obj_dict)
        self.add_to_parent(obj_dict)
    # end __init__

    def update(self, obj=None):
        # Refresh cached fields from obj; read from the API server when no
        # dict is supplied. Returns the object dict for use by __init__.
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
        if obj.get('virtual_machine_interface_properties', None):
            self.params = obj['virtual_machine_interface_properties']
            self.if_type = self.params.get('service_interface_type', None)
        # flatten the allowed-address-pairs wrapper to the inner list
        self.aaps = obj.get(
            'virtual_machine_interface_allowed_address_pairs', None)
        if self.aaps:
            self.aaps = self.aaps.get('allowed_address_pair', None)
        # re-collect the fat-flow ports from scratch on every update
        self.fat_flow_ports.clear()
        ffps = obj.get('virtual_machine_interface_fat_flow_protocols', None)
        if ffps:
            for ffp in ffps.get('fat_flow_protocol', []):
                if ffp['port']:
                    self.fat_flow_ports.add(ffp['port'])
        self.update_single_ref('virtual_ip', obj)
        self.update_single_ref('loadbalancer', obj)
        self.update_single_ref('loadbalancer_pool', obj)
        self.update_multiple_refs('instance_ip', obj)
        self.update_multiple_refs('floating_ip', obj)
        self.update_single_ref('virtual_network', obj)
        self.update_single_ref('virtual_machine', obj)
        self.update_single_ref('logical_interface', obj)
        self.update_multiple_refs('interface_route_table', obj)
        self.update_multiple_refs('service_health_check', obj)
        self.update_single_ref('physical_interface', obj)
        self.update_multiple_refs('security_group', obj)
        self.update_single_ref('port_tuple', obj)
        # inherit the service instance from the owning VM, if cached
        if self.virtual_machine:
            vm = VirtualMachineSM.get(self.virtual_machine)
            if vm:
                self.service_instance = vm.service_instance
        return obj
    # end update

    @classmethod
    def delete(cls, uuid):
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        # clear every ref set in update() so no stale backrefs remain
        obj.update_single_ref('virtual_ip', {})
        obj.update_single_ref('loadbalancer', {})
        obj.update_single_ref('loadbalancer_pool', {})
        obj.update_multiple_refs('instance_ip', {})
        obj.update_multiple_refs('floating_ip', {})
        obj.update_single_ref('virtual_network', {})
        obj.update_single_ref('virtual_machine', {})
        obj.update_single_ref('logical_interface', {})
        obj.update_multiple_refs('interface_route_table', {})
        obj.update_multiple_refs('service_health_check', {})
        # BUG FIX: 'physical_interface' is set in update() but was never
        # cleared on delete, leaving a stale backref on the physical
        # interface object.
        obj.update_single_ref('physical_interface', {})
        obj.update_multiple_refs('security_group', {})
        obj.update_single_ref('port_tuple', {})
        obj.remove_from_parent()
        del cls._dict[uuid]
    # end delete

    def evaluate(self):
        vm = VirtualMachineSM.get(self.virtual_machine)
        if vm:
            self._manager.port_delete_or_si_link(vm, self)
        self._manager.port_tuple_agent.update_port_tuple(self)
# end VirtualMachineInterfaceSM
class ServiceInstanceSM(DBBaseSM):
_dict = {}
obj_type = 'service_instance'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_template = None
self.loadbalancer = None
self.loadbalancer_pool = None
self.interface_route_tables = {}
self.service_health_checks = {}
self.instance_ips = set()
self.virtual_machines = set()
self.logical_router = None
self.params = None
self.bindings = None
self.kvps = None
self.state = 'init'
self.launch_count = 0
self.back_off = -1
self.image = None
self.flavor = None
self.max_instances = 0
self.availability_zone = None
self.ha_mode = None
self.vr_id = None
self.vn_changed = False
self.local_preference = [None, None]
self.vn_info = []
self.port_tuples = set()
obj_dict = self.update(obj_dict)
self.set_children('port_tuple', obj_dict)
self.add_to_parent(obj_dict)
if self.ha_mode == 'active-standby':
| |
def test_asinh_taggedData_rank0(self):
    """asinh on scalar (rank-0) tagged Data: checks result type, shape and
    values for both the default and the tag-1 value (generated fixture)."""
    arg=Data(46.3645811357,self.functionspace)
    arg.setTaggedValue(1,98.4380067047)
    res=asinh(arg)
    ref=Data(4.52979928711,self.functionspace)
    ref.setTaggedValue(1,5.28259995573)
    self.assertTrue(isinstance(res,Data),"wrong type of result.")
    self.assertEqual(res.getShape(),(),"wrong shape of result.")
    self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank1(self):
    """asinh on rank-1 tagged Data against precomputed reference values
    (generated fixture)."""
    arg=Data(numpy.array([-50.957589662824198, 43.941100766909756]),self.functionspace)
    arg.setTaggedValue(1,numpy.array([-31.858501623280745, 39.107585495989866]))
    res=asinh(arg)
    ref=Data(numpy.array([-4.6242371551287169, 4.4761267522983275]),self.functionspace)
    ref.setTaggedValue(1,numpy.array([-4.1546976770753421, 4.3596270535740214]))
    self.assertTrue(isinstance(res,Data),"wrong type of result.")
    self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
    self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank2(self):
    """asinh on rank-2 (4x5) tagged Data against precomputed reference
    values (generated fixture)."""
    arg=Data(numpy.array([[-20.169170810618326, -4.4530711308543118, -5.3065110218440452, -8.4088220772265316,
    -56.444316808490115], [-33.229801569473778, -44.603828873814734, 39.260385275691903, -60.813530866399979, -67.011560484373405],
    [63.34900773972393, 13.17996875841969, -84.621298599133738, -27.161422270695113, 78.248898320973581], [-98.098038498193404,
    95.682616010306447, -58.113208847615525, -79.134026237356125, -29.391569621781727]]),self.functionspace)
    arg.setTaggedValue(1,numpy.array([[17.836298714213882, -77.588227218715232, -61.26367562584587, 19.375142389965802,
    89.334409995076243], [2.9065687663115227, 51.893692489828197, 11.895367000745495, -8.1024096735480953, 71.448735058484459],
    [-50.921060735037948, 40.334991542461438, -11.902046289316189, 56.33007303532878, -27.166995246623955], [-82.821608578095123,
    -91.599639663887103, 86.585921151704355, 48.186701674446084, -3.9531724905915979]]))
    res=asinh(arg)
    ref=Data(numpy.array([[-3.6979164153723203, -2.1991164930555258, -2.3708439269598305, -2.8259456306618453,
    -4.7264802495600335], [-4.1968206188493351, -4.4910925210264905, 4.3635253359028381, -4.8010270839542413, -4.898067997438317],
    [4.8418687142690242, 3.2732814095310392, -5.1313680826244967, -3.9952835475913395, 5.0530827588070446], [-5.2791405298139438,
    5.2542111175109474, -4.7556141841594481, -5.0643300499069621, -4.0741443379636699]]),self.functionspace)
    ref.setTaggedValue(1,numpy.array([[3.5751677238153401, -5.0446044112554631, -4.8084008830850919, 3.6578034038105751,
    5.1855652488628774], [1.7884793917224828, 4.642437253245169, 3.1710583006908988, -2.7890952467104695, 4.9621763496592148],
    [-4.6235201866096052, 4.3905201792510207, -3.17161767446549, 4.724454508591605, -3.9954885674968175], [-5.109872625312927,
    -5.2106043127238761, 5.1543177540685887, 4.5683379150373344, -2.0832921903606496]]))
    self.assertTrue(isinstance(res,Data),"wrong type of result.")
    self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
    self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank3(self):
    """asinh on rank-3 (6x2x2) tagged Data against precomputed reference
    values (generated fixture)."""
    arg=Data(numpy.array([[[-53.045516481019646, -34.703398617100873], [58.744179081028165, 45.73939652168292]],
    [[88.640179862797964, -15.929739850377061], [20.336500323486419, -26.009231077461465]], [[67.483452353018436,
    -83.415215077694313], [-43.73819066557256, 0.34649147770160482]], [[94.466567030758256, 57.78821000816265],
    [-93.07193931131404, -65.081452487206121]], [[-54.611456218841695, 17.51214150630156], [5.6853926345566492,
    38.237862836031212]], [[1.5782708895186488, -79.609362925181571], [47.883885039412519,
    99.778654373519828]]]),self.functionspace)
    arg.setTaggedValue(1,numpy.array([[[-81.045113203624751, 0.65914527304526871], [32.93544022845623, 58.747988317145939]],
    [[-12.311575835767854, -70.143366604591705], [-1.6980341384122681, -27.212534038212041]], [[55.458512265543362,
    -94.003044543229095], [-62.792580806533628, -27.521709794921676]], [[41.596851570120577, 44.487697223450283],
    [2.7831853943152538, -67.591557346139922]], [[47.14957401263112, -88.752613111599388], [91.038711972236257,
    18.784281872602193]], [[66.890360146771712, -3.1392983005148949], [-98.753784215323947, -58.363920786326858]]]))
    res=asinh(arg)
    ref=Data(numpy.array([[[-4.6643863622180026, -4.2401923259759915], [4.7664116866516526, 4.5162266515773712]],
    [[5.17776424960676, -3.4623187168170242], [3.7061684380230009, -3.9519680523364986]], [[4.9050844901356578,
    -5.1170138356918358], [-4.4714994864581099, 0.33990819970999336]], [[5.24142117780635, 4.7500068099460622],
    [-5.2265487748261839, -4.868845799503112]], [[-4.6934746764153612, 3.5568558212420287], [2.4386934192694749,
    4.3371443181086304]], [[1.2374103926768709, -5.0703183356413444], [4.5620352151292289,
    5.2961265670636051]]]),self.functionspace)
    ref.setTaggedValue(1,numpy.array([[[-5.0881911926886616, 0.61887608041426068], [4.1879268588038681, 4.7664765196432715]],
    [[-3.2053324091778697, -4.9437392306615724], [-1.2998232670923699, -3.997162286806859]], [[4.7088636688405723,
    -5.2365026413753837], [-4.8330475059782119, -4.008452214765553]], [[4.4213161140266797, 4.4884861572133286],
    [1.7475584270504236, -4.9066849811247062]], [[4.5465845923807562, -5.1790317883473698], [5.2044621654621297,
    3.6268753961564348]], [[4.8962579135667461, -1.8615996019862997], [-5.2858025387415069, -4.7599184690473226]]]))
    self.assertTrue(isinstance(res,Data),"wrong type of result.")
    self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
    self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank4(self):
    """asinh on rank-4 (3x2x3x4) tagged Data against precomputed reference
    values (generated fixture)."""
    arg=Data(numpy.array([[[[32.142378157394234, -7.8682084560468724, -32.972614582663724, 50.856847074540553],
    [72.329877464044415, 6.0619145811457571, 71.261710531993657, 70.2065904388474], [61.147646057937493, -26.137436099401938,
    48.323697144662191, 29.857105568663485]], [[81.14862167131389, -28.070075464932472, 54.484029947945999, 53.274297598689998],
    [51.817829777738496, 55.524654561168546, 31.893469267783274, 98.108247444728335], [25.185957882420567, 56.589702849849886,
    29.257428051768414, -49.316002216427599]]], [[[91.093502909783012, 30.593790782804035, -52.906781759597266,
    37.807168034506248], [91.33559788100942, 46.479645801342286, 45.285940387630603, 17.009006113589351], [98.990499666054916,
    20.732810397625983, -52.586859007443024, -97.39008994479434]], [[60.855541035297279, 43.563415593268758, -10.416755000859922,
    19.761378421237396], [45.545393669751689, 34.038254695973365, 61.458790464133983, -93.805588539667809], [70.373745615324566,
    -69.821983987919253, -17.526059272214738, 99.463265178516878]]], [[[42.375759778528959, -71.513498720101126,
    43.403494376930126, 11.702516371887256], [-68.02507709473943, -82.804863052600837, 17.935644233624799, -1.5560052642727271],
    [1.3086438337024902, 19.0159623777798, -43.415467037283427, -1.6840694232704436]], [[-76.523723879344232, 36.460220047753864,
    74.414529475659975, -40.585507061813097], [61.18925351487826, 60.973990669294437, -56.486512227103702, -91.992194442103738],
    [-50.821095523487195, -8.7683370172323407, 99.212906160042508, -49.787947715823513]]]]),self.functionspace)
    arg.setTaggedValue(1,numpy.array([[[[5.2104342432591579, -30.037610349220742, 89.76520642245714, 84.346276912645891],
    [-55.935949781763505, 3.6554505577462351, -69.732922792584205, -85.696618441581251], [34.087801962805088, -57.540358433913227,
    -66.932756076465267, -61.739307728871459]], [[-38.745454217109639, 47.2458765604907, -48.024451295756876, 98.938828051951276],
    [-18.113719915986181, 30.600562603733465, 62.13859672089356, 79.646004829972981], [62.93949402434626, 85.362178604516401,
    -79.088554588305286, -30.549957095115914]]], [[[-21.024971919379709, -46.9921546943443, -77.839828653838069,
    30.940535083915421], [70.790958255553022, -44.895476702573319, -36.648852352895432, 12.929335827946304], [-6.9478133681051872,
    -62.232340492245108, -42.755472485742985, -56.420558326951522]], [[-32.044278205615356, 79.157959500980951,
    -76.393704765628769, -52.443645665174962], [16.265823630600167, -55.034754577520985, -47.645861374723552, -89.04121484500331],
    [94.688526939952055, -16.61551229436607, -99.980912127854069, -47.709640655711503]]], [[[2.1087843309750127,
    -46.754097185308829, -43.01720776980433, 85.276796349298849], [-4.6606838290411474, -81.34895135365592, -85.417222857880887,
    -96.332056825957508], [-79.83581002747087, 21.866282224322433, 68.064610754277766, -47.003477247839534]],
    [[-62.743770898030562, 72.147582177197421, 69.296613493157508, 28.171166780459345], [75.529397553659948, -35.058371858520204,
    -28.47809790522318, -75.017021702145499], [-37.177757115795629, 38.676084888663922, -63.72524923587919, 1.7494417076027844]]]]))
    res=asinh(arg)
    ref=Data(numpy.array([[[[4.1635644265624778, -2.7599915358235867, -4.1890544070007278, 4.6222585911216543],
    [4.9744322502971672, 2.5019077838324715, 4.9595555706500258, 4.9446400848058811], [4.806505402870445, -3.9568815433193461,
    4.5711762859685958, 4.0898503596713338]], [[5.0894674517671321, -4.0281683959008951, 4.6911390175212615, 4.6686892479006827],
    [4.6409745686395487, 4.7100554093212761, 4.1557941279942145, 5.2792445878073861], [3.9198276751840293, 4.7290522782152147,
    4.0695726016881411, -4.5914985748389192]]], [[[5.2050637903432921, 4.1142112460980922, -4.6617680124018888,
    4.3258207506496307], [5.2077177590877843, 4.5322773741285998, 4.5062656785445956, 3.5277531738452392], [5.2881965746419173,
    3.7254457616153438, -4.6557038316413522, -5.2718979969231512]], [[4.8017175551692963, 4.4674965942390115, -3.0388587872128419,
    3.6775161997905594], [4.5119771682426055, 4.2208479181377072, 4.811580237341091, -5.234400023643297], [4.9470179186672389,
    -4.9391473745425918, -3.5576489640531608, 5.2929608319555426]]], [[[4.4398628679964025, -4.96308228583203, 4.4638198136319165,
    3.1547715938291749], [-4.9130776200696218, -5.1096704317305806, 3.5807134591217551, -1.225433176641918], [1.0837103112235609,
    3.6391165749315824, -4.4640955479804072, -1.2927150793063693]], [[-5.0307906787819405, 4.2895569942507352, 5.0028435354728717,
    -4.3967099542129784], [4.8071855230902134, 4.8036618081252804, -4.7272274114351864, -5.214880451554639], [-4.6215554958906369,
    -2.8675302639627946, 5.2904406860829596, -4.6010209599440053]]]]),self.functionspace)
    ref.setTaggedValue(1,numpy.array([[[[2.3528942953432184, -4.0958744228371096, 5.1903756488916493, 5.1281129886870076],
    [-4.7174343552973603, 2.0075716195155695, -4.937871144778371, -5.1439945873654223], [4.22230186808053, -4.7457092677460411,
    -4.8968914542943383, -4.8161335672436891]], [[-4.3503271096734606, 4.5486245418147186, -4.5649658426227173,
    5.2876744794258697], [-3.5905779156353059, 4.1144324497476124, 4.8225792420650633, 5.0707784647637046], [4.8353841364967165,
    5.1400846170845353, -5.0637553150269641, -4.112778220940899]]], [[[-3.7394231192664891, -4.5432410388930284,
    -5.0478416752898454, 4.1254753641580617], [4.9529283477765169, -4.4976082378834796, -4.2947153739876791, 3.2541381655755188],
    [-2.6367133653925436, -4.8240865342451933, -4.448781110277845, -4.726059308101533]], [[-4.1605091983900415, 5.0646324196519528,
    -5.0290903098032347, -4.6529772433272774], [3.4831569454011433, -4.7011945976992884, -4.5570530599693662, -5.1822780622147535],
    [5.2437679039808112, -3.5043882377854798, -5.2981514782154209, -4.5583904818282788]]], [[[1.4912530895747718,
    -4.5381634201485017, -4.4548824695960123, 5.1390839500525125], [-2.2436246899234331, -5.0919328992003212, -5.1407291969411935,
    -5.2609752679328521], [-5.0731585525994474, 3.7786154625242512, 4.9136585515533389, -4.5434819009655412]],
    [[-4.8322699827017939, 4.9719089789160158, 4.9315952755712287, 4.0317610464510798], [5.0177129533122331, -4.2503649608354834,
    -4.0425905960208626, -5.0109066456916977], [-4.3090386599181487, 4.3485357163877962, -4.8477895988422537,
    1.3256207429919689]]]]))
    self.assertTrue(isinstance(res,Data),"wrong type of result.")
    self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
    self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acosh_taggedData_rank0(self):
arg=Data(49.9810509193,self.functionspace)
arg.setTaggedValue(1,71.3408711101)
res=acosh(arg)
ref=Data(4.60469104168,self.functionspace)
ref.setTaggedValue(1,4.96056744693)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acosh_taggedData_rank1(self):
arg=Data(numpy.array([75.872128581964489, 31.270745005346555]),self.functionspace)
arg.setTaggedValue(1,numpy.array([91.194940269901991, 60.292904573535402]))
res=acosh(arg)
ref=Data(numpy.array([5.0221531537701187, 4.1355744181179075]),self.functionspace)
ref.setTaggedValue(1,numpy.array([5.2061165345882037, 4.7922928301529595]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    def test_acosh_taggedData_rank2(self):
        """acosh of tagged rank-2 Data.

        Verifies that the result is a Data object of shape (4, 5) and that
        both the default value and the tag-1 value agree with precomputed
        references to within self.RES_TOL relative to Lsup(ref).
        """
        # argument: default value plus a value registered for tag 1
        arg=Data(numpy.array([[13.716727126294922, 18.582048298979366, 7.5020529608606203, 37.240476559713919,
        47.923636526032062], [23.137297999502238, 93.601586495900719, 44.214564115710346, 36.167402243946711, 46.702642863490553],
        [23.270622841679405, 9.2774257115223389, 59.291871515770787, 33.506154158989204, 38.271499005024928], [46.757553911983621,
        6.8257457794847447, 22.981256925823288, 86.170385026518829, 23.420848755718815]]),self.functionspace)
        arg.setTaggedValue(1,numpy.array([[15.948822919888016, 2.6658485927005215, 60.224282793904251, 44.876404405068655,
        34.120337847111642], [62.222746267715351, 21.245738679003445, 45.817023654907636, 40.859047475717304, 58.128988691848726],
        [10.190092458920921, 48.417808389183413, 42.896938034834868, 70.93329041076818, 8.3231821063895897], [26.439411367064803,
        15.072763430534389, 72.467415365655967, 32.34764058755561, 40.90238765596505]]))
        res=acosh(arg)
        # reference: expected acosh of the default and the tagged values
        ref=Data(numpy.array([[3.3104318336497132, 3.6146183386321131, 2.7038519866369914, 4.3103631168831464,
        4.5626471481294111], [3.834125802828078, 5.2321659777026861, 4.4820735137429342, 4.2811142287291988, 4.5368332971619001],
        [3.839876941498396, 2.9178139584744245, 4.7755482825351914, 4.2046535493993629, 4.3376819221817646], [4.5380086345560136,
        2.6084392106579743, 3.8273524505590331, 5.1494400678077143, 3.8463177083491402]]),self.functionspace)
        ref.setTaggedValue(1,numpy.array([[3.4615479170038235, 1.6364755314613311, 4.7911538849173159, 4.4969351619322433,
        4.2228259954588951], [4.8238032312056154, 3.7487492070394848, 4.5176837838527817, 4.4031256752655068, 4.7557376697168952],
        [3.0121467628386225, 4.5729082091092721, 4.4518117430828532, 4.9548373538850878, 2.8085633470967837], [3.9676451082613573,
        3.4049343192673835, 4.9762365895508731, 4.1694492579956304, 4.4041861546123844]]))
        self.assertTrue(isinstance(res,Data),"wrong type of result.")
        self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
        self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    def test_acosh_taggedData_rank3(self):
        """acosh of tagged rank-3 Data.

        Verifies that the result is a Data object of shape (6, 2, 2) and that
        both the default value and the tag-1 value agree with precomputed
        references to within self.RES_TOL relative to Lsup(ref).
        """
        # argument: default value plus a value registered for tag 1
        arg=Data(numpy.array([[[96.020179154808503, 91.79778167929949], [95.326949143229541, 45.421316747623791]],
        [[30.65219771657458, 74.770295168847696], [77.989358990586055, 11.574100860239977]], [[92.626717442077236, 3.1700861207519435],
        [81.107542243865836, 58.693576539606504]], [[19.827981381026582, 98.929766771654783], [93.210281085417222,
        17.872534507474096]], [[15.212656462365901, 45.839114797078196], [67.995696601337741, 21.57180672061461]],
        [[88.431893439575802, 86.459272754032739], [93.400261681763538, 3.5041690372595453]]]),self.functionspace)
        arg.setTaggedValue(1,numpy.array([[[25.302663405742905, 90.965425641453351], [31.884491697764958, 35.880019812296034]],
        [[87.641297339838275, 20.377144107642813], [80.276026842238238, 90.841319587541577]], [[14.097491687234964,
        99.790641727293078], [14.181879052710332, 69.957347613100836]], [[81.947655870784715, 62.082411711815226], [8.6837333697858377,
        15.244370873759896]], [[61.74742196011465, 29.437516030577598], [54.649929929545088, 40.35589353447758]], [[94.022187035702345,
        83.335572962817793], [87.379860935581533, 36.951175898939482]]]))
        res=acosh(arg)
        # reference: expected acosh of the default and the tagged values
        ref=Data(numpy.array([[[5.2576784330296311, 5.212705644723707], [5.2504322211627388, 4.5090075081620471]],
        [[4.1155853549672061, 5.0075231423119817], [5.0496784696098871, 3.1400456206904903]], [[5.2216956660972746,
        1.8210457157023874], [5.0888851315296018, 4.7654048945882801]], [[3.6796048228214242, 5.2875318075733615], [5.2279764321968374,
        3.5756287593263258]], [[3.4141929052963755, 4.5181659425846679], [4.9125375214849907, 3.7639967265035321]],
        [[5.1753479006258116, 5.1527872028336335], [5.2300126684264212, 1.9260894616398991]]]),self.functionspace)
        ref.setTaggedValue(1,numpy.array([[[3.9236661269309159, 5.2035964629627127], [4.1550209149744246, 4.2731335198543867]],
        [[5.1663669487865587, 3.7069584430952047], [5.0785794154217756, 5.202231127665808]], [[3.3378837578237039, 5.2961964832588198],
        [3.3438668793761139, 4.9409818305305739]], [[5.0991906511154443, 4.8215450339501285], [2.8512667866370323,
        3.4162799784592983]], [[4.8161338295407301, 4.0751284773769649], [4.694011395692308, 4.3907310833319526]],
        [[5.2366496860798568, 5.11598668493777], [5.1633792680707424, 4.3025615032480333]]]))
        self.assertTrue(isinstance(res,Data),"wrong type of result.")
        self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
        self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    def test_acosh_taggedData_rank4(self):
        """acosh of tagged rank-4 Data.

        Verifies that the result is a Data object of shape (3, 2, 3, 4) and
        that both the default value and the tag-1 value agree with precomputed
        references to within self.RES_TOL relative to Lsup(ref).
        """
        # argument: default value plus a value registered for tag 1
        arg=Data(numpy.array([[[[46.496494307641363, 74.917094330056727, 39.893774938569727, 23.744943878587605],
        [1.9737426076200388, 56.13429325455833, 31.478338229941585, 76.686853948479268], [43.543067819658987, 81.289887895435285,
        32.113423511300105, 5.213549323262523]], [[26.577313488763004, 82.755886663842674, 6.4828955638004224, 81.780421145322038],
        [84.79256558820957, 69.233222959376874, 73.836164807553629, 87.692408248293873], [37.136000517418708, 90.288377224446137,
        62.614392713419683, 88.339987656018039]]], [[[61.202863958945962, 31.566286842895735, 7.1708278242804298, 98.950695215124099],
        [87.222678883207024, 86.95839324301987, 17.656917302211554, 54.991339984079993], [92.159416624775972, 31.425747720223157,
        47.207404840689208, 79.815101091507159]], [[13.75432234393317, 36.005105956151937, 80.930354510392675, 17.903169928485063],
        [37.209969721856766, 68.392829385096988, 68.225744945843331, 25.131306602144075], [57.726340455843392, 45.183440336464102,
        96.487976002311996, 74.482543907471182]]], [[[97.032639801911586, 59.169720141290711, 65.544382023430359, 27.350556781956005],
        [85.48226011720655, 8.7268878117714603, 49.450968175354753, 75.078362059466997], [47.954002472987767, 16.036826907987312,
        99.975563170888265, 78.829796914932373]], [[39.21420494818117, 42.262998162260104, 73.751675519611155, 51.828252577302301],
        [60.148666432515796, 37.443825584849876, 97.665835616597235, 78.975812123743339], [6.9121385596705096, 34.189572613115473,
        27.703168010672275, 50.045255814521546]]]]),self.functionspace)
        arg.setTaggedValue(1,numpy.array([[[[76.45018571903698, 24.717910838106601, 78.98873320779046, 62.765160850734503],
        [61.239518935391644, 70.008902728343131, 78.300714796360708, 34.932147232071522], [37.022585726034904, 58.180738950315579,
        27.287564890515544, 54.517546763840656]], [[15.093623698826033, 30.443962452205266, 89.802715985190773, 77.992879086297037],
        [37.816659621995385, 64.854538050781173, 81.326022233556586, 1.9993032471653205], [38.637332121131173, 32.158640232053635,
        71.342867154253426, 5.2704550021018708]]], [[[60.437096462714948, 49.090407043277665, 78.184244621340568, 60.917754368590664],
        [42.949146499752899, 31.471629405983144, 36.886647249542328, 40.010907031786985], [9.581053748614563, 32.843241151168968,
        75.216103702188008, 68.09522545374368]], [[82.504776175599545, 57.609847555036787, 95.669336674553534, 78.017033779006482],
        [40.298523228110923, 14.938993210257649, 31.561252137958434, 28.44443377692734], [24.326622031518038, 61.769365476509179,
        50.466775790330708, 40.289781067050903]]], [[[13.88323115651615, 6.714972583508235, 97.860470433016005, 75.032728358835342],
        [11.04088136407165, 77.052563320907453, 97.427382444573666, 33.006120873883368], [1.7554298156787875, 51.058303561715107,
        29.46416973203182, 94.334872484467382]], [[3.5895347426782043, 40.561254020265949, 67.84874109154778, 93.690445556218563],
        [25.256475539837954, 56.511124744109935, 3.5800990775641948, 63.00192152079822], [42.748122023741885, 80.763225726336117,
        74.43049456512324, 31.553184442256732]]]]))
        res=acosh(arg)
        # reference: expected acosh of the default and the tagged values
        ref=Data(numpy.array([[[[4.5324084412759778, 5.0094847279060319, 4.3792103554802608, 3.8600731017873722],
        [1.3016637290797095, 4.7208147464351278, 4.1421944195445226, 5.0328349651237305], [4.4667658109698571, 5.0911309744336259,
        4.1621787939300265, 2.3350809370517589]], [[3.9728510376447228, 5.109005824071593, 2.5563122244559442, 5.0971476629476182],
        [5.1333202761679582, 4.9305758676636238, 4.9949499704360436, 5.1669499996633848], [4.3075527147709547, 5.196125251025741,
        4.8300785753238706, 5.1743080104110604]]], [[[4.8072744174301363, 4.1449858695233406, 2.658270592315148, 5.2877433445020525],
        [5.1615786939465957, 5.1585438835194486, 3.5634719544667166, 4.7002402176468125], [5.2166376113963704, 4.1405214877183933,
        4.5475857430671622, 5.0728206578448036]], [[3.3131761972681315, 4.2766150387385435, 5.0866979727721846, 3.5773440805534538],
        [4.3095433003567933, 4.9183617154061148, 4.9159154531733078, 3.916865458190002], [4.7487857259228701, 4.50375535715325,
        5.2625387259978202, 5.0036669013457349]]], [[[5.2681680417753087, 4.7734856928164495, 4.8758164840313665, 4.0015496982806926],
        [5.1414218364571465, 2.8562570863627581, 4.5940265687942565, 5.01163522143636], [4.5632807172725283, 3.4670614367497063,
        5.29804795523276, 5.0603980061109874]], [[4.3620236184007943, 4.4369191414800992, 4.9938049295476628, 4.6409895153027874],
        [4.7898973432892191, 4.3158106467268684, 5.2746727815426553, 5.0622487258630153], [2.6211521330965311, 4.2248539437647477,
        4.014368049013294, 4.6059750587364245]]]]),self.functionspace)
        ref.setTaggedValue(1,numpy.array([[[[5.0297437651741488, 3.9002658632157274, 5.0624123337133149, 4.8324838701438377],
        [4.8078732275406084, 4.9417185850198733, 5.0536631335958884, 4.2463497717069103], [4.3044928897720416, 4.756627671189591,
        3.9992423655604172, 4.6915856649381364]], [[3.4063203831311251, 4.1087650332057839, 5.1907313988379462, 5.0497236078916279],
        [4.325722057994339, 4.8652346239002213, 5.0915754213748823, 1.3165555330279051], [4.3471986353008933, 4.1635865170827602,
        4.9605954282607705, 2.3461398613499078]]], [[[4.7946818261908994, 4.5867070641055383, 5.0521744319245858, 4.8026044713453375],
        [4.4530284006885319, 4.1419811640723054, 4.3008130131764508, 4.3821430718817362], [2.9502001551652342, 4.184661309810318,
        5.0134683412122536, 4.9140003614485606]], [[5.1059666365457144, 4.7467653633473841, 5.2540177010556652, 5.0500332892131663],
        [4.3893080251791545, 3.3959998775424971, 4.1448262804214515, 4.0407905386517378], [3.8842957690757829, 4.8164891884239784,
        4.6143642220081729, 4.3890909997782792]]], [[[3.3225293134567675, 2.5918958799586354, 5.276663767488472, 5.0110271683900773],
        [3.0926948705710546, 5.0375928992743964, 5.2722281468793524, 4.1896106435456426], [1.1625825203718756, 4.6260194561242551,
        4.0760340229896617, 5.2399700114788725]], [[1.9511766454658923, 4.3958084665836674, 4.910373700880009, 5.2331149143207876],
        [3.9218376124513488, 4.7275064044247292, 1.9484357218467823, 4.8362494165490313], [4.4483356113795223, 5.0846305854377469,
        5.0029677813756557, 4.1445704990230352]]]]))
        self.assertTrue(isinstance(res,Data),"wrong type of result.")
        self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
        self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank0(self):
arg=Data(-0.320619038958,self.functionspace)
arg.setTaggedValue(1,0.869122682798)
res=atanh(arg)
ref=Data(-0.332336921208,self.functionspace)
ref.setTaggedValue(1,1.32948203584)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank1(self):
arg=Data(numpy.array([-0.49724785679895611, 0.76485832136382981]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.71695223330373481, 0.98907589120670503]))
res=atanh(arg)
ref=Data(numpy.array([-0.5456433240595332, 1.0078187373348622]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.90134518516976136, 2.6022266354573262]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    def test_atanh_taggedData_rank2(self):
        """atanh of tagged rank-2 Data.

        Verifies that the result is a Data object of shape (4, 5) and that
        both the default value and the tag-1 value agree with precomputed
        references to within self.RES_TOL relative to Lsup(ref).
        """
        # argument: default value plus a value registered for tag 1
        arg=Data(numpy.array([[-0.41875373709407426, -0.031282543674564844, -0.37120980277072957, -0.33787277754808165,
        0.9056835178923357], [0.10920509246927712, -0.9517935928864919, -0.38928920166887748, 0.51987390317679982,
        -0.38673372014824514], [0.84666821394639546, 0.70139465198953088, 0.65524269199234908, -0.76892126906681368,
        0.53641715611532659], [0.8319590120911895, 0.54197223487670665, 0.96505599773867456, 0.18013767879594189,
        -0.23629819004673036]]),self.functionspace)
        arg.setTaggedValue(1,numpy.array([[0.36214053363992749, -0.664498666560283, -0.18821662111337301, -0.16146935463873657,
        0.6826053287306455], [0.94446504653387175, -0.33438894541106456, -0.024588916748005452, -0.85997299749442313,
        0.7590303783132617], [-0.17183976558739666, -0.58358085652249014, 0.31083502908173499, 0.85373153758284226,
        -0.75382778617691071], [0.02157269345526025, -0.2087677756939843, -0.3645241397483423, 0.076955395055613884,
        0.49258045667332828]]))
        res=atanh(arg)
        # reference: expected atanh of the default and the tagged values
        ref=Data(numpy.array([[-0.44617979391481238, -0.031292754010403323, -0.38982552275887766, -0.35168921785961199,
        1.5029700335665168], [0.10964234311011919, -1.8505060400721478, -0.41096200383131098, 0.57616694042294059,
        -0.40795359483226379], [1.2442671095703073, 0.8700403910046729, 0.78443110215353462, -1.017683354133686, 0.59911167917750008],
        [1.1944666231886989, 0.60694387161398944, 2.0147645883194851, 0.18212498120425324, -0.24084972556636608]]),self.functionspace)
        ref.setTaggedValue(1,numpy.array([[0.37934733814448002, -0.80082666251032752, -0.19048764725210868, -0.1628950195664004,
        0.8339763214069672], [1.7778647815902611, -0.34776162471495142, -0.024593874154403211, -1.2932409850373054,
        0.99392357017656985], [-0.17356179326165472, -0.66787580856210826, 0.32146948112818524, 1.2697561085057214,
        -0.98176231871677033], [0.021576040897969627, -0.21188262231678223, -0.38209346340171296, 0.077107850497316832,
        0.53946179405081751]]))
        self.assertTrue(isinstance(res,Data),"wrong type of result.")
        self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
        self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank3(self):
arg=Data(numpy.array([[[0.10445331614917319, 0.2730814888903883], [-0.60232593544217883, 0.96715501656915182]],
[[-0.17016809723013615, -0.089807528529218916], [0.23654377024927897, 0.83272135685004955]], [[0.016551420278897,
-0.38236850351537788], [-2.0657074242591555e-05, -0.40819212706994223]], [[-0.3729914622085253, 0.62722527860088206],
[0.82747007179222232, 0.25145176276119274]], [[-0.73980019966402311, 0.96693217416513644], [0.90586640577652378,
0.21899534641151908]], [[0.19566248084568705, 0.47149584732702499], [-0.48621869468657664,
-0.79464808240093432]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.12685419941171083, 0.86382397828941637], [0.55687203880422764,
-0.43398285832464556]], [[-0.063925399703885222, 0.92085617372342865], [0.55098040379713153, 0.39890555903637726]],
[[0.58521949389478389, -0.47732531221219043], [-0.52649872740584502, -0.05400171295475209]], [[-0.20264962628344207,
0.89825210951105694], [0.42220448045958414, | |
fh_out.write('\t'.join(fields)+'\n')
else: # chrom is not on the list
fh_out.write(line+'\n')
else: # chrom is not on the list
fh_out.write(line+'\n')
linenum = linenum +1
fh_log.write("In "+ str(table) + ": " +str(var_count) +' in ' + str(line_count) + ' variants\n')
fh_log.close()
conn.close()
fh.close()
fh_out.close()
""" Overlap with GadAll table """
def addOverlapWithGadAll(vcf, format='vcf', table='gadAll', tmpextin='', tmpextout='.1', sep='\t'):
    """Annotate variants with overlapping records from the gadAll table.

    Reads ``vcf + tmpextin`` line by line; for every variant whose position
    falls inside a [chromStart, chromEnd] interval of ``table``, appends
    de-duplicated ``<table>=<value>`` entries to the INFO column (field 8).
    Meta ("##") and header lines are copied through unchanged.  Output is
    written to ``vcf + tmpextout`` and hit counts are appended to
    ``vcf + '.count.log'``.

    :param vcf: base file name (temporary extensions are appended to it)
    :param format: input format, passed to getFormatSpecificIndices()
    :param table: annotation table to query
    :param tmpextin: extension of the input file
    :param tmpextout: extension of the output file
    :param sep: column separator of the input file
    """
    basefile = vcf
    infile = basefile + tmpextin
    outfile = basefile + tmpextout
    logcountfile = basefile + '.count.log'
    var_count = 0   # total overlapping table rows seen
    line_count = 0  # variants with at least one overlap
    inds = getFormatSpecificIndices(format=format)
    conn = sql_config.conn2annotator()
    cursor = conn.cursor()
    # context managers guarantee the handles are closed even if a query fails
    with open(infile) as fh, open(outfile, "w") as fh_out, open(logcountfile, 'a') as fh_log:
        for line in fh:
            line = line.strip()
            if not line.startswith("##"):
                if line.startswith('CHROM') or line.startswith('#CHROM'):
                    # column-header line: copy through
                    fh_out.write(line + '\n')
                else:
                    fields = line.split(sep)
                    chr = fields[inds[0]].strip()
                    # special case: this table stores chromosome names without
                    # the leading "chr" prefix
                    if chr.startswith("chr"):
                        chr = chr.replace("chr", "")
                    pos = fields[inds[1]].strip()
                    # NOTE(review): SQL built by string concatenation from file
                    # content; acceptable for trusted input, but a parameterized
                    # query would be safer (placeholder style is driver-specific).
                    sql = 'select * from ' + table + ' where chromosome="' + str(chr) + '" AND (chromStart <= ' + str(pos) + ' AND ' + str(pos) + ' <= chromEnd);'
                    cursor.execute(sql)
                    rows = cursor.fetchall()
                    if len(rows) > 0:
                        line_count = line_count + 1
                        seen = []   # values already annotated for this variant
                        records = []
                        for row in rows:
                            var_count = var_count + 1
                            # de-duplicate on column 3 before annotating
                            if not fu.isOnTheList(seen, str(row[3])):
                                seen.append(str(row[3]))
                                records.append(str(table) + '=' + str(row[3]))
                        if str(fields[7]).endswith(';'):
                            fields[7] = fields[7] + ';'.join(records)
                        else:
                            fields[7] = fields[7] + ';' + ';'.join(records)
                        fh_out.write('\t'.join(fields) + '\n')
                    else:
                        # no overlap: copy the variant line through unchanged
                        fh_out.write(line + '\n')
            else:
                # "##" meta line: copy through
                fh_out.write(line + '\n')
        fh_log.write("In " + str(table) + ": " + str(var_count) + ' in ' + str(line_count) + ' variants\n')
    conn.close()
""" Overlap with gwasCatalog table """
def addOverlapWithGwasCatalog(vcf, format='vcf', table='gwasCatalog', tmpextin='', tmpextout='.1', sep='\t'):
    """Annotate variants with pubMedID/trait records from the gwasCatalog table.

    For every variant whose position equals a ``chromEnd`` of ``table`` on the
    same chromosome, appends ``<table>=pubMedID=<id>,trait=<trait>`` entries
    to the INFO column (field 8).  Meta ("##") and header lines are copied
    through unchanged.  Output goes to ``vcf + tmpextout``; hit counts are
    appended to ``vcf + '.count.log'``.

    :param vcf: base file name (temporary extensions are appended to it)
    :param format: input format, passed to getFormatSpecificIndices()
    :param table: annotation table to query
    :param tmpextin: extension of the input file
    :param tmpextout: extension of the output file
    :param sep: column separator of the input file
    """
    basefile = vcf
    infile = basefile + tmpextin
    outfile = basefile + tmpextout
    logcountfile = basefile + '.count.log'
    var_count = 0   # total overlapping table rows seen
    line_count = 0  # variants with at least one overlap
    inds = getFormatSpecificIndices(format=format)
    conn = sql_config.conn2annotator()
    cursor = conn.cursor()
    # context managers guarantee the handles are closed even if a query fails
    with open(infile) as fh, open(outfile, "w") as fh_out, open(logcountfile, 'a') as fh_log:
        for line in fh:
            line = line.strip()
            if not line.startswith("##"):
                if line.startswith('CHROM') or line.startswith('#CHROM'):
                    # column-header line: copy through
                    fh_out.write(line + '\n')
                else:
                    fields = line.split(sep)
                    chr = fields[inds[0]].strip()
                    # this table uses "chr"-prefixed chromosome names
                    if not chr.startswith("chr"):
                        chr = "chr" + chr
                    pos = fields[inds[1]].strip()
                    # NOTE(review): SQL built by string concatenation from file
                    # content; a parameterized query would be safer.
                    sql = 'select * from ' + table + ' where chrom="' + str(chr) + '" AND chromEnd = ' + str(pos) + ';'
                    cursor.execute(sql)
                    rows = cursor.fetchall()
                    if len(rows) > 0:
                        line_count = line_count + 1
                        records = []
                        for row in rows:
                            var_count = var_count + 1
                            # row[5] = pubMedID, row[10] = associated trait
                            records.append(str(table) + '=' + 'pubMedID' + '=' + str(row[5]) + ',trait=' + str(row[10]))
                        if str(fields[7]).endswith(';'):
                            fields[7] = fields[7] + ';'.join(records)
                        else:
                            fields[7] = fields[7] + ';' + ';'.join(records)
                        fh_out.write('\t'.join(fields) + '\n')
                    else:
                        # no overlap: copy the variant line through unchanged
                        fh_out.write(line + '\n')
            else:
                # "##" meta line: copy through
                fh_out.write(line + '\n')
        fh_log.write("In " + str(table) + ": " + str(var_count) + ' in ' + str(line_count) + ' variants\n')
    conn.close()
""" Overlap with HUGO Gene Nomenclature Committee (HGNC) table """
def addOverlapWitHUGOGeneNomenclature(vcf, format='vcf', table='hugo', tmpextin='', tmpextout='.1', sep='\t'):
    """Annotate variants with HGNC gene annotations from the hugo table.

    For every variant whose position falls inside a [chromStart, chromEnd]
    interval of ``table``, appends de-duplicated
    ``HGNC_GeneAnnotation=<col5>,<col6>`` entries to the INFO column
    (field 8); any ';' inside the annotation text is replaced by ',' so the
    INFO separator stays unambiguous.  Meta ("##") and header lines are
    copied through unchanged.  Output goes to ``vcf + tmpextout``; hit counts
    are appended to ``vcf + '.count.log'``.

    :param vcf: base file name (temporary extensions are appended to it)
    :param format: input format, passed to getFormatSpecificIndices()
    :param table: annotation table to query
    :param tmpextin: extension of the input file
    :param tmpextout: extension of the output file
    :param sep: column separator of the input file
    """
    basefile = vcf
    infile = basefile + tmpextin
    outfile = basefile + tmpextout
    logcountfile = basefile + '.count.log'
    var_count = 0   # total overlapping table rows seen
    line_count = 0  # variants with at least one overlap
    inds = getFormatSpecificIndices(format=format)
    conn = sql_config.conn2annotator()
    cursor = conn.cursor()
    # context managers guarantee the handles are closed even if a query fails
    with open(infile) as fh, open(outfile, "w") as fh_out, open(logcountfile, 'a') as fh_log:
        for line in fh:
            line = line.strip()
            if not line.startswith("##"):
                if line.startswith('CHROM') or line.startswith('#CHROM'):
                    # column-header line: copy through
                    fh_out.write(line + '\n')
                else:
                    fields = line.split(sep)
                    chr = fields[inds[0]].strip()
                    # this table uses "chr"-prefixed chromosome names
                    if not chr.startswith("chr"):
                        chr = "chr" + chr
                    pos = fields[inds[1]].strip()
                    # NOTE(review): SQL built by string concatenation from file
                    # content; a parameterized query would be safer.
                    sql = 'select * from ' + table + ' where chrom="' + str(chr) + '" AND (chromStart <= ' + str(pos) + ' AND ' + str(pos) + ' <= chromEnd);'
                    cursor.execute(sql)
                    rows = cursor.fetchall()
                    if len(rows) > 0:
                        line_count = line_count + 1
                        seen = []   # annotation texts already emitted for this variant
                        records = []
                        for row in rows:
                            var_count = var_count + 1
                            # row[5]/row[6] hold the gene annotation text
                            t = str(str(row[5]) + ',' + str(row[6])).strip()
                            if not fu.isOnTheList(seen, t):
                                seen.append(t)
                                records.append('HGNC_GeneAnnotation' + '=' + t)
                        # keep ';' reserved as the INFO separator
                        records_str = ','.join(records).replace(';', ',')
                        if str(fields[7]).endswith(';'):
                            fields[7] = fields[7] + records_str
                        else:
                            fields[7] = fields[7] + ';' + records_str
                        fh_out.write('\t'.join(fields) + '\n')
                    else:
                        # no overlap: copy the variant line through unchanged
                        fh_out.write(line + '\n')
            else:
                # "##" meta line: copy through
                fh_out.write(line + '\n')
        fh_log.write("In " + str(table) + ": " + str(var_count) + ' in ' + str(line_count) + ' variants\n')
    conn.close()
""" Overlap with segdup regions genomicSuperDups"""
def addOverlapWithGenomicSuperDups(vcf, format='vcf', table='genomicSuperDups', tmpextin='', tmpextout='.1', sep='\t'):
    """Annotate variants with segmental-duplication overlap (genomicSuperDups).

    Every variant line gets
    ``<table>=<True|False>;otherChrom=..;otherStart=..;otherEnd=..`` appended
    to the INFO column (field 8) — even when no overlap was found, in which
    case the flag is False and the coordinates are empty.  Only the first
    overlapping row is reported.  Meta ("##") and header lines are copied
    through unchanged.  Output goes to ``vcf + tmpextout``; hit counts are
    appended to ``vcf + '.count.log'``.

    :param vcf: base file name (temporary extensions are appended to it)
    :param format: input format, passed to getFormatSpecificIndices()
    :param table: annotation table to query
    :param tmpextin: extension of the input file
    :param tmpextout: extension of the output file
    :param sep: column separator of the input file
    """
    basefile = vcf
    infile = basefile + tmpextin
    outfile = basefile + tmpextout
    logcountfile = basefile + '.count.log'
    var_count = 0   # variants with an overlap (one row counted per variant)
    line_count = 0  # same as var_count here; kept for the shared log format
    inds = getFormatSpecificIndices(format=format)
    conn = sql_config.conn2annotator()
    cursor = conn.cursor()
    # context managers guarantee the handles are closed even if a query fails
    with open(infile) as fh, open(outfile, "w") as fh_out, open(logcountfile, 'a') as fh_log:
        for line in fh:
            line = line.strip()
            if not line.startswith("##"):
                if line.startswith('CHROM') or line.startswith('#CHROM'):
                    # column-header line: copy through
                    fh_out.write(line + '\n')
                else:
                    fields = line.split(sep)
                    chr = fields[inds[0]].strip()
                    # this table uses "chr"-prefixed chromosome names
                    if not chr.startswith("chr"):
                        chr = "chr" + chr
                    pos = fields[inds[1]].strip()
                    isOverlap = False
                    otherChrom = ''
                    otherStart = ''
                    otherEnd = ''
                    # NOTE(review): SQL built by string concatenation from file
                    # content; a parameterized query would be safer.
                    sql = 'select * from ' + table + ' where chrom="' + str(chr) + '" AND (chromStart <= ' + str(pos) + ' AND ' + str(pos) + ' <= chromEnd);'
                    cursor.execute(sql)
                    row = cursor.fetchone()  # one matching duplication is enough
                    if row is not None:
                        line_count = line_count + 1
                        var_count = var_count + 1
                        isOverlap = True
                        # columns 7-9: coordinates of the paired duplication
                        otherChrom = row[7]
                        otherStart = row[8]
                        otherEnd = row[9]
                    # annotation is appended unconditionally (False + empty
                    # coordinates when there is no overlap)
                    fields[7] = fields[7] + ';' + str(table) + '=' + str(isOverlap) + ';' + 'otherChrom=' + str(otherChrom) + ';otherStart=' + str(otherStart) + ';otherEnd=' + str(otherEnd)
                    fh_out.write('\t'.join(fields) + '\n')
            else:
                # "##" meta line: copy through
                fh_out.write(line + '\n')
        fh_log.write("In " + str(table) + ": " + str(var_count) + ' in ' + str(line_count) + ' variants\n')
    conn.close()
""" Searches Genes Databases and returns Genes/Cytobands with which SNP or INDEL overlaps"""
def addOverlapWithRefGene(vcf, format='vcf', table='refGene', tmpextin='', tmpextout='.1', sep='\t'):
    """Annotate variants with overlapping genes from the refGene table.

    For every variant whose position lies inside a [txStart, txEnd] interval
    of ``table``, appends ``name2=<gene symbol>;name=<transcript>`` entries to
    the INFO column (field 8).  Meta ("##") and header lines are copied
    through unchanged.  Output goes to ``vcf + tmpextout``; hit counts are
    appended to ``vcf + '.count.log'``.

    :param vcf: base file name (temporary extensions are appended to it)
    :param format: input format, passed to getFormatSpecificIndices()
    :param table: annotation table to query
    :param tmpextin: extension of the input file
    :param tmpextout: extension of the output file
    :param sep: column separator of the input file
    """
    basefile = vcf
    infile = basefile + tmpextin
    outfile = basefile + tmpextout
    logcountfile = basefile + '.count.log'
    var_count = 0   # total overlapping transcripts seen
    line_count = 0  # variants with at least one overlap
    # refGene column layout
    colindex = 1    # transcript accession ("name")
    colindex2 = 12  # gene symbol ("name2")
    name = 'name'
    name2 = 'name2'
    startName = 'txStart'
    endName = 'txEnd'
    inds = getFormatSpecificIndices(format=format)
    conn = sql_config.conn2annotator()
    cursor = conn.cursor()
    # context managers guarantee the handles are closed even if a query fails
    with open(infile) as fh, open(outfile, "w") as fh_out, open(logcountfile, 'a') as fh_log:
        for line in fh:
            line = line.strip()
            if not line.startswith("##"):
                if line.startswith('CHROM') or line.startswith('#CHROM'):
                    # column-header line: copy through
                    fh_out.write(line + '\n')
                else:
                    fields = line.split(sep)
                    chr = fields[inds[0]].strip()
                    # this table uses "chr"-prefixed chromosome names
                    if not chr.startswith("chr"):
                        chr = "chr" + chr
                    pos = fields[inds[1]].strip()
                    # NOTE(review): SQL built by string concatenation from file
                    # content; a parameterized query would be safer.
                    sql = 'select * from ' + table + ' where chrom="' + str(chr) + '" AND (' + startName + ' <= ' + str(pos) + ' AND ' + str(pos) + ' <= ' + endName + ');'
                    overlapsWith = []
                    cursor.execute(sql)
                    rows = cursor.fetchall()
                    if len(rows) > 0:
                        line_count = line_count + 1
                        for row in rows:
                            var_count = var_count + 1
                            overlapsWith.append(name2 + '=' + str(row[colindex2]) + ';' + name + '=' + str(row[colindex]))
                    genes = ';'.join([str(x) for x in overlapsWith])
                    # NOTE(review): with no overlap this still appends an empty
                    # record, so a lone ';' may be added to INFO — kept for
                    # output compatibility with the original implementation.
                    if str(fields[7]).endswith(";"):
                        fields[7] = fields[7] + str(genes)
                    else:
                        fields[7] = fields[7] + ';' + str(genes)
                    fh_out.write('\t'.join(fields) + '\n')
            else:
                # "##" meta line: copy through
                fh_out.write(line + '\n')
        fh_log.write("In " + str(table) + ": " + str(var_count) + ' in ' + str(line_count) + ' variants\n')
    conn.close()
""" Method to find overlap with Cytoband table"""
def addOverlapWithCytoband(vcf, format='vcf', table='cytoBand', tmpextin='', tmpextout='.1', sep='\t'):
    """Annotate variants with the cytogenetic band(s) they fall into.

    For every variant, queries ``table`` for intervals containing the
    position and appends ``<table>=<band;band;...>`` (de-duplicated) to the
    INFO column (field 8); the annotation is appended even when no band was
    found (empty value).  Meta ("##") and header lines are copied through
    unchanged.  Output goes to ``vcf + tmpextout``; hit counts are appended
    to ``vcf + '.count.log'``.

    :param vcf: base file name (temporary extensions are appended to it)
    :param format: input format, passed to getFormatSpecificIndices()
    :param table: annotation table to query
    :param tmpextin: extension of the input file
    :param tmpextout: extension of the output file
    :param sep: column separator of the input file
    """
    basefile = vcf
    infile = basefile + tmpextin
    outfile = basefile + tmpextout
    logcountfile = basefile + '.count.log'
    var_count = 0   # total overlapping rows seen
    line_count = 0  # variants with at least one overlap
    # defaults cover refGene-style tables; cytoBand has its own column layout
    colindex = 12
    startName = 'txStart'
    endName = 'txEnd'
    if table == 'cytoBand':
        colindex = 3  # "name" column (band label) of cytoBand
        startName = 'chromStart'
        endName = 'chromEnd'
    inds = getFormatSpecificIndices(format=format)
    conn = sql_config.conn2annotator()
    cursor = conn.cursor()
    # context managers guarantee the handles are closed even if a query fails
    with open(infile) as fh, open(outfile, "w") as fh_out, open(logcountfile, 'a') as fh_log:
        for line in fh:
            line = line.strip()
            if not line.startswith("##"):
                if line.startswith('CHROM') or line.startswith('#CHROM'):
                    # column-header line: copy through
                    fh_out.write(line + '\n')
                else:
                    fields = line.split(sep)
                    chr = fields[inds[0]].strip()
                    # this table uses "chr"-prefixed chromosome names
                    if not chr.startswith("chr"):
                        chr = "chr" + chr
                    pos = fields[inds[1]].strip()
                    # NOTE(review): SQL built by string concatenation from file
                    # content; a parameterized query would be safer.
                    sql = 'select * from ' + table + ' where chrom="' + str(chr) + '" AND (' + startName + ' <= ' + str(pos) + ' AND ' + str(pos) + ' <= ' + endName + ');'
                    overlapsWith = []
                    cursor.execute(sql)
                    rows = cursor.fetchall()
                    if len(rows) > 0:
                        line_count = line_count + 1
                        for row in rows:
                            var_count = var_count + 1
                            overlapsWith.append(str(row[colindex]))
                    overlapsWith = u.dedup(overlapsWith)
                    cytoband = ';'.join([str(x) for x in overlapsWith])
                    # annotation is appended unconditionally (empty when no hit)
                    if str(fields[7]).endswith(";"):
                        fields[7] = fields[7] + str(table) + '=' + str(cytoband)
                    else:
                        fields[7] = fields[7] + ';' + str(table) + '=' + str(cytoband)
                    fh_out.write('\t'.join(fields) + '\n')
            else:
                # "##" meta line: copy through
                fh_out.write(line + '\n')
        fh_log.write("In " + str(table) + ": " + str(var_count) + ' in ' + str(line_count) + ' variants\n')
    conn.close()
""" Method to find overlap with CNV tables"""
def addOverlapWithCnvDatabase(vcf, format='vcf', table='dgv_Cnv', tmpextin='', tmpextout='.1', sep='\t'):
    """Flag variants that overlap a CNV region in the given table.

    For every variant whose position lies inside a [chromStart, chromEnd]
    interval of ``table``, appends ``<table>=True`` to the INFO column
    (field 8); non-overlapping variants are written unchanged.  Meta ("##")
    and header lines are copied through unchanged.  Output goes to
    ``vcf + tmpextout``; hit counts are appended to ``vcf + '.count.log'``.

    :param vcf: base file name (temporary extensions are appended to it)
    :param format: input format, passed to getFormatSpecificIndices()
    :param table: CNV annotation table to query
    :param tmpextin: extension of the input file
    :param tmpextout: extension of the output file
    :param sep: column separator of the input file
    """
    basefile = vcf
    infile = basefile + tmpextin
    outfile = basefile + tmpextout
    logcountfile = basefile + '.count.log'
    var_count = 0   # variants with an overlap (one row counted per variant)
    line_count = 0  # same as var_count here; kept for the shared log format
    inds = getFormatSpecificIndices(format=format)
    conn = sql_config.conn2annotator()
    cursor = conn.cursor()
    # context managers guarantee the handles are closed even if a query fails
    with open(infile) as fh, open(outfile, "w") as fh_out, open(logcountfile, 'a') as fh_log:
        for line in fh:
            line = line.strip()
            if not line.startswith("##"):
                if line.startswith('CHROM') or line.startswith('#CHROM'):
                    # column-header line: copy through
                    fh_out.write(line + '\n')
                else:
                    fields = line.split(sep)
                    chr = fields[inds[0]].strip()
                    # this table uses "chr"-prefixed chromosome names
                    if not chr.startswith("chr"):
                        chr = "chr" + chr
                    pos = fields[inds[1]].strip()
                    # NOTE(review): SQL built by string concatenation from file
                    # content; a parameterized query would be safer.
                    sql = 'select * from ' + table + ' where chrom="' + str(chr) + '" AND (chromStart <= ' + str(pos) + ' AND ' + str(pos) + ' <= chromEnd);'
                    cursor.execute(sql)
                    row = cursor.fetchone()  # one overlapping CNV is enough to flag
                    if row is not None:
                        line_count = line_count + 1
                        var_count = var_count + 1
                        isOverlap = True
                        if str(fields[7]).endswith(";"):
                            fields[7] = fields[7] + str(table) + '=' + str(isOverlap)
                        else:
                            fields[7] = fields[7] + ';' + str(table) + '=' + str(isOverlap)
                    fh_out.write('\t'.join(fields) + '\n')
            else:
                # "##" meta line: copy through
                fh_out.write(line + '\n')
        fh_log.write("In " + str(table) + ": " + str(var_count) + ' in ' + str(line_count) + ' variants\n')
    conn.close()
################
################
""" Method to find overlap with targetScanS tables"""
def addOverlapWithMiRNA(vcf, format='vcf', table='targetScanS', tmpextin='', tmpextout='.1', sep='\t'):
basefile=vcf
vcf=basefile+tmpextin
outfile=basefile+tmpextout
fh_out = open(outfile, "w")
fh = open(vcf)
logcountfile=basefile+'.count.log'
fh_log = open(logcountfile, 'a')
var_count=0
line_count=0
inds=getFormatSpecificIndices(format=format)
conn = sql_config.conn2annotator()
cursor = conn.cursor | |
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
request = build_delete_request(
url=self._config.url,
version=self._config.version,
timeout=timeout,
lease_id=_lease_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
delete.metadata = {'url': "{url}/{shareName}/{directory}/{fileName}"} # type: ignore
    @distributed_trace
    def set_http_headers(  # pylint: disable=inconsistent-return-statements
        self,
        timeout=None,  # type: Optional[int]
        file_content_length=None,  # type: Optional[int]
        file_permission="inherit",  # type: Optional[str]
        file_permission_key=None,  # type: Optional[str]
        file_attributes="none",  # type: str
        file_creation_time="now",  # type: str
        file_last_write_time="now",  # type: str
        file_http_headers=None,  # type: Optional["_models.FileHTTPHeaders"]
        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Sets HTTP headers on the file.

        :param timeout: The timeout parameter is expressed in seconds. For more information, see
         :code:`<a
         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
         Timeouts for File Service Operations.</a>`. Default value is None.
        :type timeout: int
        :param file_content_length: Resizes a file to the specified size. If the specified byte value
         is less than the current size of the file, then all ranges above the specified byte value are
         cleared. Default value is None.
        :type file_content_length: long
        :param file_permission: If specified the permission (security descriptor) shall be set for the
         directory/file. This header can be used if Permission size is <= 8KB, else
         x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as
         input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or
         x-ms-file-permission-key should be specified. Default value is "inherit".
        :type file_permission: str
        :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only
         one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value
         is None.
        :type file_permission_key: str
        :param file_attributes: If specified, the provided file attributes shall be set. Default value:
         ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default.
         Default value is "none".
        :type file_attributes: str
        :param file_creation_time: Creation time for the file/directory. Default value: Now. Default
         value is "now".
        :type file_creation_time: str
        :param file_last_write_time: Last write time for the file/directory. Default value: Now.
         Default value is "now".
        :type file_last_write_time: str
        :param file_http_headers: Parameter group. Default value is None.
        :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders
        :param lease_access_conditions: Parameter group. Default value is None.
        :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
        :keyword comp: comp. Default value is "properties". Note that overriding this default value may
         result in unsupported behavior.
        :paramtype comp: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map the common failure status codes onto typed azure-core exceptions;
        # callers may extend/override this mapping through an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        comp = kwargs.pop('comp', "properties")  # type: str

        # Flatten the optional parameter-group models (FileHTTPHeaders,
        # LeaseAccessConditions) into the individual values the request
        # builder expects; absent groups leave every value as None.
        _file_content_type = None
        _file_content_encoding = None
        _file_content_language = None
        _file_cache_control = None
        _file_content_md5 = None
        _file_content_disposition = None
        _lease_id = None
        if file_http_headers is not None:
            _file_content_type = file_http_headers.file_content_type
            _file_content_encoding = file_http_headers.file_content_encoding
            _file_content_language = file_http_headers.file_content_language
            _file_cache_control = file_http_headers.file_cache_control
            _file_content_md5 = file_http_headers.file_content_md5
            _file_content_disposition = file_http_headers.file_content_disposition
        if lease_access_conditions is not None:
            _lease_id = lease_access_conditions.lease_id

        # Build the raw HTTP request from the flattened values and the
        # client-wide configuration (account URL, service version).
        request = build_set_http_headers_request(
            url=self._config.url,
            comp=comp,
            version=self._config.version,
            timeout=timeout,
            file_content_length=file_content_length,
            file_content_type=_file_content_type,
            file_content_encoding=_file_content_encoding,
            file_content_language=_file_content_language,
            file_cache_control=_file_cache_control,
            file_content_md5=_file_content_md5,
            file_content_disposition=_file_content_disposition,
            file_permission=file_permission,
            file_permission_key=file_permission_key,
            file_attributes=file_attributes,
            file_creation_time=file_creation_time,
            file_last_write_time=file_last_write_time,
            lease_id=_lease_id,
            template_url=self.set_http_headers.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        # Send the request through the client pipeline synchronously.
        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        # 200 is the only documented success status for this operation; anything
        # else is surfaced as a typed error (via error_map) or HttpResponseError.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        # Deserialize the service response headers so a custom `cls` callback
        # (if supplied) can receive them alongside the raw pipeline response.
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
        response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
        response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key'))
        response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes'))
        response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time'))
        response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time'))
        response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time'))
        response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id'))
        response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id'))
        if cls:
            return cls(pipeline_response, None, response_headers)
    set_http_headers.metadata = {'url': "{url}/{shareName}/{directory}/{fileName}"}  # type: ignore
@distributed_trace
def set_metadata( # pylint: disable=inconsistent-return-statements
self,
timeout=None, # type: Optional[int]
metadata=None, # type: Optional[Dict[str, str]]
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Updates user-defined metadata for the specified file.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`. Default value is None.
:type timeout: int
:param metadata: A name-value pair to associate with a file storage object. Default value is
None.
:type metadata: dict[str, str]
:param lease_access_conditions: Parameter group. Default value is None.
:type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
:keyword comp: comp. Default value is "metadata". Note that overriding this default value may
result in unsupported behavior.
:paramtype comp: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = kwargs.pop('comp', "metadata") # type: str
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
request = build_set_metadata_request(
url=self._config.url,
comp=comp,
version=self._config.version,
timeout=timeout,
metadata=metadata,
lease_id=_lease_id,
template_url=self.set_metadata.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
if cls:
return cls(pipeline_response, None, response_headers)
set_metadata.metadata = {'url': "{url}/{shareName}/{directory}/{fileName}"} # type: ignore
@distributed_trace
def acquire_lease( # pylint: disable=inconsistent-return-statements
self,
timeout=None, # type: Optional[int]
duration=None, # type: Optional[int]
proposed_lease_id=None, # type: Optional[str]
request_id_parameter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""[Update] The Lease File operation establishes and manages a lock on a file for write and delete
operations.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`. Default value is None.
:type timeout: int
:param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
duration cannot be changed using renew or change. Default value is None.
:type duration: int
:param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
Constructor (String) for a list of valid GUID string formats. Default value is None.
:type proposed_lease_id: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
value is None.
:type request_id_parameter: str
:keyword comp: comp. Default value is "lease". Note that overriding this default value may
result in unsupported behavior.
:paramtype comp: str
:keyword action: Describes what lease action to take. Default value is "acquire". Note that
overriding this default value may result in unsupported behavior.
:paramtype action: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = kwargs.pop('comp', "lease") # type: str
action = kwargs.pop('action', "acquire") # type: str
request = build_acquire_lease_request(
url=self._config.url,
comp=comp,
action=action,
version=self._config.version,
timeout=timeout,
duration=duration,
proposed_lease_id=proposed_lease_id,
request_id_parameter=request_id_parameter,
template_url=self.acquire_lease.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
acquire_lease.metadata = {'url': "{url}/{shareName}/{directory}/{fileName}"} # type: ignore
@distributed_trace
def release_lease( # pylint: disable=inconsistent-return-statements
self,
lease_id, # type: str
timeout=None, # type: Optional[int]
| |
# bathy_smoother/bathy_smoother/LP_bathy_tools.py
import numpy as np
from bathy_smoother import bathy_smoothing
# This code is adapted from the matlab code
# "LP Bathymetry" by <NAME>
# http://drobilica.irb.hr/~mathieu/Bathymetry/index.html
# For a description of the method, see
# <NAME>, <NAME>, <NAME>, A new approach to
# bathymetry smoothing in sigma-coordinate ocean models, Ocean
# Modelling 29 (2009) 128--136.
def GetIJS_rx0(MSK, DEP, r):
    """Assemble, in sparse triplet form, the inequalities bounding the rx0
    roughness factor of a bathymetry perturbation.

    Parameters
    ----------
    MSK : 2-D array
        Land/sea mask; cells equal to 1 are active (wet) points.
    DEP : 2-D array
        Bathymetry depths, same shape as MSK.
    r : float
        Target rx0 roughness bound.

    Returns
    -------
    iList, jList, sList : (nEntry, 1) arrays
        1-based row index, 1-based column index and coefficient of each
        sparse constraint-matrix entry.
    Constant : (nConst, 1) array
        Right-hand side of each inequality.
    """
    eta_rho, xi_rho = DEP.shape
    print('eta_rho = ', eta_rho, ' xi_rho = ', xi_rho)
    # Number the wet points 1..TotalNbVert in row-major order.
    ListCoord = np.zeros((eta_rho, xi_rho))
    nbVert = 0
    for iEta, iXi in np.ndindex(eta_rho, xi_rho):
        if MSK[iEta, iXi] == 1:
            nbVert += 1
            ListCoord[iEta, iXi] = nbVert
    TotalNbVert = nbVert
    print('ListCoord built')
    print('Computing inequalities for r = ', r)
    # First pass: count constraints and entries so arrays can be preallocated.
    TotalNbConstant = 0
    TotalNbEntry = 0
    for iEta, iXi in np.ndindex(eta_rho - 1, xi_rho):
        if MSK[iEta, iXi] == 1 and MSK[iEta + 1, iXi] == 1:
            TotalNbConstant += 2
            TotalNbEntry += 4
    for iEta, iXi in np.ndindex(eta_rho, xi_rho - 1):
        if MSK[iEta, iXi] == 1 and MSK[iEta, iXi + 1] == 1:
            TotalNbConstant += 2
            TotalNbEntry += 4
    # Two extra constraints (dh <= ad and -dh <= ad) for every wet point.
    TotalNbConstant += 2 * TotalNbVert
    TotalNbEntry += 4 * TotalNbVert
    Constant = np.zeros((TotalNbConstant, 1))
    iList = np.zeros((TotalNbEntry, 1))
    jList = np.zeros((TotalNbEntry, 1))
    sList = np.zeros((TotalNbEntry, 1))
    nbConst = 0
    nbEntry = 0

    def _add_inequality(cst, col_a, coef_a, col_b, coef_b):
        # Record one constraint (row) consisting of exactly two sparse entries.
        nonlocal nbConst, nbEntry
        Constant[nbConst, 0] = cst
        nbConst += 1
        for col, coef in ((col_a, coef_a), (col_b, coef_b)):
            iList[nbEntry, 0] = nbConst
            jList[nbEntry, 0] = col
            sList[nbEntry, 0] = coef
            nbEntry += 1

    # rx0 bounds between vertical neighbours (iEta, iEta+1).
    for iEta, iXi in np.ndindex(eta_rho - 1, xi_rho):
        if MSK[iEta, iXi] == 1 and MSK[iEta + 1, iXi] == 1:
            idx1 = ListCoord[iEta, iXi]
            idx2 = ListCoord[iEta + 1, iXi]
            _add_inequality((1 + r) * DEP[iEta + 1, iXi] + (r - 1) * DEP[iEta, iXi],
                            idx2, -1 - r, idx1, 1 - r)
            _add_inequality((1 + r) * DEP[iEta, iXi] + (r - 1) * DEP[iEta + 1, iXi],
                            idx1, -r - 1, idx2, 1 - r)
    print('Inequalities for dh(iEta,iXi) and dh(iEta+1,iXi)')
    # rx0 bounds between horizontal neighbours (iXi, iXi+1).
    for iEta, iXi in np.ndindex(eta_rho, xi_rho - 1):
        if MSK[iEta, iXi] == 1 and MSK[iEta, iXi + 1] == 1:
            idx1 = ListCoord[iEta, iXi]
            idx2 = ListCoord[iEta, iXi + 1]
            _add_inequality((1 + r) * DEP[iEta, iXi + 1] + (r - 1) * DEP[iEta, iXi],
                            idx2, -r - 1, idx1, 1 - r)
            _add_inequality((1 + r) * DEP[iEta, iXi] + (r - 1) * DEP[iEta, iXi + 1],
                            idx1, -r - 1, idx2, 1 - r)
    print('Inequalities for dh(iEta,iXi) and dh(iEta,iXi+1)')
    # |dh| <= ad at every wet point, written as dh - ad <= 0 and -dh - ad <= 0;
    # the ad variables occupy columns TotalNbVert+1 .. 2*TotalNbVert.
    for iEta, iXi in np.ndindex(eta_rho, xi_rho):
        if MSK[iEta, iXi] == 1:
            idx = ListCoord[iEta, iXi]
            _add_inequality(0, TotalNbVert + idx, -1, idx, 1)
            _add_inequality(0, TotalNbVert + idx, -1, idx, -1)
    print('Inequalities dh <= ad and -dh <= ad')
    print('rx0: nbEntry = ', nbEntry, ' nbConst = ', nbConst)
    print(' ')
    # Sanity check: the fill pass must agree with the counting pass.
    if abs(nbEntry - TotalNbEntry) > 0:
        raise ValueError('We have a coding inconsistency for nbEntry. Please correct')
    if abs(nbConst - TotalNbConstant) > 0:
        raise ValueError('We have a coding inconsistency for nbConst. Please correct')
    return iList, jList, sList, Constant
def GetIJS_maxamp(MSK, DEP, AmpConst):
    """Assemble, in sparse triplet form, the amplitude inequalities
    |h_new - h_old| <= alpha * h_old for every wet point whose amplitude
    limit alpha = AmpConst[iEta, iXi] is below the 9999 sentinel
    (9999 or more means "unconstrained").

    Returns the (nEntry, 1) triplet arrays iList, jList, sList and the
    (nConst, 1) right-hand-side array Constant.
    """
    eta_rho, xi_rho = DEP.shape
    print('eta_rho = ', eta_rho, ' xi_rho = ', xi_rho)
    # Number the wet points 1..nbVert in row-major order.
    ListCoord = np.zeros((eta_rho, xi_rho))
    nbVert = 0
    for iEta, iXi in np.ndindex(eta_rho, xi_rho):
        if MSK[iEta, iXi] == 1:
            nbVert += 1
            ListCoord[iEta, iXi] = nbVert
    # Count constrained points to preallocate the output arrays.
    TotalNbConstant = 0
    TotalNbEntry = 0
    for iEta, iXi in np.ndindex(eta_rho, xi_rho):
        if MSK[iEta, iXi] == 1 and AmpConst[iEta, iXi] < 9999:
            TotalNbConstant += 2
            TotalNbEntry += 2
    nbConst = 0
    nbEntry = 0
    Constant = np.zeros((TotalNbConstant, 1))
    iList = np.zeros((TotalNbEntry, 1))
    jList = np.zeros((TotalNbEntry, 1))
    sList = np.zeros((TotalNbEntry, 1))
    for iEta, iXi in np.ndindex(eta_rho, xi_rho):
        if MSK[iEta, iXi] == 1:
            idx = ListCoord[iEta, iXi]
            alpha = AmpConst[iEta, iXi]
            if alpha < 9999:
                # Two one-entry rows: -dh <= alpha*h, then dh <= alpha*h.
                for coef in (-1, 1):
                    Constant[nbConst, 0] = alpha * DEP[iEta, iXi]
                    iList[nbEntry, 0] = nbConst + 1
                    jList[nbEntry, 0] = idx
                    sList[nbEntry, 0] = coef
                    nbConst += 1
                    nbEntry += 1
    print('Inequalities |h^{new} - h^{old}| <= alpha h^{old}')
    print('maxamp: nbEntry = ', nbEntry, ' nbConst = ', nbConst)
    print(' ')
    # Sanity check: the fill pass must agree with the counting pass.
    if abs(nbEntry - TotalNbEntry) > 0:
        raise ValueError('We have a coding inconsistency for nbEntry. Please correct')
    if abs(nbConst - TotalNbConstant) > 0:
        raise ValueError('We have a coding inconsistency for nbConst. Please correct')
    return iList, jList, sList, Constant
def GetIJS_signs(MSK, SignConst):
    """Assemble, in sparse triplet form, sign constraints on the depth
    perturbation dh: SignConst[iEta, iXi] == 1 forces dh >= 0, -1 forces
    dh <= 0, 0 leaves the point unconstrained.

    Returns the (nEntry, 1) triplet arrays iList, jList, sList and the
    (nConst, 1) right-hand-side array Constant (all zeros here).
    """
    eta_rho, xi_rho = MSK.shape
    print('eta_rho = ', eta_rho, ' xi_rho = ', xi_rho)
    # Number the wet points 1..nbVert in row-major order.
    ListCoord = np.zeros((eta_rho, xi_rho))
    nbVert = 0
    for iEta, iXi in np.ndindex(eta_rho, xi_rho):
        if MSK[iEta, iXi] == 1:
            nbVert += 1
            ListCoord[iEta, iXi] = nbVert
    # Count sign-constrained wet points to preallocate the output arrays.
    TotalNbConstant = 0
    TotalNbEntry = 0
    for iEta, iXi in np.ndindex(eta_rho, xi_rho):
        if MSK[iEta, iXi] == 1 and SignConst[iEta, iXi] != 0:
            TotalNbConstant += 1
            TotalNbEntry += 1
    nbConst = 0
    nbEntry = 0
    Constant = np.zeros((TotalNbConstant, 1))
    iList = np.zeros((TotalNbEntry, 1))
    jList = np.zeros((TotalNbEntry, 1))
    sList = np.zeros((TotalNbEntry, 1))
    for iEta, iXi in np.ndindex(eta_rho, xi_rho):
        sign = SignConst[iEta, iXi]
        if MSK[iEta, iXi] == 1 and sign != 0:
            Constant[nbConst, 0] = 0
            nbConst += 1
            iList[nbEntry, 0] = nbConst
            jList[nbEntry, 0] = ListCoord[iEta, iXi]
            if sign == 1:
                sList[nbEntry, 0] = -1   # dh >= 0, written as -dh <= 0
            elif sign == -1:
                sList[nbEntry, 0] = 1    # dh <= 0
            else:
                raise ValueError('Wrong assigning please check SignConst')
            nbEntry += 1
    print('Inequalities dh >= 0 or dh <= 0')
    print('signs: nbEntry = ', nbEntry, ' nbConst = ', nbConst)
    print(' ')
    # Sanity check: the fill pass must agree with the counting pass.
    if abs(nbEntry - TotalNbEntry) > 0:
        raise ValueError('We have a coding inconsistency for nbEntry. Please correct')
    if abs(nbConst - TotalNbConstant) > 0:
        raise ValueError('We have a coding inconsistency for nbConst. Please correct')
    return iList, jList, sList, Constant
def MergeIJS_listings(iList1, jList1, sList1, Constant1, iList2, jList2, sList2, Constant2):
    """Merge two sparse inequality systems over the same set of variables.

    Suppose we have two sets of inequalities for two linear programs with the
    same variables, each presented in sparse triplet form.  The constraints of
    system 2 are appended after those of system 1, so system 2's row indices
    are shifted by the number of constraints in system 1; column indices and
    coefficients are kept as-is.

    Parameters
    ----------
    iList1, jList1, sList1 : (nEnt1, 1) arrays
        Row/column indices and coefficients of system 1.
    Constant1 : (nConst1, 1) array
        Right-hand side of system 1.
    iList2, jList2, sList2, Constant2 :
        Same layout for system 2.

    Returns
    -------
    iList, jList, sList, Constant : arrays
        The merged system, in the same (n, 1) layout.
    """
    nbConst1 = Constant1.shape[0]
    # Vectorized concatenation replaces the original element-by-element
    # Python copy loops; .astype(float) keeps the float64 output dtype that
    # the preallocated np.zeros arrays previously guaranteed.
    Constant = np.vstack((Constant1, Constant2)).astype(float)
    # Shift system 2's row indices past system 1's constraints.
    iList = np.vstack((iList1, nbConst1 + iList2)).astype(float)
    jList = np.vstack((jList1, jList2)).astype(float)
    sList = np.vstack((sList1, sList2)).astype(float)
    return iList, jList, sList, Constant
def GetBadPoints(MSK, DEP, rx0max):
    """Return a 0/1 mask marking the points whose depth the positive-rx0
    smoother would change, i.e. the points currently violating rx0max."""
    RetBathy = bathy_smoothing.smoothing_Positive_rx0(MSK, DEP, rx0max)
    MSKbad = np.zeros(MSK.shape)
    # A point is "bad" exactly where smoothing altered the bathymetry.
    MSKbad[np.where(RetBathy != DEP)] = 1
    return MSKbad
def Neighborhood(MSK, iEta, iXi, Kdist):
eta_rho, xi_rho = MSK.shape
MaxSiz = (2 * Kdist + 1) * (2 * Kdist + 1)
ListNeigh = np.zeros((MaxSiz,2), dtype=np.int)
ListStatus = -1 * np.ones((MaxSiz,1), dtype=np.int)
ListKeys = np.zeros((MaxSiz,1), dtype=np.int)
eKey = iEta + (eta_rho+1) * iXi
ListNeigh[0,0] = iEta
ListNeigh[0,1] = iXi
ListStatus[0,0] = 0
ListKeys[0,0] = eKey
nbPt = 1
List4dir = np.array([[1, 0],
[0, 1],
[-1, 0],
[0, -1]])
for iK in range(1,Kdist+1):
nbPtOld = nbPt
for iPt in range(nbPtOld):
if (ListStatus[iPt,0] == iK-1):
iEta = ListNeigh[iPt,0]
iXi = ListNeigh[iPt,1]
for ineigh in range(4):
iEtaN = iEta + List4dir[ineigh,0]
iXiN = iXi + List4dir[ineigh,1]
if (iEtaN <= eta_rho-1 and iEtaN >= 0 and iXiN <= xi_rho-1 \
and iXiN >= 0 and MSK[iEtaN,iXiN] == 1):
eKeyN = iEtaN + (eta_rho+1)*iXiN
Kf = np.where(ListKeys == eKeyN)
nbKf = np.size(Kf,1)
if (nbKf == 0):
ListNeigh[nbPt,0] = iEtaN
ListNeigh[nbPt,1] = iXiN
ListStatus[nbPt,0] = iK
| |
1055,5,146.0,176,Adult Care,1970-01-01 06:56:00,0.2,24.0,0.36
1056,10,47154.0,177,Leisure,1970-01-01 06:57:00,63.93,4276.0,63.27
1057,6,12515.0,177,Work and Education,1970-01-01 06:57:00,16.97,1122.0,16.6
1058,11,6417.0,177,Travel and Other,1970-01-01 06:57:00,8.7,608.0,9.0
1059,3,3900.0,177,Housework,1970-01-01 06:57:00,5.29,443.0,6.56
1060,4,3632.0,177,Child Care,1970-01-01 06:57:00,4.92,285.0,4.22
1061,5,140.0,177,Adult Care,1970-01-01 06:57:00,0.19,24.0,0.36
1062,10,47145.0,178,Leisure,1970-01-01 06:58:00,63.92,4276.0,63.27
1063,6,12588.0,178,Work and Education,1970-01-01 06:58:00,17.07,1131.0,16.74
1064,11,6380.0,178,Travel and Other,1970-01-01 06:58:00,8.65,604.0,8.94
1065,3,3880.0,178,Housework,1970-01-01 06:58:00,5.26,443.0,6.56
1066,4,3630.0,178,Child Care,1970-01-01 06:58:00,4.92,281.0,4.16
1067,5,135.0,178,Adult Care,1970-01-01 06:58:00,0.18,23.0,0.34
1068,10,47130.0,179,Leisure,1970-01-01 06:59:00,63.9,4280.0,63.33
1069,6,12635.0,179,Work and Education,1970-01-01 06:59:00,17.13,1137.0,16.82
1070,11,6360.0,179,Travel and Other,1970-01-01 06:59:00,8.62,597.0,8.83
1071,3,3873.0,179,Housework,1970-01-01 06:59:00,5.25,442.0,6.54
1072,4,3625.0,179,Child Care,1970-01-01 06:59:00,4.91,279.0,4.13
1073,5,135.0,179,Adult Care,1970-01-01 06:59:00,0.18,23.0,0.34
1074,10,47138.0,180,Leisure,1970-01-01 07:00:00,63.91,4279.0,63.32
1075,6,12651.0,180,Work and Education,1970-01-01 07:00:00,17.15,1139.0,16.85
1076,11,6350.0,180,Travel and Other,1970-01-01 07:00:00,8.61,600.0,8.88
1077,3,3865.0,180,Housework,1970-01-01 07:00:00,5.24,441.0,6.53
1078,4,3619.0,180,Child Care,1970-01-01 07:00:00,4.91,277.0,4.1
1079,5,135.0,180,Adult Care,1970-01-01 07:00:00,0.18,22.0,0.33
1080,10,40209.0,181,Leisure,1970-01-01 07:01:00,54.51,3647.0,53.97
1081,6,14285.0,181,Work and Education,1970-01-01 07:01:00,19.37,1276.0,18.88
1082,11,8892.0,181,Travel and Other,1970-01-01 07:01:00,12.06,826.0,12.22
1083,3,5397.0,181,Housework,1970-01-01 07:01:00,7.32,611.0,9.04
1084,4,4802.0,181,Child Care,1970-01-01 07:01:00,6.51,363.0,5.37
1085,5,173.0,181,Adult Care,1970-01-01 07:01:00,0.23,35.0,0.52
1086,10,40219.0,182,Leisure,1970-01-01 07:02:00,54.53,3641.0,53.88
1087,6,14341.0,182,Work and Education,1970-01-01 07:02:00,19.44,1279.0,18.93
1088,11,8857.0,182,Travel and Other,1970-01-01 07:02:00,12.01,831.0,12.3
1089,3,5415.0,182,Housework,1970-01-01 07:02:00,7.34,615.0,9.1
1090,4,4754.0,182,Child Care,1970-01-01 07:02:00,6.45,358.0,5.3
1091,5,172.0,182,Adult Care,1970-01-01 07:02:00,0.23,34.0,0.5
1092,10,40230.0,183,Leisure,1970-01-01 07:03:00,54.54,3643.0,53.91
1093,6,14431.0,183,Work and Education,1970-01-01 07:03:00,19.57,1284.0,19.0
1094,11,8778.0,183,Travel and Other,1970-01-01 07:03:00,11.9,816.0,12.07
1095,3,5414.0,183,Housework,1970-01-01 07:03:00,7.34,617.0,9.13
1096,4,4744.0,183,Child Care,1970-01-01 07:03:00,6.43,364.0,5.39
1097,5,161.0,183,Adult Care,1970-01-01 07:03:00,0.22,34.0,0.5
1098,10,40254.0,184,Leisure,1970-01-01 07:04:00,54.58,3637.0,53.82
1099,6,14484.0,184,Work and Education,1970-01-01 07:04:00,19.64,1290.0,19.09
1100,11,8716.0,184,Travel and Other,1970-01-01 07:04:00,11.82,818.0,12.1
1101,3,5395.0,184,Housework,1970-01-01 07:04:00,7.31,616.0,9.12
1102,4,4749.0,184,Child Care,1970-01-01 07:04:00,6.44,362.0,5.36
1103,5,160.0,184,Adult Care,1970-01-01 07:04:00,0.22,35.0,0.52
1104,10,40266.0,185,Leisure,1970-01-01 07:05:00,54.59,3638.0,53.83
1105,6,14509.0,185,Work and Education,1970-01-01 07:05:00,19.67,1294.0,19.15
1106,11,8705.0,185,Travel and Other,1970-01-01 07:05:00,11.8,819.0,12.12
1107,3,5401.0,185,Housework,1970-01-01 07:05:00,7.32,614.0,9.09
1108,4,4721.0,185,Child Care,1970-01-01 07:05:00,6.4,358.0,5.3
1109,5,156.0,185,Adult Care,1970-01-01 07:05:00,0.21,35.0,0.52
1110,10,40108.0,186,Leisure,1970-01-01 07:06:00,54.38,3654.0,54.07
1111,6,14992.0,186,Work and Education,1970-01-01 07:06:00,20.33,1332.0,19.71
1112,11,8438.0,186,Travel and Other,1970-01-01 07:06:00,11.44,799.0,11.82
1113,3,5319.0,186,Housework,1970-01-01 07:06:00,7.21,588.0,8.7
1114,4,4731.0,186,Child Care,1970-01-01 07:06:00,6.41,347.0,5.13
1115,5,170.0,186,Adult Care,1970-01-01 07:06:00,0.23,38.0,0.56
1116,10,40120.0,187,Leisure,1970-01-01 07:07:00,54.39,3655.0,54.08
1117,6,15023.0,187,Work and Education,1970-01-01 07:07:00,20.37,1335.0,19.75
1118,11,8454.0,187,Travel and Other,1970-01-01 07:07:00,11.46,802.0,11.87
1119,3,5304.0,187,Housework,1970-01-01 07:07:00,7.19,587.0,8.69
1120,4,4698.0,187,Child Care,1970-01-01 07:07:00,6.37,343.0,5.08
1121,5,159.0,187,Adult Care,1970-01-01 07:07:00,0.22,36.0,0.53
1122,10,40132.0,188,Leisure,1970-01-01 07:08:00,54.41,3659.0,54.14
1123,6,15081.0,188,Work and Education,1970-01-01 07:08:00,20.45,1337.0,19.78
1124,11,8418.0,188,Travel and Other,1970-01-01 07:08:00,11.41,802.0,11.87
1125,3,5289.0,188,Housework,1970-01-01 07:08:00,7.17,583.0,8.63
1126,4,4684.0,188,Child Care,1970-01-01 07:08:00,6.35,341.0,5.05
1127,5,154.0,188,Adult Care,1970-01-01 07:08:00,0.21,36.0,0.53
1128,10,40144.0,189,Leisure,1970-01-01 07:09:00,54.43,3662.0,54.19
1129,6,15116.0,189,Work and Education,1970-01-01 07:09:00,20.49,1340.0,19.83
1130,11,8417.0,189,Travel and Other,1970-01-01 07:09:00,11.41,803.0,11.88
1131,3,5268.0,189,Housework,1970-01-01 07:09:00,7.14,581.0,8.6
1132,4,4659.0,189,Child Care,1970-01-01 07:09:00,6.32,336.0,4.97
1133,5,154.0,189,Adult Care,1970-01-01 07:09:00,0.21,36.0,0.53
1134,10,40142.0,190,Leisure,1970-01-01 07:10:00,54.42,3664.0,54.22
1135,6,15126.0,190,Work and Education,1970-01-01 07:10:00,20.51,1341.0,19.84
1136,11,8423.0,190,Travel and Other,1970-01-01 07:10:00,11.42,802.0,11.87
1137,3,5270.0,190,Housework,1970-01-01 07:10:00,7.14,581.0,8.6
1138,4,4641.0,190,Child Care,1970-01-01 07:10:00,6.29,334.0,4.94
1139,5,156.0,190,Adult Care,1970-01-01 07:10:00,0.21,36.0,0.53
1140,10,39821.0,191,Leisure,1970-01-01 07:11:00,53.99,3609.0,53.4
1141,6,15657.0,191,Work and Education,1970-01-01 07:11:00,21.23,1397.0,20.67
1142,11,8234.0,191,Travel and Other,1970-01-01 07:11:00,11.16,795.0,11.76
1143,3,5153.0,191,Housework,1970-01-01 07:11:00,6.99,575.0,8.51
1144,4,4721.0,191,Child Care,1970-01-01 07:11:00,6.4,347.0,5.13
1145,5,172.0,191,Adult Care,1970-01-01 07:11:00,0.23,35.0,0.52
1146,10,39829.0,192,Leisure,1970-01-01 07:12:00,54.0,3611.0,53.43
1147,6,15684.0,192,Work and Education,1970-01-01 07:12:00,21.26,1397.0,20.67
1148,11,8254.0,192,Travel and Other,1970-01-01 07:12:00,11.19,798.0,11.81
1149,3,5143.0,192,Housework,1970-01-01 07:12:00,6.97,578.0,8.55
1150,4,4685.0,192,Child Care,1970-01-01 07:12:00,6.35,340.0,5.03
1151,5,163.0,192,Adult Care,1970-01-01 07:12:00,0.22,34.0,0.5
1152,10,39853.0,193,Leisure,1970-01-01 07:13:00,54.03,3609.0,53.4
1153,6,15746.0,193,Work and Education,1970-01-01 07:13:00,21.35,1402.0,20.75
1154,11,8238.0,193,Travel and Other,1970-01-01 07:13:00,11.17,799.0,11.82
1155,3,5113.0,193,Housework,1970-01-01 07:13:00,6.93,577.0,8.54
1156,4,4647.0,193,Child Care,1970-01-01 07:13:00,6.3,338.0,5.0
1157,5,161.0,193,Adult Care,1970-01-01 07:13:00,0.22,33.0,0.49
1158,10,39865.0,194,Leisure,1970-01-01 07:14:00,54.05,3609.0,53.4
1159,6,15776.0,194,Work and Education,1970-01-01 07:14:00,21.39,1406.0,20.8
1160,11,8220.0,194,Travel and Other,1970-01-01 07:14:00,11.14,801.0,11.85
1161,3,5106.0,194,Housework,1970-01-01 07:14:00,6.92,575.0,8.51
1162,4,4632.0,194,Child Care,1970-01-01 07:14:00,6.28,334.0,4.94
1163,5,159.0,194,Adult Care,1970-01-01 07:14:00,0.22,33.0,0.49
1164,10,39874.0,195,Leisure,1970-01-01 07:15:00,54.06,3615.0,53.49
1165,6,15797.0,195,Work and Education,1970-01-01 07:15:00,21.42,1406.0,20.8
1166,11,8203.0,195,Travel and Other,1970-01-01 07:15:00,11.12,796.0,11.78
1167,3,5105.0,195,Housework,1970-01-01 07:15:00,6.92,579.0,8.57
1168,4,4620.0,195,Child Care,1970-01-01 07:15:00,6.26,329.0,4.87
1169,5,159.0,195,Adult Care,1970-01-01 07:15:00,0.22,33.0,0.49
1170,10,38511.0,196,Leisure,1970-01-01 07:16:00,52.21,3484.0,51.55
1171,6,16586.0,196,Work and Education,1970-01-01 07:16:00,22.49,1492.0,22.08
1172,11,8759.0,196,Travel and Other,1970-01-01 07:16:00,11.88,837.0,12.39
1173,3,5030.0,196,Housework,1970-01-01 07:16:00,6.82,582.0,8.61
1174,4,4684.0,196,Child Care,1970-01-01 07:16:00,6.35,326.0,4.82
1175,5,188.0,196,Adult Care,1970-01-01 07:16:00,0.25,37.0,0.55
1176,10,38502.0,197,Leisure,1970-01-01 07:17:00,52.2,3486.0,51.58
1177,6,16620.0,197,Work and Education,1970-01-01 07:17:00,22.53,1497.0,22.15
1178,11,8785.0,197,Travel and Other,1970-01-01 07:17:00,11.91,835.0,12.36
1179,3,5037.0,197,Housework,1970-01-01 07:17:00,6.83,573.0,8.48
1180,4,4635.0,197,Child Care,1970-01-01 07:17:00,6.28,331.0,4.9
1181,5,179.0,197,Adult Care,1970-01-01 07:17:00,0.24,36.0,0.53
1182,10,38494.0,198,Leisure,1970-01-01 07:18:00,52.19,3487.0,51.6
1183,6,16697.0,198,Work and Education,1970-01-01 07:18:00,22.64,1501.0,22.21
1184,11,8760.0,198,Travel and Other,1970-01-01 07:18:00,11.88,826.0,12.22
1185,3,5017.0,198,Housework,1970-01-01 07:18:00,6.8,575.0,8.51
1186,4,4618.0,198,Child Care,1970-01-01 07:18:00,6.26,333.0,4.93
1187,5,172.0,198,Adult Care,1970-01-01 07:18:00,0.23,36.0,0.53
1188,10,38483.0,199,Leisure,1970-01-01 07:19:00,52.17,3487.0,51.6
1189,6,16738.0,199,Work and Education,1970-01-01 07:19:00,22.69,1506.0,22.28
1190,11,8753.0,199,Travel and Other,1970-01-01 07:19:00,11.87,823.0,12.18
1191,3,5021.0,199,Housework,1970-01-01 07:19:00,6.81,569.0,8.42
1192,4,4593.0,199,Child Care,1970-01-01 07:19:00,6.23,337.0,4.99
1193,5,170.0,199,Adult Care,1970-01-01 07:19:00,0.23,36.0,0.53
1194,10,38493.0,200,Leisure,1970-01-01 07:20:00,52.19,3487.0,51.6
1195,6,16763.0,200,Work and Education,1970-01-01 07:20:00,22.73,1509.0,22.33
1196,11,8740.0,200,Travel and Other,1970-01-01 07:20:00,11.85,825.0,12.21
1197,3,5014.0,200,Housework,1970-01-01 07:20:00,6.8,569.0,8.42
1198,4,4579.0,200,Child Care,1970-01-01 07:20:00,6.21,333.0,4.93
1199,5,169.0,200,Adult Care,1970-01-01 07:20:00,0.23,35.0,0.52
1200,10,37902.0,201,Leisure,1970-01-01 07:21:00,51.39,3456.0,51.14
1201,6,17406.0,201,Work and Education,1970-01-01 07:21:00,23.6,1582.0,23.41
1202,11,8818.0,201,Travel and Other,1970-01-01 07:21:00,11.96,811.0,12.0
1203,3,4854.0,201,Housework,1970-01-01 07:21:00,6.58,549.0,8.12
1204,4,4603.0,201,Child Care,1970-01-01 07:21:00,6.24,318.0,4.71
1205,5,175.0,201,Adult Care,1970-01-01 07:21:00,0.24,42.0,0.62
1206,10,37920.0,202,Leisure,1970-01-01 07:22:00,51.41,3455.0,51.12
1207,6,17450.0,202,Work and Education,1970-01-01 07:22:00,23.66,1584.0,23.44
1208,11,8803.0,202,Travel and Other,1970-01-01 07:22:00,11.93,818.0,12.1
1209,3,4859.0,202,Housework,1970-01-01 07:22:00,6.59,547.0,8.09
1210,4,4556.0,202,Child Care,1970-01-01 07:22:00,6.18,311.0,4.6
1211,5,170.0,202,Adult Care,1970-01-01 07:22:00,0.23,43.0,0.64
1212,10,37926.0,203,Leisure,1970-01-01 07:23:00,51.42,3450.0,51.05
1213,6,17523.0,203,Work and Education,1970-01-01 07:23:00,23.76,1591.0,23.54
1214,11,8726.0,203,Travel and Other,1970-01-01 07:23:00,11.83,822.0,12.16
1215,3,4865.0,203,Housework,1970-01-01 07:23:00,6.6,551.0,8.15
1216,4,4547.0,203,Child Care,1970-01-01 07:23:00,6.16,300.0,4.44
1217,5,171.0,203,Adult Care,1970-01-01 07:23:00,0.23,44.0,0.65
1218,10,37927.0,204,Leisure,1970-01-01 07:24:00,51.42,3447.0,51.01
1219,6,17574.0,204,Work and Education,1970-01-01 07:24:00,23.83,1598.0,23.65
1220,11,8688.0,204,Travel and Other,1970-01-01 07:24:00,11.78,816.0,12.07
1221,3,4866.0,204,Housework,1970-01-01 07:24:00,6.6,550.0,8.14
1222,4,4532.0,204,Child Care,1970-01-01 07:24:00,6.14,302.0,4.47
1223,5,171.0,204,Adult Care,1970-01-01 07:24:00,0.23,45.0,0.67
1224,10,37930.0,205,Leisure,1970-01-01 07:25:00,51.42,3445.0,50.98
1225,6,17600.0,205,Work and Education,1970-01-01 07:25:00,23.86,1598.0,23.65
1226,11,8679.0,205,Travel and Other,1970-01-01 07:25:00,11.77,814.0,12.04
1227,3,4866.0,205,Housework,1970-01-01 07:25:00,6.6,554.0,8.2
1228,4,4512.0,205,Child Care,1970-01-01 07:25:00,6.12,302.0,4.47
1229,5,171.0,205,Adult Care,1970-01-01 07:25:00,0.23,45.0,0.67
1230,10,37703.0,206,Leisure,1970-01-01 07:26:00,51.12,3424.0,50.67
1231,6,18124.0,206,Work and Education,1970-01-01 07:26:00,24.57,1660.0,24.56
1232,11,8475.0,206,Travel and Other,1970-01-01 07:26:00,11.49,799.0,11.82
1233,3,4766.0,206,Housework,1970-01-01 07:26:00,6.46,542.0,8.02
1234,4,4507.0,206,Child Care,1970-01-01 07:26:00,6.11,286.0,4.23
1235,5,183.0,206,Adult Care,1970-01-01 07:26:00,0.25,47.0,0.7
1236,10,37719.0,207,Leisure,1970-01-01 07:27:00,51.14,3422.0,50.64
1237,6,18176.0,207,Work and Education,1970-01-01 07:27:00,24.64,1668.0,24.68
1238,11,8483.0,207,Travel and Other,1970-01-01 07:27:00,11.5,792.0,11.72
1239,3,4758.0,207,Housework,1970-01-01 07:27:00,6.45,546.0,8.08
1240,4,4447.0,207,Child Care,1970-01-01 07:27:00,6.03,284.0,4.2
1241,5,175.0,207,Adult Care,1970-01-01 07:27:00,0.24,46.0,0.68
1242,10,37716.0,208,Leisure,1970-01-01 07:28:00,51.13,3424.0,50.67
1243,6,18253.0,208,Work and Education,1970-01-01 07:28:00,24.75,1675.0,24.79
1244,11,8462.0,208,Travel and Other,1970-01-01 07:28:00,11.47,791.0,11.7
1245,3,4755.0,208,Housework,1970-01-01 07:28:00,6.45,541.0,8.01
1246,4,4400.0,208,Child Care,1970-01-01 07:28:00,5.97,280.0,4.14
1247,5,172.0,208,Adult Care,1970-01-01 07:28:00,0.23,47.0,0.7
1248,10,37709.0,209,Leisure,1970-01-01 07:29:00,51.13,3423.0,50.65
1249,6,18299.0,209,Work and Education,1970-01-01 07:29:00,24.81,1678.0,24.83
1250,11,8416.0,209,Travel and Other,1970-01-01 07:29:00,11.41,786.0,11.63
1251,3,4761.0,209,Housework,1970-01-01 07:29:00,6.45,544.0,8.05
1252,4,4394.0,209,Child Care,1970-01-01 07:29:00,5.96,280.0,4.14
1253,5,179.0,209,Adult Care,1970-01-01 07:29:00,0.24,47.0,0.7
1254,10,37707.0,210,Leisure,1970-01-01 07:30:00,51.12,3422.0,50.64
1255,6,18332.0,210,Work and Education,1970-01-01 07:30:00,24.85,1681.0,24.87
1256,11,8401.0,210,Travel and Other,1970-01-01 07:30:00,11.39,786.0,11.63
1257,3,4756.0,210,Housework,1970-01-01 07:30:00,6.45,542.0,8.02
1258,4,4386.0,210,Child Care,1970-01-01 07:30:00,5.95,280.0,4.14
1259,5,176.0,210,Adult Care,1970-01-01 07:30:00,0.24,47.0,0.7
1260,10,33387.0,211,Leisure,1970-01-01 07:31:00,45.27,3028.0,44.81
1261,6,19704.0,211,Work and Education,1970-01-01 07:31:00,26.71,1808.0,26.75
1262,11,10733.0,211,Travel and Other,1970-01-01 07:31:00,14.55,983.0,14.55
1263,3,5338.0,211,Housework,1970-01-01 07:31:00,7.24,605.0,8.95
1264,4,4392.0,211,Child Care,1970-01-01 07:31:00,5.95,283.0,4.19
1265,5,204.0,211,Adult Care,1970-01-01 07:31:00,0.28,51.0,0.75
1266,10,33378.0,212,Leisure,1970-01-01 07:32:00,45.25,3033.0,44.88
1267,6,19769.0,212,Work and Education,1970-01-01 07:32:00,26.8,1811.0,26.8
1268,11,10694.0,212,Travel and Other,1970-01-01 07:32:00,14.5,976.0,14.44
1269,3,5358.0,212,Housework,1970-01-01 07:32:00,7.26,608.0,9.0
1270,4,4364.0,212,Child Care,1970-01-01 07:32:00,5.92,278.0,4.11
1271,5,195.0,212,Adult Care,1970-01-01 07:32:00,0.26,52.0,0.77
1272,10,33388.0,213,Leisure,1970-01-01 07:33:00,45.27,3036.0,44.92
1273,6,19848.0,213,Work and Education,1970-01-01 07:33:00,26.91,1817.0,26.89
1274,11,10664.0,213,Travel and Other,1970-01-01 07:33:00,14.46,966.0,14.29
1275,3,5337.0,213,Housework,1970-01-01 07:33:00,7.24,602.0,8.91
1276,4,4330.0,213,Child Care,1970-01-01 07:33:00,5.87,283.0,4.19
1277,5,191.0,213,Adult Care,1970-01-01 07:33:00,0.26,54.0,0.8
1278,10,33410.0,214,Leisure,1970-01-01 07:34:00,45.3,3034.0,44.89
1279,6,19905.0,214,Work and Education,1970-01-01 07:34:00,26.99,1821.0,26.95
1280,11,10583.0,214,Travel and Other,1970-01-01 07:34:00,14.35,963.0,14.25
1281,3,5343.0,214,Housework,1970-01-01 07:34:00,7.24,605.0,8.95
1282,4,4323.0,214,Child Care,1970-01-01 07:34:00,5.86,282.0,4.17
1283,5,194.0,214,Adult Care,1970-01-01 07:34:00,0.26,53.0,0.78
1284,10,33424.0,215,Leisure,1970-01-01 07:35:00,45.32,3037.0,44.94
1285,6,19939.0,215,Work and Education,1970-01-01 07:35:00,27.03,1823.0,26.98
1286,11,10566.0,215,Travel and Other,1970-01-01 07:35:00,14.33,964.0,14.26
1287,3,5342.0,215,Housework,1970-01-01 07:35:00,7.24,601.0,8.89
1288,4,4295.0,215,Child Care,1970-01-01 07:35:00,5.82,279.0,4.13
1289,5,192.0,215,Adult Care,1970-01-01 07:35:00,0.26,54.0,0.8
1290,10,33262.0,216,Leisure,1970-01-01 07:36:00,45.1,3029.0,44.82
1291,6,20516.0,216,Work and Education,1970-01-01 07:36:00,27.82,1872.0,27.7
1292,11,10026.0,216,Travel and Other,1970-01-01 07:36:00,13.59,916.0,13.55
1293,3,5293.0,216,Housework,1970-01-01 07:36:00,7.18,590.0,8.73
1294,4,4451.0,216,Child Care,1970-01-01 07:36:00,6.03,297.0,4.39
1295,5,210.0,216,Adult Care,1970-01-01 07:36:00,0.28,54.0,0.8
1296,10,33280.0,217,Leisure,1970-01-01 07:37:00,45.12,3022.0,44.72
1297,6,20578.0,217,Work and Education,1970-01-01 07:37:00,27.9,1879.0,27.8
1298,11,10032.0,217,Travel and Other,1970-01-01 07:37:00,13.6,927.0,13.72
1299,3,5285.0,217,Housework,1970-01-01 07:37:00,7.17,587.0,8.69
1300,4,4380.0,217,Child Care,1970-01-01 07:37:00,5.94,291.0,4.31
1301,5,203.0,217,Adult Care,1970-01-01 07:37:00,0.28,52.0,0.77
1302,10,33273.0,218,Leisure,1970-01-01 07:38:00,45.11,3028.0,44.81
1303,6,20667.0,218,Work and Education,1970-01-01 07:38:00,28.02,1885.0,27.89
1304,11,9986.0,218,Travel and Other,1970-01-01 07:38:00,13.54,924.0,13.67
1305,3,5321.0,218,Housework,1970-01-01 07:38:00,7.21,583.0,8.63
1306,4,4314.0,218,Child Care,1970-01-01 07:38:00,5.85,288.0,4.26
1307,5,197.0,218,Adult Care,1970-01-01 07:38:00,0.27,50.0,0.74
1308,10,33275.0,219,Leisure,1970-01-01 07:39:00,45.11,3027.0,44.79
1309,6,20705.0,219,Work and Education,1970-01-01 07:39:00,28.07,1890.0,27.97
1310,11,9979.0,219,Travel and Other,1970-01-01 07:39:00,13.53,925.0,13.69
1311,3,5316.0,219,Housework,1970-01-01 07:39:00,7.21,583.0,8.63
1312,4,4288.0,219,Child Care,1970-01-01 07:39:00,5.81,284.0,4.2
1313,5,195.0,219,Adult Care,1970-01-01 07:39:00,0.26,49.0,0.73
1314,10,33278.0,220,Leisure,1970-01-01 07:40:00,45.12,3028.0,44.81
1315,6,20726.0,220,Work and Education,1970-01-01 07:40:00,28.1,1893.0,28.01
1316,11,9969.0,220,Travel and Other,1970-01-01 07:40:00,13.52,922.0,13.64
1317,3,5313.0,220,Housework,1970-01-01 07:40:00,7.2,583.0,8.63
1318,4,4276.0,220,Child Care,1970-01-01 07:40:00,5.8,283.0,4.19
1319,5,196.0,220,Adult Care,1970-01-01 07:40:00,0.27,49.0,0.73
1320,10,32882.0,221,Leisure,1970-01-01 07:41:00,44.58,2990.0,44.24
1321,6,21403.0,221,Work and Education,1970-01-01 07:41:00,29.02,1962.0,29.03
1322,11,9696.0,221,Travel and Other,1970-01-01 07:41:00,13.15,880.0,13.02
1323,3,5272.0,221,Housework,1970-01-01 07:41:00,7.15,576.0,8.52
1324,4,4289.0,221,Child Care,1970-01-01 07:41:00,5.81,290.0,4.29
1325,5,216.0,221,Adult Care,1970-01-01 07:41:00,0.29,60.0,0.89
1326,10,32894.0,222,Leisure,1970-01-01 07:42:00,44.6,2988.0,44.21
1327,6,21448.0,222,Work and Education,1970-01-01 07:42:00,29.08,1967.0,29.11
1328,11,9700.0,222,Travel and Other,1970-01-01 07:42:00,13.15,881.0,13.04
1329,3,5298.0,222,Housework,1970-01-01 07:42:00,7.18,581.0,8.6
1330,4,4209.0,222,Child Care,1970-01-01 07:42:00,5.71,281.0,4.16
1331,5,209.0,222,Adult Care,1970-01-01 07:42:00,0.28,60.0,0.89
1332,10,32906.0,223,Leisure,1970-01-01 07:43:00,44.61,2989.0,44.23
1333,6,21525.0,223,Work and Education,1970-01-01 07:43:00,29.18,1977.0,29.25
1334,11,9659.0,223,Travel and Other,1970-01-01 07:43:00,13.1,874.0,12.93
1335,3,5289.0,223,Housework,1970-01-01 07:43:00,7.17,586.0,8.67
1336,4,4172.0,223,Child Care,1970-01-01 07:43:00,5.66,275.0,4.07
1337,5,207.0,223,Adult Care,1970-01-01 07:43:00,0.28,57.0,0.84
1338,10,32902.0,224,Leisure,1970-01-01 07:44:00,44.61,2988.0,44.21
1339,6,21568.0,224,Work and Education,1970-01-01 07:44:00,29.24,1982.0,29.33
1340,11,9667.0,224,Travel and Other,1970-01-01 07:44:00,13.11,868.0,12.84
1341,3,5288.0,224,Housework,1970-01-01 07:44:00,7.17,585.0,8.66
1342,4,4135.0,224,Child Care,1970-01-01 07:44:00,5.61,280.0,4.14
1343,5,198.0,224,Adult Care,1970-01-01 07:44:00,0.27,55.0,0.81
1344,10,32919.0,225,Leisure,1970-01-01 07:45:00,44.63,2990.0,44.24
1345,6,21600.0,225,Work and Education,1970-01-01 07:45:00,29.28,1983.0,29.34
1346,11,9630.0,225,Travel and Other,1970-01-01 07:45:00,13.06,870.0,12.87
1347,3,5290.0,225,Housework,1970-01-01 07:45:00,7.17,582.0,8.61
1348,4,4121.0,225,Child Care,1970-01-01 07:45:00,5.59,276.0,4.08
1349,5,198.0,225,Adult Care,1970-01-01 07:45:00,0.27,57.0,0.84
1350,10,31834.0,226,Leisure,1970-01-01 07:46:00,43.16,2886.0,42.7
1351,6,22718.0,226,Work and Education,1970-01-01 07:46:00,30.8,2089.0,30.91
1352,11,9743.0,226,Travel and Other,1970-01-01 07:46:00,13.21,873.0,12.92
1353,3,5214.0,226,Housework,1970-01-01 07:46:00,7.07,576.0,8.52
1354,4,4016.0,226,Child Care,1970-01-01 07:46:00,5.44,277.0,4.1
1355,5,233.0,226,Adult Care,1970-01-01 07:46:00,0.32,57.0,0.84
1356,10,31833.0,227,Leisure,1970-01-01 07:47:00,43.16,2887.0,42.72
1357,6,22774.0,227,Work and Education,1970-01-01 07:47:00,30.88,2097.0,31.03
1358,11,9766.0,227,Travel and Other,1970-01-01 07:47:00,13.24,861.0,12.74
1359,3,5216.0,227,Housework,1970-01-01 07:47:00,7.07,580.0,8.58
1360,4,3942.0,227,Child Care,1970-01-01 07:47:00,5.34,277.0,4.1
1361,5,227.0,227,Adult Care,1970-01-01 07:47:00,0.31,56.0,0.83
1362,10,31832.0,228,Leisure,1970-01-01 07:48:00,43.16,2890.0,42.76
1363,6,22864.0,228,Work and Education,1970-01-01 07:48:00,31.0,2106.0,31.16
1364,11,9720.0,228,Travel and Other,1970-01-01 07:48:00,13.18,851.0,12.59
1365,3,5202.0,228,Housework,1970-01-01 07:48:00,7.05,578.0,8.55
1366,4,3916.0,228,Child Care,1970-01-01 07:48:00,5.31,275.0,4.07
1367,5,224.0,228,Adult Care,1970-01-01 07:48:00,0.3,58.0,0.86
1368,10,31851.0,229,Leisure,1970-01-01 07:49:00,43.18,2889.0,42.75
1369,6,22922.0,229,Work and Education,1970-01-01 07:49:00,31.08,2114.0,31.28
1370,11,9680.0,229,Travel and Other,1970-01-01 07:49:00,13.12,852.0,12.61
1371,3,5197.0,229,Housework,1970-01-01 07:49:00,7.05,580.0,8.58
1372,4,3884.0,229,Child Care,1970-01-01 07:49:00,5.27,265.0,3.92
1373,5,224.0,229,Adult Care,1970-01-01 07:49:00,0.3,58.0,0.86
1374,10,31844.0,230,Leisure,1970-01-01 07:50:00,43.17,2893.0,42.81
1375,6,22964.0,230,Work and Education,1970-01-01 07:50:00,31.13,2116.0,31.31
1376,11,9663.0,230,Travel and Other,1970-01-01 07:50:00,13.1,849.0,12.56
1377,3,5203.0,230,Housework,1970-01-01 07:50:00,7.05,582.0,8.61
1378,4,3859.0,230,Child Care,1970-01-01 07:50:00,5.23,262.0,3.88
1379,5,225.0,230,Adult Care,1970-01-01 07:50:00,0.31,56.0,0.83
1380,10,31444.0,231,Leisure,1970-01-01 07:51:00,42.63,2866.0,42.41
1381,6,23891.0,231,Work and Education,1970-01-01 07:51:00,32.39,2188.0,32.38
1382,11,9217.0,231,Travel and Other,1970-01-01 07:51:00,12.5,803.0,11.88
1383,3,5186.0,231,Housework,1970-01-01 07:51:00,7.03,582.0,8.61
1384,4,3776.0,231,Child Care,1970-01-01 07:51:00,5.12,268.0,3.97
1385,5,244.0,231,Adult Care,1970-01-01 07:51:00,0.33,51.0,0.75
1386,10,31450.0,232,Leisure,1970-01-01 07:52:00,42.64,2865.0,42.39
1387,6,23974.0,232,Work and Education,1970-01-01 07:52:00,32.5,2194.0,32.47
1388,11,9196.0,232,Travel and Other,1970-01-01 07:52:00,12.47,806.0,11.93
1389,3,5188.0,232,Housework,1970-01-01 07:52:00,7.03,584.0,8.64
1390,4,3718.0,232,Child Care,1970-01-01 07:52:00,5.04,260.0,3.85
1391,5,232.0,232,Adult Care,1970-01-01 07:52:00,0.31,49.0,0.73
1392,10,31463.0,233,Leisure,1970-01-01 07:53:00,42.66,2874.0,42.53
1393,6,24076.0,233,Work and Education,1970-01-01 07:53:00,32.64,2207.0,32.66
1394,11,9103.0,233,Travel and Other,1970-01-01 07:53:00,12.34,792.0,11.72
1395,3,5180.0,233,Housework,1970-01-01 07:53:00,7.02,578.0,8.55
1396,4,3710.0,233,Child Care,1970-01-01 07:53:00,5.03,256.0,3.79
1397,5,226.0,233,Adult Care,1970-01-01 07:53:00,0.31,51.0,0.75
1398,10,31458.0,234,Leisure,1970-01-01 07:54:00,42.65,2883.0,42.66
1399,6,24149.0,234,Work and Education,1970-01-01 07:54:00,32.74,2215.0,32.78
1400,11,9051.0,234,Travel and Other,1970-01-01 07:54:00,12.27,784.0,11.6
1401,3,5187.0,234,Housework,1970-01-01 07:54:00,7.03,573.0,8.48
1402,4,3688.0,234,Child Care,1970-01-01 07:54:00,5.0,253.0,3.74
1403,5,225.0,234,Adult Care,1970-01-01 07:54:00,0.31,50.0,0.74
1404,10,31474.0,235,Leisure,1970-01-01 07:55:00,42.67,2889.0,42.75
1405,6,24197.0,235,Work and Education,1970-01-01 07:55:00,32.81,2218.0,32.82
1406,11,9024.0,235,Travel and Other,1970-01-01 07:55:00,12.23,781.0,11.56
1407,3,5177.0,235,Housework,1970-01-01 07:55:00,7.02,572.0,8.46
1408,4,3664.0,235,Child Care,1970-01-01 07:55:00,4.97,248.0,3.67
1409,5,222.0,235,Adult Care,1970-01-01 07:55:00,0.3,50.0,0.74
1410,10,31297.0,236,Leisure,1970-01-01 07:56:00,42.43,2865.0,42.39
1411,6,25006.0,236,Work and Education,1970-01-01 07:56:00,33.9,2285.0,33.81
1412,11,8475.0,236,Travel and Other,1970-01-01 07:56:00,11.49,758.0,11.22
1413,3,5096.0,236,Housework,1970-01-01 07:56:00,6.91,562.0,8.32
1414,4,3650.0,236,Child Care,1970-01-01 07:56:00,4.95,237.0,3.51
1415,5,234.0,236,Adult Care,1970-01-01 07:56:00,0.32,51.0,0.75
1416,10,31304.0,237,Leisure,1970-01-01 07:57:00,42.44,2872.0,42.5
1417,6,25079.0,237,Work and Education,1970-01-01 07:57:00,34.0,2292.0,33.92
1418,11,8430.0,237,Travel and Other,1970-01-01 07:57:00,11.43,749.0,11.08
1419,3,5092.0,237,Housework,1970-01-01 07:57:00,6.9,560.0,8.29
1420,4,3625.0,237,Child Care,1970-01-01 07:57:00,4.91,233.0,3.45
1421,5,228.0,237,Adult Care,1970-01-01 07:57:00,0.31,52.0,0.77
1422,10,31332.0,238,Leisure,1970-01-01 07:58:00,42.48,2871.0,42.48
1423,6,25200.0,238,Work and Education,1970-01-01 07:58:00,34.17,2303.0,34.08
1424,11,8349.0,238,Travel and Other,1970-01-01 07:58:00,11.32,736.0,10.89
1425,3,5085.0,238,Housework,1970-01-01 07:58:00,6.89,559.0,8.27
1426,4,3568.0,238,Child Care,1970-01-01 07:58:00,4.84,237.0,3.51
1427,5,224.0,238,Adult Care,1970-01-01 07:58:00,0.3,52.0,0.77
1428,10,31336.0,239,Leisure,1970-01-01 07:59:00,42.48,2871.0,42.48
1429,6,25273.0,239,Work and Education,1970-01-01 07:59:00,34.26,2309.0,34.17
1430,11,8275.0,239,Travel and Other,1970-01-01 07:59:00,11.22,735.0,10.88
1431,3,5083.0,239,Housework,1970-01-01 07:59:00,6.89,558.0,8.26
1432,4,3568.0,239,Child Care,1970-01-01 07:59:00,4.84,233.0,3.45
1433,5,223.0,239,Adult Care,1970-01-01 07:59:00,0.3,52.0,0.77
1434,10,31328.0,240,Leisure,1970-01-01 08:00:00,42.47,2870.0,42.47
1435,6,25322.0,240,Work and Education,1970-01-01 08:00:00,34.33,2317.0,34.29
1436,11,8240.0,240,Travel and Other,1970-01-01 08:00:00,11.17,727.0,10.76
1437,3,5092.0,240,Housework,1970-01-01 08:00:00,6.9,558.0,8.26
1438,4,3554.0,240,Child Care,1970-01-01 08:00:00,4.82,234.0,3.46
1439,5,222.0,240,Adult Care,1970-01-01 08:00:00,0.3,52.0,0.77
1440,6,27372.0,241,Work and Education,1970-01-01 08:01:00,37.11,2502.0,37.02
1441,10,27079.0,241,Leisure,1970-01-01 08:01:00,36.71,2491.0,36.86
1442,11,9391.0,241,Travel and Other,1970-01-01 08:01:00,12.73,778.0,11.51
1443,3,6212.0,241,Housework,1970-01-01 08:01:00,8.42,678.0,10.03
1444,4,3450.0,241,Child Care,1970-01-01 08:01:00,4.68,244.0,3.61
1445,5,254.0,241,Adult Care,1970-01-01 08:01:00,0.34,65.0,0.96
1446,6,27461.0,242,Work and Education,1970-01-01 08:02:00,37.23,2514.0,37.2
1447,10,27103.0,242,Leisure,1970-01-01 08:02:00,36.75,2499.0,36.98
1448,11,9330.0,242,Travel and Other,1970-01-01 08:02:00,12.65,768.0,11.36
1449,3,6209.0,242,Housework,1970-01-01 08:02:00,8.42,673.0,9.96
1450,4,3410.0,242,Child Care,1970-01-01 08:02:00,4.62,242.0,3.58
1451,5,245.0,242,Adult Care,1970-01-01 08:02:00,0.33,62.0,0.92
1452,6,27603.0,243,Work and Education,1970-01-01 08:03:00,37.42,2526.0,37.38
1453,10,27128.0,243,Leisure,1970-01-01 08:03:00,36.78,2495.0,36.92
1454,11,9165.0,243,Travel and Other,1970-01-01 08:03:00,12.43,759.0,11.23
1455,3,6229.0,243,Housework,1970-01-01 08:03:00,8.45,676.0,10.0
1456,4,3391.0,243,Child Care,1970-01-01 08:03:00,4.6,241.0,3.57
1457,5,242.0,243,Adult Care,1970-01-01 08:03:00,0.33,61.0,0.9
1458,6,27672.0,244,Work and Education,1970-01-01 08:04:00,37.52,2533.0,37.48
1459,10,27150.0,244,Leisure,1970-01-01 08:04:00,36.81,2501.0,37.01
1460,11,9086.0,244,Travel and Other,1970-01-01 08:04:00,12.32,750.0,11.1
1461,3,6241.0,244,Housework,1970-01-01 08:04:00,8.46,671.0,9.93
1462,4,3366.0,244,Child Care,1970-01-01 08:04:00,4.56,240.0,3.55
1463,5,243.0,244,Adult Care,1970-01-01 08:04:00,0.33,63.0,0.93
1464,6,27718.0,245,Work and Education,1970-01-01 08:05:00,37.58,2538.0,37.56
1465,10,27155.0,245,Leisure,1970-01-01 08:05:00,36.82,2503.0,37.04
1466,11,9055.0,245,Travel and Other,1970-01-01 08:05:00,12.28,751.0,11.11
1467,3,6246.0,245,Housework,1970-01-01 08:05:00,8.47,668.0,9.88
1468,4,3342.0,245,Child Care,1970-01-01 08:05:00,4.53,235.0,3.48
1469,5,242.0,245,Adult Care,1970-01-01 08:05:00,0.33,63.0,0.93
1470,6,28255.0,246,Work and Education,1970-01-01 08:06:00,38.31,2582.0,38.21
1471,10,27245.0,246,Leisure,1970-01-01 08:06:00,36.94,2483.0,36.74
1472,11,8428.0,246,Travel and Other,1970-01-01 08:06:00,11.43,704.0,10.42
1473,3,6156.0,246,Housework,1970-01-01 08:06:00,8.35,675.0,9.99
1474,4,3418.0,246,Child Care,1970-01-01 08:06:00,4.63,246.0,3.64
1475,5,256.0,246,Adult Care,1970-01-01 08:06:00,0.35,68.0,1.01
1476,6,28315.0,247,Work and Education,1970-01-01 08:07:00,38.39,2587.0,38.28
1477,10,27253.0,247,Leisure,1970-01-01 08:07:00,36.95,2479.0,36.68
1478,11,8407.0,247,Travel and Other,1970-01-01 08:07:00,11.4,706.0,10.45
1479,3,6159.0,247,Housework,1970-01-01 08:07:00,8.35,679.0,10.05
1480,4,3371.0,247,Child Care,1970-01-01 08:07:00,4.57,242.0,3.58
1481,5,253.0,247,Adult Care,1970-01-01 | |
# Source repository: scramjetorg/framework-python
from scramjet.pyfca import Pyfca, DropChunk
import asyncio
from scramjet.ansi_color_codes import *
from os import environ
import scramjet.utils as utils
from collections.abc import Iterable, AsyncIterable
import re
# Verbose logging is enabled via either environment variable.
DEBUG = 'DATASTREAM_DEBUG' in environ or 'SCRAMJET_DEBUG' in environ
# Short alias used throughout this module for trimmed debug printing.
tr = utils.print_trimmed
def log(stream, *args):
    """Emit a debug line tagged with the stream's name (active only in DEBUG)."""
    if DEBUG:  # pragma: no cover
        label = f"{grey}{stream.name}{reset}"
        utils.LogWithTimer.log(label, *args)
class UnsupportedOperation(Exception):
    """Raised when a source or operation is not supported by the stream."""
class StreamAlreadyConsumed(Exception):
    """Raised when a stream that was already consumed is used again."""
class Stream():
def __init__(self, max_parallel=64, upstream=None, origin=None, name="datastream"):
self._upstream = upstream
self._origin = origin if origin else self
self.name = name
# whether we can write to the stream instance
self._writable = True
# whether the stream was already "used" (transformed/read from)
self._consumed = False
self._pyfca = upstream._pyfca if upstream else Pyfca(max_parallel)
self._ready_to_start = asyncio.Future()
self._sinks = []
log(self, f'INIT stream created with pyfca {self._pyfca}')
def __await__(self):
raise TypeError(
"Stream objects cannot be awaited on. To get data from a stream, "
"use a sink method (such as .to_list()) and await on that."
)
async def __aiter__(self):
self._uncork()
while True:
chunk = await self._pyfca.read()
if chunk is None:
break
yield chunk
def _uncork(self):
if not self._ready_to_start.done():
self._ready_to_start.set_result(True)
log(self, f'{green}uncorked{reset}')
if self._upstream:
log(self, f'uncorking upstream: {self._upstream.name}')
self._upstream._uncork()
def _mark_consumed(self):
if self._consumed: # cannot consume the same stream twice
raise StreamAlreadyConsumed
else:
self._consumed = True
def _as(self, target_class):
"""Create a stream of type target_class from current one."""
return target_class(
upstream=self,
max_parallel=self._pyfca.max_parallel,
name=f'{self.name}+_'
)
def use(self, func):
"""Perform a function on the whole stream and return the result."""
return func(self)
def write(self, chunk):
"""Write a single item to the datastream."""
return self._origin._pyfca.write(chunk)
def end(self):
"""Mark the end of input to the datastream."""
self._pyfca.end()
    async def read(self):
        """Read a single item from the datastream."""
        # cannot read from stream consumed by something else
        if self._consumed:
            raise StreamAlreadyConsumed
        # start the data flow in case no sink did it yet
        self._uncork()
        return await self._pyfca.read()
@classmethod
def read_from(cls, source, max_parallel=64, chunk_size=None):
"""
Create a new stream from specified source, which must be either
an Iterable or implement .read() method.
"""
if chunk_size:
if hasattr(source, 'read'):
return cls.from_callback(
max_parallel, source.read, chunk_size)
else:
msg = (f"chunk_size was specified, but source {source} "
"does not implement read() method.")
raise UnsupportedOperation(msg)
else:
if isinstance(source, (Iterable, AsyncIterable)):
return cls.from_iterable(
source, max_parallel=max_parallel)
else:
msg = (f"Source {source} is not iterable. It cannot be used "
"unless it exposes read() method and chunk_size "
"is specified.")
raise UnsupportedOperation(msg)
@classmethod
def from_iterable(cls, iterable, max_parallel=64):
"""Create a new stream from an iterable object."""
stream = cls(max_parallel)
async def consume():
await stream._ready_to_start
if isinstance(iterable, Iterable):
for item in iterable:
await stream._pyfca.write(item)
if isinstance(iterable, AsyncIterable):
[await stream._pyfca.write(item) async for item in iterable]
stream._pyfca.end()
asyncio.create_task(consume())
stream._writable = False
return stream
    @classmethod
    def from_callback(cls, max_parallel, callback, *args):
        """Create a new stream using callback to get chunks.

        callback: called with *args repeatedly to produce chunks; may be a
            sync function or a coroutine function. An empty str/bytes result
            ends the stream.
        """
        stream = cls(max_parallel)
        async def consume():
            # hold off producing until a sink uncorks the stream
            await stream._ready_to_start
            while True:
                chunk = callback(*args)
                if asyncio.iscoroutine(chunk):
                    chunk = await chunk
                # empty string/bytes marks end of input
                if chunk == '' or chunk == b'':
                    break
                await stream._pyfca.write(chunk)
            stream._pyfca.end()
        asyncio.create_task(consume())
        # source is fixed; direct writes are not allowed any more
        stream._writable = False
        return stream
    def map(self, func, *args):
        """Transform each chunk using a function.

        func: sync or async callable applied to every chunk (with *args).
        Returns a new derived stream producing the transformed chunks.
        """
        self._mark_consumed()
        new_stream = self.__class__(upstream=self, origin=self._origin, name=f'{self.name}+m')
        async def run_mapper(chunk):
            if args:
                log(new_stream, f'calling mapper {func} with args: {chunk, *args}')
            result = func(chunk, *args)
            # support coroutine-returning mappers transparently
            if asyncio.iscoroutine(result):
                result = await result
            log(new_stream, f'mapper result: {tr(chunk)} -> {tr(result)}')
            return result
        log(new_stream, f'adding mapper: {func}')
        # the shared pyfca applies the transform to every chunk passing through
        new_stream._pyfca.add_transform(run_mapper)
        return new_stream
def each(self, func, *args):
"""Perform an operation on each chunk and return it unchanged."""
async def mapper(chunk):
result = func(chunk, *args)
if asyncio.iscoroutine(result):
await result
return chunk
return self.map(mapper)
def decode(self, encoding):
"""Convert chunks of bytes into strings using specified encoding."""
import codecs
# Incremental decoders handle characters split across inputs.
# Input with only partial data yields empty string - drop these.
decoder = codecs.getincrementaldecoder(encoding)()
return self._as(StringStream).map(
lambda chunk: decoder.decode(chunk) or DropChunk
)
    def filter(self, func, *args):
        """Keep only chunks for which func evaluates to True.

        func: sync or async predicate called on every chunk (with *args).
        Returns a new derived stream of the retained chunks.
        """
        self._mark_consumed()
        new_stream = self.__class__(upstream=self, origin=self._origin, name=f'{self.name}+f')
        async def run_filter(chunk):
            if args:
                log(new_stream, f'calling filter {func} with args: {chunk, *args}')
            decision = func(chunk, *args)
            # support coroutine-returning predicates transparently
            if asyncio.iscoroutine(decision):
                decision = await decision
            log(new_stream, f'filter result: {tr(chunk)} -> {cyan}{decision}{reset}')
            # DropChunk tells the pyfca to discard this chunk
            return chunk if decision else DropChunk
        log(new_stream, f'adding filter: {func}')
        new_stream._pyfca.add_transform(run_filter)
        return new_stream
    def flatmap(self, func, *args):
        """Run func on each chunk and return all results as separate chunks.

        func: sync or async callable returning an iterable of result chunks.
        """
        self._mark_consumed()
        # note: no upstream= here - the new stream gets its own pyfca because
        # the number of output chunks differs from the number of inputs
        new_stream = self.__class__(
            max_parallel=self._pyfca.max_parallel, origin=self._origin, name=f'{self.name}+fm'
        )
        async def consume():
            self._uncork()
            while True:
                chunk = await self._pyfca.read()
                log(self, f'got: {tr(chunk)}')
                # None marks end of the source stream
                if chunk is None:
                    break
                results = func(chunk, *args)
                if asyncio.iscoroutine(results):
                    results = await results
                log(self, f'{cyan}split:{reset} -> {repr(results)}')
                # write every produced item as an individual chunk
                for item in results:
                    log(new_stream, f'put: {tr(item)}')
                    await new_stream._pyfca.write(item)
                    log(new_stream, f'{blue}drained{reset}')
            log(new_stream, f'ending pyfca {new_stream._pyfca}')
            new_stream._pyfca.end()
        asyncio.create_task(consume(), name='flatmap-consumer')
        return new_stream
    def batch(self, func, *args):
        """
        Convert a stream of chunks into a stream of lists of chunks.
        func: called on each chunk to determine when the batch will end.
        """
        self._mark_consumed()
        # separate pyfca - output chunk count differs from the input's
        new_stream = self.__class__(
            max_parallel=self._pyfca.max_parallel, origin=self._origin, name=f'{self.name}+b'
        )
        async def consume():
            self._uncork()
            batch = []
            while True:
                chunk = await self._pyfca.read()
                log(self, f'got: {tr(chunk)}')
                # None marks end of the source stream
                if chunk is None:
                    break
                batch.append(chunk)
                if args:
                    log(new_stream, f'calling {func} with args: {chunk, *args}')
                # a truthy result from func closes the current batch
                if func(chunk, *args):
                    log(new_stream, f'{pink}put batch:{reset} {tr(batch)}')
                    await new_stream._pyfca.write(batch)
                    batch = []
            # flush the final, possibly unterminated batch
            if len(batch):
                log(new_stream, f'{pink}put batch:{reset} {tr(batch)}')
                await new_stream._pyfca.write(batch)
            log(new_stream, f'ending pyfca {new_stream._pyfca}')
            new_stream._pyfca.end()
        asyncio.create_task(consume())
        return new_stream
    def sequence(self, sequencer, initialPartial=None):
        """
        Change how the data is chopped into chunks.
        sequencer: two-argument function taking partial result from previous
        operation and current chunk. It should return an iterable; all items
        from the iterable except the last one will become new chunks, and the
        last one will be fed to the next call of the sequencer.
        """
        self._mark_consumed()
        # separate pyfca - output chunk count differs from the input's
        new_stream = self.__class__(
            max_parallel=self._pyfca.max_parallel, origin=self._origin, name=f'{self.name}+s'
        )
        async def consume():
            self._uncork()
            partial = initialPartial
            while True:
                chunk = await self._pyfca.read()
                log(self, f'got: {tr(chunk)}')
                # None marks end of the source stream
                if chunk is None:
                    break
                chunks = sequencer(partial, chunk)
                if asyncio.iscoroutine(chunks):
                    chunks = await chunks
                log(new_stream, f'{blue}{len(chunks)} chunks:{reset} {chunks}')
                # all but the last item become output chunks...
                for chunk in chunks[:-1]:
                    log(new_stream, f'put: {tr(chunk)}')
                    await new_stream._pyfca.write(chunk)
                log(new_stream, f'carrying over partial result: {tr(chunks[-1])}')
                # ...the last item is carried into the next sequencer call
                partial = chunks[-1]
            log(new_stream, f'leftover: {tr(partial)}')
            # pytest claims that line #315 is not reachable, because the if
            # statement is always True.
            # TODO: refactor code here or find exact reason for pytest problem
            if partial: # pragma: no cover
                log(new_stream, f'put: {tr(partial)}')
                await new_stream._pyfca.write(partial)
            log(new_stream, f'ending pyfca {new_stream._pyfca}')
            new_stream._pyfca.end()
        asyncio.create_task(consume())
        return new_stream
    def pipe(self, target):
        """Forward all chunks from current stream into target.

        May be called multiple times to fan out into several sinks; all
        registered sinks receive every chunk. Returns target for chaining.
        """
        # set the flag directly (not via _mark_consumed) because piping to
        # several targets is allowed and must not raise StreamAlreadyConsumed
        self._consumed = True
        self._sinks.append(target)
        async def consume():
            self._uncork()
            while True:
                chunk = await self._pyfca.read()
                if chunk is None:
                    break
                # write to all sinks concurrently and wait until each drains
                drains = [target._pyfca.write(chunk) for target in self._sinks]
                await asyncio.gather(*drains)
            for target in self._sinks:
                target._pyfca.end()
        # start the consumer only on the first pipe() call; later calls just
        # add sinks that the already-running task will pick up
        if len(self._sinks) == 1:
            asyncio.create_task(consume(), name='pipe-consumer')
        return target
async def to_list(self):
"""Create a list with all resulting stream chunks."""
self._mark_consumed()
self._uncork()
result = []
log(self, f'sink: {repr(result)}')
chunk = await self._pyfca.read()
while chunk is not None:
log(self, f'got: {tr(chunk)}')
result.append(chunk)
chunk = await self._pyfca.read()
return result
async def write_to(self, target):
"""
Write all resulting stream chunks into target.
target: object implementing .write() method
"""
self._mark_consumed()
self._uncork()
log(self, f'sink: {repr(target)}')
chunk = await self._pyfca.read()
while chunk is not None:
log(self, f'got: {tr(chunk)}')
write = target.write(chunk)
if asyncio.iscoroutine(write):
await write
chunk = await self._pyfca.read()
return target
    async def reduce(self, func, initial=None):
        """
        Apply two-argument func to elements from the stream cumulatively,
        producing an awaitable that will resolve to a single value when the
        stream ends. For a stream of [1,2,3,4] the result will be
        func(func(func(1,2),3),4).

        initial: starting accumulator; when None the first chunk is used
            (so None cannot be distinguished from "no initial value").
        """
        self._mark_consumed()
        self._uncork()
        if initial is None:
            # seed the accumulator with the first chunk
            accumulator = await self._pyfca.read()
            log(self, f'got: {tr(accumulator)}')
        else:
            accumulator = initial
            log(self, f'reducer: initialized accumulator with {initial}')
        while True:
            chunk = await self._pyfca.read()
            log(self, f'got: {tr(chunk)}')
            # None marks end of the source stream
            if chunk is None:
                break
            accumulator = func(accumulator, chunk)
            # support coroutine-returning reducers transparently
            if asyncio.iscoroutine(accumulator):
                accumulator = await accumulator
            log(self, f'reduce - intermediate result: {accumulator}')
        return accumulator
class StringStream(Stream):
    def __init__(self, max_parallel=64, upstream=None, origin=None, name="stringstream"):
        """Initialize like Stream, differing only in the default debug name."""
        super().__init__(max_parallel=max_parallel, upstream=upstream, origin=origin, name=name)
    def parse(self, func, *args):
        """Transform StringStream into Stream."""
        # parsing may yield arbitrary objects, so re-wrap as a generic Stream
        return self._as(Stream).map(func, *args)
def match(self, pattern):
"""Extract matching parts of chunk as new chunks."""
regex = re.compile(pattern)
def mapper(chunk):
matches = regex.findall(chunk)
if regex.groups <= 1:
return matches
else:
flattened = []
for tuple in matches:
flattened.extend(tuple)
return flattened
return self.flatmap(mapper)
| |
None, None, None),
0x80ef: (0x001f, "mTALocalCred", "ms-Exch-MTA-Local-Cred", 2, None, None, None),
0x80f0: (0x001f, "mTALocalDesig", "ms-Exch-MTA-Local-Desig", 2, None, None, None),
0x80f1: (0x0102, "nAddress", "ms-Exch-N-Address", 2, None, None, None),
0x80f2: (0x0003, "nAddressType", "ms-Exch-N-Address-Type", 2, None, None, None),
0x80f4: (0x0003, "numOfOpenRetries", "ms-Exch-Num-Of-Open-Retries", 2, None, None, None),
0x80f5: (0x0003, "numOfTransferRetries", "ms-Exch-Num-Of-Transfer-Retries", 2, None, None, None),
0x80f6: (0x0003, "objectClassCategory", "Object-Class-Category", 3, None, None, None),
0x80f7: (0x0003, "objectVersion", "Object-Version", 3, None, None, None),
0x80f8: (0x101f, "offLineABContainers", "ms-Exch-Off-Line-AB-Containers", 2, None, None, None),
0x80f9: (0x0102, "offLineABSchedule", "ms-Exch-Off-Line-AB-Schedule", 2, None, None, None),
0x80fa: (0x001f, "offLineABServer", "ms-Exch-Off-Line-AB-Server", 2, None, None, None),
0x80fb: (0x0003, "offLineABStyle", "ms-Exch-Off-Line-AB-Style", 2, None, None, None),
0x80fd: (0x0102, "oMObjectClass", "OM-Object-Class", 3, None, None, None),
0x80fe: (0x0003, "oMSyntax", "OM-Syntax", 1, None, None, None),
0x80ff: (0x000b, "oOFReplyToOriginator", "ms-Exch-OOF-Reply-To-Originator", 1, None, None, None),
0x8100: (0x0003, "openRetryInterval", "ms-Exch-Open-Retry-Interval", 2, None, None, None),
0x8101: (0x101f, "o", "Organization-Name", 1, "PidLidTaskStatus", "dispidTaskStatus", None),
0x8102: (0x101f, "ou", "Organizational-Unit-Name", 1, "PidLidPercentComplete", "dispidPercentComplete", None),
0x8103: (0x0102, "originalDisplayTable", "Original-Display-Table", 3, "PidLidTeamTask", "dispidTeamTask", None),
0x8104: (0x0102, "originalDisplayTableMSDOS", "Original-Display-Table-MSDOS", 3, "PidLidTaskStartDate", "dispidTaskStartDate", None),
0x8105: (0x101f, "outboundSites", "ms-Exch-Outbound-Sites", 2, "PidLidTaskDueDate", "dispidTaskDueDate", None),
0x8106: (0x0102, "pSelector", "ms-Exch-P-Selector", 2, None, None, None),
0x8107: (0x0102, "pSelectorInbound", "ms-Exch-P-Selector-Inbound", 2, "PidLidTaskResetReminder", "dispidTaskResetReminder", None),
0x8108: (0x0102, "perMsgDialogDisplayTable", "Per-Msg-Dialog-Display-Table", 3, "PidLidTaskAccepted", "dispidTaskAccepted", None),
0x8109: (0x0102, "perRecipDialogDisplayTable", "Per-Recip-Dialog-Display-Table", 3, "PidLidTaskDeadOccurrence", "dispidTaskDeadOccur", None),
0x810c: (0x101f, "postalAddress", "Postal-Address", 3, None, None, None),
0x810e: (0x001f, "pRMD", "ms-Exch-PRMD", 2, None, None, None),
0x810f: (0x001f, "proxyGeneratorDLL", "ms-Exch-Proxy-Generator-DLL", 2, "PidLidTaskDateCompleted", "dispidTaskDateCompleted", None),
0x8110: (0x000d, "publicDelegatesBL", "ms-Exch-Public-Delegates-BL", 2, "PidLidTaskActualEffort", "dispidTaskActualEffort", None),
0x8111: (0x0102, "quotaNotificationSchedule", "ms-Exch-Quota-Notification-Schedule", 2, "PidLidTaskEstimatedEffort", "dispidTaskEstimatedEffort", None),
0x8112: (0x0003, "quotaNotificationStyle", "ms-Exch-Quota-Notification-Style", 2, "PidLidTaskVersion", "dispidTaskVersion", None),
0x8113: (0x0003, "rangeLower", "Range-Lower", 1, "PidLidTaskState", "dispidTaskState", None),
0x8114: (0x0003, "rangeUpper", "Range-Upper", 1, None, None, None),
0x8115: (0x001f, "rASCallbackNumber", "ms-Exch-RAS-Callback-Number", 2, "PidLidTaskLastUpdate", "dispidTaskLastUpdate", None),
0x8116: (0x001f, "rASPhoneNumber", "ms-Exch-RAS-Phone-Number", 2, "PidLidTaskRecurrence", "dispidTaskRecur", None),
0x8117: (0x001f, "rASPhonebookEntryName", "ms-Exch-RAS-Phonebook-Entry-Name", 2, "PidLidTaskAssigners", "dispidTaskMyDelegators", None),
0x8118: (0x001f, "rASRemoteSRVRName", "ms-Exch-RAS-Remote-SRVR-Name", 2, None, None, None),
0x8119: (0x1102, "registeredAddress", "Registered-Address", 3, "PidLidTaskStatusOnComplete", "dispidTaskSOC", None),
0x811a: (0x001f, "remoteBridgeHead", "ms-Exch-Remote-Bridge-Head", 2, "PidLidTaskHistory", "dispidTaskHistory", None),
0x811b: (0x001f, "remoteBridgeHeadAddress", "ms-Exch-Remote-Bridge-Head-Address", 2, "PidLidTaskUpdates", "dispidTaskUpdates", None),
0x811d: (0x001f, "remoteSite", "ms-Exch-Remote-Site", 2, None, None, None),
0x811e: (0x0003, "replicationSensitivity", "ms-Exch-Replication-Sensitivity", 2, "PidLidTaskFCreator", "dispidTaskFCreator", None),
0x811f: (0x0003, "replicationStagger", "ms-Exch-Replication-Stagger", 2, "PidLidTaskOwner", "dispidTaskOwner", None),
0x8120: (0x000b, "reportToOriginator", "ms-Exch-Report-To-Originator", 1, "PidLidTaskMultipleRecipients", "dispidTaskMultRecips", None),
0x8121: (0x000b, "reportToOwner", "ms-Exch-Report-To-Owner", 1, "PidLidTaskAssigner", "dispidTaskDelegator", None),
0x8122: (0x0003, "reqSeq", "ms-Exch-Req-Seq", 2, "PidLidTaskLastUser", "dispidTaskLastUser", None),
0x8123: (0x000d, "responsibleLocalDXA", "ms-Exch-Responsible-Local-DXA", 2, "PidLidTaskOrdinal", "dispidTaskOrdinal", None),
0x8124: (0x001f, "ridServer", "ms-Exch-Rid-Server", 2, "PidLidTaskNoCompute", "dispidTaskNoCompute", None),
0x8125: (0x101f, "roleOccupant", "Role-Occupant", 3, "PidLidTaskLastDelegate", "dispidTaskLastDelegate", None),
0x8126: (0x101f, "routingList", "ms-Exch-Routing-List", 2, "PidLidTaskFRecurring", "dispidTaskFRecur", None),
0x8127: (0x0003, "rTSCheckpointSize", "ms-Exch-RTS-Checkpoint-Size", 2, "PidLidTaskRole", "dispidTaskRole", None),
0x8128: (0x0003, "rTSRecoveryTimeout", "ms-Exch-RTS-Recovery-Timeout", 2, None, None, None),
0x8129: (0x0003, "rTSWindowSize", "ms-Exch-RTS-Window-Size", 2, "PidLidTaskOwnership", "dispidTaskOwnership", None),
0x812a: (0x101f, "runsOn", "ms-Exch-Runs-On", 2, "PidLidTaskAcceptanceState", "dispidTaskDelegValue", None),
0x812b: (0x0102, "sSelector", "ms-Exch-S-Selector", 1, None, None, None),
0x812c: (0x0102, "sSelectorInbound", "ms-Exch-S-Selector-Inbound", 1, "PidLidTaskFFixOffline", "dispidTaskFFixOffline", None),
0x812d: (0x0003, "searchFlags", "Search-Flags", 3, None, None, None),
0x812e: (0x1102, "searchGuide", "Search-Guide", 3, None, None, None),
0x812f: (0x101f, "seeAlso", "See-Also", 3, None, None, None),
0x8130: (0x101f, "serialNumber", "Serial-Number", 3, None, None, None),
0x8131: (0x0003, "serviceActionFirst", "ms-Exch-Service-Action-First", 2, None, None, None),
0x8132: (0x0003, "serviceActionOther", "ms-Exch-Service-Action-Other", 2, None, None, None),
0x8133: (0x0003, "serviceActionSecond", "ms-Exch-Service-Action-Second", 2, None, None, None),
0x8134: (0x0003, "serviceRestartDelay", "ms-Exch-Service-Restart-Delay", 2, None, None, None),
0x8135: (0x001f, "serviceRestartMessage", "ms-Exch-Service-Restart-Message", 2, None, None, None),
0x8136: (0x0003, "sessionDisconnectTimer", "ms-Exch-Session-Disconnect-Timer", 2, None, None, None),
0x8138: (0x101f, "siteProxySpace", "ms-Exch-Site-Proxy-Space", 2, None, None, None),
0x8139: (0x0040, "spaceLastComputed", "ms-Exch-Space-Last-Computed", 2, "PidLidTaskCustomFlags", "dispidTaskCustomFlags", None),
0x813a: (0x001f, "street", "Street-Address", 1, None, None, None),
0x813b: (0x101f, "subRefs", "Sub-Refs", 1, None, None, None),
0x813c: (0x0003, "submissionContLength", "ms-Exch-Submission-Cont-Length", 1, None, None, None),
0x813d: (0x1102, "supportedApplicationContext", "Supported-Application-Context", 3, None, None, None),
0x813e: (0x000d, "supportingStack", "ms-Exch-Supporting-Stack", 2, None, None, None),
0x813f: (0x000d, "supportingStackBL", "ms-Exch-Supporting-Stack-BL", 2, None, None, None),
0x8140: (0x0102, "tSelector", "ms-Exch-T-Selector", 2, None, None, None),
0x8142: (0x101f, "targetMTAs", "ms-Exch-Target-MTAs", 2, None, None, None),
0x8143: (0x1102, "teletexTerminalIdentifier", "Teletex-Terminal-Identifier", 3, None, None, None),
0x8144: (0x0003, "tempAssocThreshold", "ms-Exch-Temp-Assoc-Threshold", 2, None, None, None),
0x8145: (0x0003, "tombstoneLifetime", "Tombstone-Lifetime", 3, None, None, None),
0x8146: (0x001f, "trackingLogPathName", "ms-Exch-Tracking-Log-Path-Name", 2, None, None, None),
0x8147: (0x0003, "transRetryMins", "ms-Exch-Trans-Retry-Mins", 2, None, None, None),
0x8148: (0x0003, "transTimeoutMins", "ms-Exch-Trans-Timeout-Mins", 2, None, None, None),
0x8149: (0x0003, "transferRetryInterval", "ms-Exch-Transfer-Retry-Interval", 2, None, None, None),
0x814a: (0x0003, "transferTimeoutNonUrgent", "ms-Exch-Transfer-Timeout-Non-Urgent", 2, None, None, None),
0x814b: (0x0003, "transferTimeoutNormal", "ms-Exch-Transfer-Timeout-Normal", 2, None, None, None),
0x814c: (0x0003, "transferTimeoutUrgent", "ms-Exch-Transfer-Timeout-Urgent", 2, None, None, None),
0x814d: (0x0003, "translationTableUsed", "ms-Exch-Translation-Table-Used", 2, None, None, None),
0x814e: (0x000b, "transportExpeditedData", "ms-Exch-Transport-Expedited-Data", 2, None, None, None),
0x814f: (0x0003, "trustLevel", "ms-Exch-Trust-Level", 2, None, None, None),
0x8150: (0x0003, "turnRequestThreshold", "ms-Exch-Turn-Request-Threshold", 2, None, None, None),
0x8151: (0x000b, "twoWayAlternateFacility", "ms-Exch-Two-Way-Alternate-Facility", 2, None, None, None),
0x8152: (0x000d, "unauthOrigBL", "ms-Exch-Unauth-Orig-BL", 1, None, None, None),
0x8153: (0x1102, "userPassword", "<PASSWORD>", 3, None, None, None),
0x8154: (0x0003, "uSNCreated", "USN-Created", 1, None, None, None),
0x8155: (0x0003, "uSNDSALastObjRemoved", "USN-DSA-Last-Obj-Removed", 3, None, None, None),
0x8156: (0x0003, "uSNLastObjRem", "USN-Last-Obj-Rem", 1, None, None, None),
0x8157: (0x0003, "uSNSource", "USN-Source", 3, None, None, None),
0x8158: (0x101f, "x121Address", "X121-Address", 3, None, None, None),
0x8159: (0x0102, "x25CallUserDataIncoming", "ms-Exch-X25-Call-User-Data-Incoming", 2, None, None, None),
0x815a: (0x0102, "x25CallUserDataOutgoing", "ms-Exch-X25-Call-User-Data-Outgoing", 2, None, None, None),
0x815b: (0x0102, "x25FacilitiesDataIncoming", "ms-Exch-X25-Facilities-Data-Incoming", 2, None, None, None),
0x815c: (0x0102, "x25FacilitiesDataOutgoing", "ms-Exch-X25-Facilities-Data-Outgoing", 2, None, None, None),
0x815d: (0x0102, "x25LeasedLinePort", "ms-Exch-X25-Leased-Line-Port", 2, None, None, None),
0x815e: (0x000b, "x25LeasedOrSwitched", "ms-Exch-X25-Leased-Or-Switched", 2, None, None, None),
0x815f: (0x001f, "x25RemoteMTAPhone", "ms-Exch-X25-Remote-MTA-Phone", 2, None, None, None),
0x8160: (0x0102, "x400AttachmentType", "ms-Exch-X400-Attachment-Type", 2, None, None, None),
0x8161: (0x0003, "x400SelectorSyntax", "ms-Exch-X400-Selector-Syntax", 2, None, None, None),
0x8163: (0x0003, "xMITTimeoutNonUrgent", "ms-Exch-XMIT-Timeout-Non-Urgent", 2, None, None, None),
0x8164: (0x0003, "xMITTimeoutNormal", "ms-Exch-XMIT-Timeout-Normal", 2, None, None, None),
0x8165: (0x0003, "xMITTimeoutUrgent", "ms-Exch-XMIT-Timeout-Urgent", 2, None, None, None),
0x8166: (0x0102, "siteFolderGUID", "ms-Exch-Site-Folder-GUID", 2, None, None, None),
0x8167: (0x001f, "siteFolderServer", "ms-Exch-Site-Folder-Server", 2, None, None, None),
0x8168: (0x0003, "replicationMailMsgSize", "ms-Exch-Replication-Mail-Msg-Size", 2, None, None, None),
0x8169: (0x0102, "maximumObjectID", "ms-Exch-Maximum-Object-ID", 2, None, None, None),
0x8170: (0x101f, "networkAddress", "Network-Address", 1, "PidTagAddressBookNetworkAddress", "PR_EMS_AB_NETWORK_ADDRESS", "AbNetworkAddress"),
0x8171: (0x001f, "lDAPDisplayName", "LDAP-Display-Name", 1, None, None, None),
0x8174: (0x101f, "bridgeheadServers", "ms-Exch-Bridgehead-Servers", 2, None, None, None),
0x8175: (0x101f, "url", "WWW-Page-Other", 1, None, None, None),
0x8177: (0x001f, "pOPContentFormat", "ms-Exch-POP-Content-Format", 2, None, None, None),
0x8178: (0x0003, "languageCode", "ms-Exch-Language", 2, None, None, None),
0x8179: (0x001f, "pOPCharacterSet", "ms-Exch-POP-Character-Set", 2, None, None, None),
0x817a: (0x0003, "USNIntersite", "USN-Intersite", 3, None, None, None),
0x817f: (0x0003, "enabledProtocols", "ms-Exch-Enabled-Protocols", 1, None, None, None),
0x8180: (0x0102, "connectionListFilter", "ms-Exch-Connection-List-Filter", 2, None, None, None),
0x8181: (0x101f, "availableAuthorizationPackages", "ms-Exch-Available-Authorization-Packages", 2, None, None, None),
0x8182: (0x101f, "characterSetList", "ms-Exch-Character-Set-List", 2, None, None, None),
0x8183: (0x000b, "useSiteValues", "ms-Exch-Use-Site-Values", 2, None, None, None),
0x8184: (0x101f, "enabledAuthorizationPackages", "ms-Exch-Enabled-Authorization-Packages", 2, None, None, None),
0x8185: (0x001f, "characterSet", "ms-Exch-Character-Set", 2, None, None, None),
0x8186: (0x0003, "contentType", "ms-Exch-Content-Type", 2, None, None, None),
0x8187: (0x000b, "anonymousAccess", "ms-Exch-Anonymous-Access", 2, None, None, None),
0x8188: (0x0102, "controlMsgFolderID", "ms-Exch-Control-Msg-Folder-ID", 2, None, None, None),
0x8189: (0x001f, "usenetSiteName", "ms-Exch-Usenet-Site-Name", 2, None, None, None),
0x818a: (0x0102, "controlMsgRules", "ms-Exch-Control-Msg-Rules", 2, None, None, None),
0x818b: (0x001f, "availableDistributions", "ms-Exch-Available-Distributions", 2, None, None, None),
0x818f: (0x0003, "outgoingMsgSizeLimit", "ms-Exch-Outgoing-Msg-Size-Limit", 2, None, None, None),
0x8190: (0x0003, "incomingMsgSizeLimit", "ms-Exch-Incoming-Msg-Size-Limit", 2, None, None, None),
0x8191: (0x000b, "sendTNEF", "ms-Exch-Send-TNEF", 2, None, None, None),
0x819b: (0x000b, "hTTPPubGAL", "ms-Exch-HTTP-Pub-GAL", 2, None, None, None),
0x819c: (0x0003, "hTTPPubGALLimit", "ms-Exch-HTTP-Pub-GAL-Limit", 2, None, None, None),
0x819e: (0x1102, "hTTPPubPF", "ms-Exch-HTTP-Pub-PF", 2, None, None, None),
0x81a1: (0x001f, "x500RDN", "ms-Exch-X500-RDN", 2, None, None, None),
0x81a2: (0x001f, "dnQualifier", "ms-Exch-X500-NC", 2, None, None, None),
0x81a3: (0x101f, "referralList", "ms-Exch-Referral-List", 2, None, None, None),
0x81a8: (0x000b, "enabledProtocolCfg", "ms-Exch-Enabled-Protocol-Cfg", 2, None, None, None),
0x81a9: (0x101f, "hTTPPubABAttributes", "ms-Exch-HTTP-Pub-AB-Attributes", 2, None, None, None),
0x81ab: (0x101f, "hTTPServers", "ms-Exch-HTTP-Servers", 2, None, None, None),
0x81b1: (0x000b, "proxyGenerationEnabled", "Proxy-Generation-Enabled", 3, None, None, None),
0x81b2: (0x0102, "rootNewsgroupsFolderID", "ms-Exch-Root-Newsgroups-Folder-ID", 2, None, None, None),
0x81b4: (0x0003, "connectionListFilterType", "ms-Exch-Connection-List-Filter-Type", 2, None, None, None),
0x81b5: (0x0003, "portNumber", "ms-Exch-Port-Number", 2, None, None, None),
0x81b6: (0x101f, "protocolSettings", "ms-Exch-Protocol-Settings", 1, None, None, None),
0x81c2: (0x0040, "promoExpiration", "ms-Exch-Promo-Expiration", 1, None, None, None),
0x81c3: (0x101f, "disabledGatewayProxy", "ms-Exch-Disabled-Gateway-Proxy", 2, None, None, None),
0x81c4: (0x0102, "compromisedKeyList", "ms-Exch-Compromised-Key-List", 2, None, None, None),
0x81c5: (0x001f, "iNSAdmin", "ms-Exch-INSAdmin", 2, None, None, None),
0x81c7: (0x101f, "objViewContainers", "ms-Exch-Obj-View-Containers", 2, None, None, None),
0x8202: (0x001f, "name", "RDN", 1, "PidLidAppointmentSequenceTime", "dispidApptSeqTime", None),
0x8c1c: (0x0102, "msExchMimeTypes", "ms-Exch-Mime-Types", 2, None, None, None),
0x8c1d: (0x0003, "lDAPSearchCfg", "ms-Exch-LDAP-Search-Cfg", 2, | |
# Copyright 2014 Big Switch Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import time
import mock
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import importutils
from six.moves import http_client
from networking_bigswitch.plugins.bigswitch.db import consistency_db
from networking_bigswitch.plugins.bigswitch import servermanager
from networking_bigswitch.tests.unit.bigswitch \
import test_restproxy_plugin as test_rp
from networking_bigswitch.tests.unit.bigswitch.mock_paths import CERT_COMBINER
from networking_bigswitch.tests.unit.bigswitch.mock_paths import CONSISTENCY_DB
from networking_bigswitch.tests.unit.bigswitch.mock_paths import \
GET_SERVER_CERTIFICATE
from networking_bigswitch.tests.unit.bigswitch.mock_paths import HTTPCON
from networking_bigswitch.tests.unit.bigswitch.mock_paths import HTTPSCON
from networking_bigswitch.tests.unit.bigswitch.mock_paths import \
POOL_GET_CAPABILITIES
from networking_bigswitch.tests.unit.bigswitch.mock_paths import \
POOL_REST_ACTION
from networking_bigswitch.tests.unit.bigswitch.mock_paths import \
POOL_REST_CALL
from networking_bigswitch.tests.unit.bigswitch.mock_paths import \
POOL_TOPO_SYNC
from networking_bigswitch.tests.unit.bigswitch.mock_paths import \
POOL_UPDATE_TENANT_CACHE
from networking_bigswitch.tests.unit.bigswitch.mock_paths import SERVER_MANAGER
from networking_bigswitch.tests.unit.bigswitch.mock_paths import \
SERVER_REST_CALL
class ServerManagerTests(test_rp.BigSwitchProxyPluginV2TestCase):
    def setUp(self):
        # Mock out the raw socket/SSL plumbing so no real network
        # connections are attempted by the server manager.
        self.socket_mock = mock.patch(
            SERVER_MANAGER + '.socket.create_connection').start()
        self.wrap_mock = mock.patch(SERVER_MANAGER +
                                    '.ssl.wrap_socket').start()
        super(ServerManagerTests, self).setUp()
        # http patch must not be running or it will mangle the servermanager
        # import where the https connection classes are defined
        self.httpPatch.stop()
        self.sm = importutils.import_module(SERVER_MANAGER)
def test_no_servers(self):
cfg.CONF.set_override('servers', [], 'RESTPROXY')
self.assertRaises(cfg.Error, servermanager.ServerPool)
def test_malformed_servers(self):
cfg.CONF.set_override('servers', ['172.16.31.10', '1.1.1.1:a'], 'RESTPROXY')
self.assertRaises(cfg.Error, servermanager.ServerPool)
def test_ipv6_server_address(self):
cfg.CONF.set_override(
'servers', ['[ABCD:EF01:2345:6789:ABCD:EF01:2345:6789]:80'],
'RESTPROXY')
s = servermanager.ServerPool()
self.assertEqual(s.servers[0].server,
'ABCD:EF01:2345:6789:ABCD:EF01:2345:6789')
    def test_sticky_cert_fetch_fail(self):
        # A failure while fetching the server certificate must surface as
        # a cfg.Error rather than being swallowed.
        pl = directory.get_plugin()
        pl.servers.is_ssl_enabled = True
        with mock.patch(GET_SERVER_CERTIFICATE,
                        side_effect=Exception('There is no more entropy in the'
                                              'universe')) as sslgetmock:
            self.assertRaises(
                cfg.Error,
                pl.servers._get_combined_cert_for_server,
                *('example.org', 443)
            )
            # the certificate fetch must have been attempted for that server
            sslgetmock.assert_has_calls([mock.call(('example.org', 443))])
    def test_consistency_watchdog_stops_with_0_polling_interval(self):
        # A polling interval of 0 disables the watchdog entirely.
        pl = directory.get_plugin()
        pl.servers.capabilities = ['consistency']
        self.watch_p.stop()
        with mock.patch('eventlet.sleep') as smock:
            # should return immediately with a polling interval of 0
            pl.servers._consistency_watchdog(0)
            # no sleep call means the loop was never entered
            self.assertFalse(smock.called)
    def test_consistency_watchdog(self):
        # Verify the watchdog is a no-op without the 'consistency'
        # capability, and that with it the health-check loop runs and
        # logs REST errors.
        pl = directory.get_plugin()
        pl.servers.capabilities = ['dummy']
        self.watch_p.stop()
        with mock.patch('eventlet.sleep') as smock,\
                mock.patch(
                    POOL_REST_CALL,
                    side_effect=servermanager.RemoteRestError(
                        reason='Failure to trigger except clause.'))\
                as rmock,\
                mock.patch(
                    SERVER_MANAGER + '.LOG.exception',
                    side_effect=KeyError('Failure to break loop'))\
                as lmock:
            # should return immediately without consistency capability
            pl.servers._consistency_watchdog()
            self.assertFalse(smock.called)
            pl.servers.capabilities = ['consistency']
            # KeyError raised by the mocked LOG.exception breaks the
            # otherwise infinite watchdog loop after one iteration
            self.assertRaises(KeyError,
                              pl.servers._consistency_watchdog)
            rmock.assert_called_with('GET', '/health', '', {}, [], False)
            self.assertEqual(1, len(lmock.mock_calls))
    def test_file_put_contents(self):
        # _file_put_contents must open the path for writing and write the
        # payload; open() is mocked so nothing touches the filesystem.
        pl = directory.get_plugin()
        with mock.patch(SERVER_MANAGER + '.open', create=True) as omock:
            pl.servers._file_put_contents('somepath', 'contents')
            omock.assert_has_calls([mock.call('somepath', 'w')])
            omock.return_value.__enter__.return_value.assert_has_calls([
                mock.call.write('contents')
            ])
    def test_combine_certs_to_file(self):
        # Combining certs should read each input file and write its data
        # into the combined output file.
        pl = directory.get_plugin()
        with mock.patch(SERVER_MANAGER + '.open', create=True) as omock:
            omock.return_value.__enter__().read.return_value = 'certdata'
            pl.servers._combine_certs_to_file(['cert1.pem', 'cert2.pem'],
                                              'combined.pem')
            # mock shared between read and write file handles so the calls
            # are mixed together
            omock.assert_has_calls([
                mock.call('combined.pem', 'w'),
                mock.call('cert1.pem', 'r'),
                mock.call('cert2.pem', 'r'),
            ], any_order=True)
            omock.return_value.__enter__.return_value.assert_has_calls([
                mock.call.read(),
                mock.call.write('certdata'),
                mock.call.read(),
                mock.call.write('certdata')
            ])
    # basic authentication
    def test_auth_header(self):
        # user:pass credentials must be sent as a Basic Authorization
        # header, and no session cookie should be present.
        cfg.CONF.set_override('server_auth', 'username:pass', 'RESTPROXY')
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
            sp.rest_create_network('tenant', 'network')
            # headers are the 4th positional argument of request()
            callheaders = rv.request.mock_calls[0][1][3]
            self.assertIn('Authorization', callheaders)
            self.assertNotIn('Cookie', callheaders)
            # base64("username:pass")
            self.assertEqual(callheaders['Authorization'],
                             'Basic dXNlcm5hbWU6cGFzcw==')
    # token based authentication
    def test_auth_token_header(self):
        # A token (no colon) must be sent as a session cookie instead of
        # a Basic Authorization header.
        cfg.CONF.set_override('server_auth', 'fake_token', 'RESTPROXY')
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
            sp.rest_create_network('tenant', 'network')
            # headers are the 4th positional argument of request()
            callheaders = rv.request.mock_calls[0][1][3]
            self.assertIn('Cookie', callheaders)
            self.assertNotIn('Authorization', callheaders)
            self.assertEqual(callheaders['Cookie'], 'session_cookie="fake_token"')
    def test_header_add(self):
        # Custom headers passed to rest_call must be merged with, not
        # replace, the standard headers.
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
            sp.servers[0].rest_call('GET', '/', headers={'EXTRA-HEADER': 'HI'})
            # headers are the 4th positional argument of request()
            callheaders = rv.request.mock_calls[0][1][3]
            # verify normal headers weren't mangled
            self.assertIn('Content-type', callheaders)
            self.assertEqual(callheaders['Content-type'],
                             'application/json')
            # verify new header made it in
            self.assertIn('EXTRA-HEADER', callheaders)
            self.assertEqual(callheaders['EXTRA-HEADER'], 'HI')
def test_capabilities_retrieval(self):
sp = servermanager.ServerPool()
with mock.patch(HTTPCON) as conmock:
rv = conmock.return_value.getresponse.return_value
rv.getheader.return_value = 'HASHHEADER'
# each server will get different capabilities
rv.read.side_effect = ['["a","b","c"]', '["b","c","d"]']
# pool capabilities is union of both
# normally capabilities should be the same across all servers
# this only happens in two situations:
# 1. a server is down
# 2. during upgrade/downgrade
self.assertEqual(set(['a', 'b', 'c', 'd']), sp.get_capabilities())
self.assertEqual(2, rv.read.call_count)
# the pool should cache after the first call
# so no more HTTP calls should be made
rv.read.side_effect = ['["w","x","y"]', '["x","y","z"]']
self.assertEqual(set(['a', 'b', 'c', 'd']), sp.get_capabilities())
self.assertEqual(2, rv.read.call_count)
def test_capabilities_retrieval_failure(self):
sp = servermanager.ServerPool()
with mock.patch(HTTPCON) as conmock:
rv = conmock.return_value.getresponse.return_value
rv.getheader.return_value = 'HASHHEADER'
# a failure to parse should result in an empty capability set
rv.read.return_value = 'XXXXX'
self.assertEqual([], sp.servers[0].get_capabilities())
# as capabilities is empty, it should try to update capabilities
rv.read.side_effect = ['{"a": "b"}', '["b","c","d"]']
self.assertEqual(set(['a', 'b', 'c', 'd']), sp.get_capabilities())
def test_reconnect_on_timeout_change(self):
sp = servermanager.ServerPool()
with mock.patch(HTTPCON) as conmock:
rv = conmock.return_value
rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
sp.servers[0].capabilities = ['keep-alive']
sp.servers[0].rest_call('GET', '/', timeout=10)
# even with keep-alive enabled, a change in timeout will trigger
# a reconnect
sp.servers[0].rest_call('GET', '/', timeout=75)
conmock.assert_has_calls([
mock.call('localhost', 9000, timeout=10),
mock.call('localhost', 9000, timeout=75),
], any_order=True)
def test_connect_failures(self):
sp = servermanager.ServerPool()
with mock.patch(HTTPCON, return_value=None):
resp = sp.servers[0].rest_call('GET', '/')
self.assertEqual(resp, (0, None, None, None))
# verify same behavior on ssl class
sp.servers[0].currentcon = False
sp.servers[0].is_ssl_enabled = True
with mock.patch(HTTPSCON, return_value=None):
resp = sp.servers[0].rest_call('GET', '/')
self.assertEqual(resp, (0, None, None, None))
    def test_reconnect_cached_connection(self):
        """Verify a failed cached connection is retried on a fresh one.

        Currently skipped: the cached-connection feature is disabled
        upstream, so the body below is unreachable until it returns.
        """
        self.skipTest("cached connections are currently disabled because "
                      "their assignment to the servermanager object is not "
                      "thread-safe")
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            rv.getresponse.return_value.getheader.return_value = 'HASH'
            sp.servers[0].capabilities = ['keep-alive']
            sp.servers[0].rest_call('GET', '/first')
            # raise an error on re-use to verify reconnect
            # return okay the second time so the reconnect works
            rv.request.side_effect = [http_client.ImproperConnectionState(),
                                      mock.MagicMock()]
            sp.servers[0].rest_call('GET', '/second')
            # '/second' appears twice: the failed attempt plus the retry
            uris = [c[1][1] for c in rv.request.mock_calls]
            expected = [
                sp.base_uri + '/first',
                sp.base_uri + '/second',
                sp.base_uri + '/second',
            ]
            self.assertEqual(uris, expected)
    def test_no_reconnect_recurse_to_infinity(self):
        """Verify the reconnect retry recursion stops after one attempt.

        Currently skipped: the cached-connection feature is disabled
        upstream, so the body below is unreachable until it returns.
        """
        self.skipTest("cached connections are currently disabled because "
                      "their assignment to the servermanager object is not "
                      "thread-safe")
        # retry uses recursion when a reconnect is necessary
        # this test makes sure it stops after 1 recursive call
        sp = servermanager.ServerPool()
        with mock.patch(HTTPCON) as conmock:
            rv = conmock.return_value
            # hash header must be string instead of mock object
            rv.getresponse.return_value.getheader.return_value = 'HASH'
            sp.servers[0].capabilities = ['keep-alive']
            sp.servers[0].rest_call('GET', '/first')
            # after retrying once, the rest call should raise the
            # exception up
            rv.request.side_effect = http_client.ImproperConnectionState()
            self.assertRaises(http_client.ImproperConnectionState,
                              sp.servers[0].rest_call,
                              *('GET', '/second'))
            # 1 for the first call, 2 for the second with retry
            self.assertEqual(rv.request.call_count, 3)
def test_socket_error(self):
sp = servermanager.ServerPool()
with mock.patch(HTTPCON) as conmock:
conmock.return_value.request.side_effect = socket.timeout()
resp = sp.servers[0].rest_call('GET', '/')
self.assertEqual(resp, (0, None, None, None))
def test_cert_get_fail(self):
pl = directory.get_plugin()
pl.servers.is_ssl_enabled = True
with mock.patch('os.path.exists', return_value=False):
self.assertRaises(cfg.Error,
pl.servers._get_combined_cert_for_server,
*('example.org', 443))
    def test_cert_make_dirs(self):
        """Cert lookup must create missing cert directories and combine certs.

        The os.path.exists side_effect order matters: base dir exists,
        the three child dirs do not, and the host cert does.
        """
        pl = directory.get_plugin()
        pl.servers.is_ssl_enabled = True
        cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
        # pretend base dir exists, 3 children don't, and host cert does
        with mock.patch('os.path.exists',
                        side_effect=[True, False, False,
                                     False, True]) as exmock,\
                mock.patch('os.makedirs') as makemock,\
                mock.patch(CERT_COMBINER) as combmock:
            # will raise error because no certs found
            self.assertIn(
                'example.org',
                pl.servers._get_combined_cert_for_server('example.org', 443)
            )
            base = cfg.CONF.RESTPROXY.ssl_cert_directory
            hpath = base + '/host_certs/example.org.pem'
            combpath = base + '/combined/example.org.pem'
            # host cert must be combined into the per-server bundle
            combmock.assert_has_calls([mock.call([hpath], combpath)])
            # one exists() probe per queued side_effect above
            self.assertEqual(exmock.call_count, 5)
            # one makedirs() per missing child directory
            self.assertEqual(makemock.call_count, 3)
def test_no_cert_error(self):
pl = directory.get_plugin()
pl.servers.is_ssl_enabled = True
cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
# pretend base dir exists and 3 children do, but host cert doesn't
with mock.patch(
'os.path.exists',
side_effect=[True, True, True, True, False]
) as exmock:
# will raise error because no certs found
self.assertRaises(
cfg.Error,
pl.servers._get_combined_cert_for_server,
*('example.org', 443)
)
self.assertEqual(exmock.call_count, 5)
def test_action_success(self):
pl = directory.get_plugin()
self.assertTrue(pl.servers.action_success((200,)))
def test_server_failure(self):
pl = directory.get_plugin()
self.assertTrue(pl.servers.server_failure((404,)))
# server failure has an ignore codes option
self.assertFalse(pl.servers.server_failure((404,),
ignore_codes=[404]))
    def test_retry_on_unavailable(self):
        """503 responses must be retried with a sleep between attempts.

        The retry count and sleep interval come from the servermanager
        module constants; sleeps happen between calls, so there is one
        fewer sleep than rest calls.
        """
        pl = directory.get_plugin()
        with mock.patch(SERVER_REST_CALL,
                        return_value=(http_client.SERVICE_UNAVAILABLE,
                                      0, 0, 0)) as srestmock,\
                mock.patch(SERVER_MANAGER + '.eventlet.sleep') as tmock:
            # making a call should trigger retries with sleeps in between
            pl.servers.rest_call('GET', '/', '', None, [])
            rest_call = [mock.call('GET', '/', '', None, False,
                                   reconnect=True)]
            rest_call_count = (
                servermanager.HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1)
            srestmock.assert_has_calls(rest_call * rest_call_count)
            sleep_call = [mock.call(
                servermanager.HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)]
            # should sleep 1 less time than the number of calls
            sleep_call_count = rest_call_count - 1
            tmock.assert_has_calls(sleep_call * sleep_call_count)
    def test_delete_failure_forces_topo_sync(self):
        """A failed DELETE must force a topology sync with check_ts=True."""
        pl = directory.get_plugin()
        with mock.patch(SERVER_REST_CALL,
                        return_value=(http_client.INTERNAL_SERVER_ERROR,
                                      0, 0, 0)), \
                mock.patch(POOL_TOPO_SYNC,
                           return_value=(False,
                                         servermanager.TOPO_RESPONSE_OK)) \
                as topo_mock:
            # a failed DELETE call should trigger a forced topo_sync
            # with check_ts True
            self.assertRaises(servermanager.RemoteRestError,
                              pl.servers.rest_action,
                              **{'action': 'DELETE', 'resource': '/',
                                 'data': '',
                                 'errstr': "Unable to DELETE query to BCF: %s",
                                 'ignore_codes': []})
            topo_mock.assert_called_once_with(**{'check_ts': True})
def test_post_failure_forces_topo_sync(self):
pl = directory.get_plugin()
with mock.patch(SERVER_REST_CALL,
return_value=(http_client.INTERNAL_SERVER_ERROR,
0, 0, 0)), \
mock.patch(POOL_TOPO_SYNC,
return_value=(False,
servermanager.TOPO_RESPONSE_OK)) \
as topo_mock:
# a failed POST call should trigger a forced topo_sync
# with check_ts True
self.assertRaises(servermanager.RemoteRestError,
pl.servers.rest_action,
**{'action': 'POST', 'resource': | |
# Repository: jumploop/Python3_WebSpider
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/9/23 23:06
# @Author : 一叶知秋
# @File : simple.py
# @Software: PyCharm
import codecs
import copy
import csv
import json
import logging
import logging.config
import math
import os
import random
import sys
from collections import OrderedDict
from datetime import date, datetime, timedelta
from time import sleep
import requests
from lxml import etree
from requests.adapters import HTTPAdapter
from tqdm import tqdm
# load the shared logging configuration that lives next to this file
logging_path = os.path.split(
    os.path.realpath(__file__))[0] + os.sep + 'logging.conf'
logging.config.fileConfig(logging_path)
logger = logging.getLogger('weibo')
class Weibo(object):
    def __init__(self, config):
        """Initialize crawler state from a validated config dict."""
        self.validate_config(config)
        self.filter = config[
            'filter']  # 0 = crawl all of a user's posts, 1 = original posts only
        since_date = config['since_date']
        if isinstance(since_date, int):
            # an integer means "this many days back from today"
            since_date = date.today() - timedelta(since_date)
        since_date = str(since_date)
        self.since_date = since_date  # crawl posts published from this yyyy-mm-dd date onward
        self.write_mode = config[
            'write_mode']  # list of output sinks: csv, mongo and/or mysql
        self.original_pic_download = config[
            'original_pic_download']  # 0/1: download images of original posts
        self.retweet_pic_download = config[
            'retweet_pic_download']  # 0/1: download images of retweeted posts
        self.original_video_download = config[
            'original_video_download']  # 0/1: download videos of original posts
        self.retweet_video_download = config[
            'retweet_video_download']  # 0/1: download videos of retweeted posts
        self.cookie = {'Cookie': config.get('cookie')}  # weibo cookie, optional
        self.mysql_config = config.get('mysql_config')  # MySQL connection settings, optional
        user_id_list = config['user_id_list']
        if not isinstance(user_id_list, list):
            # a non-list value is a path to a txt file with one user per line
            if not os.path.isabs(user_id_list):
                user_id_list = os.path.split(
                    os.path.realpath(__file__))[0] + os.sep + user_id_list
            self.user_config_file_path = user_id_list  # path of the user config file
            user_config_list = self.get_user_config_list(user_id_list)
        else:
            self.user_config_file_path = ''
            user_config_list = [{
                'user_id': user_id,
                'since_date': self.since_date
            } for user_id in user_id_list]
        self.user_config_list = user_config_list  # per-user configs to crawl
        self.user_config = {}  # config of the user currently being crawled
        self.start_date = ''  # date of the user's first crawled post
        self.user = {}  # info of the target weibo user
        self.got_count = 0  # number of posts fetched so far
        self.weibo = []  # all fetched posts
        self.weibo_id_list = []  # ids of all fetched posts
    def validate_config(self, config):
        """Validate the config dict; exits the process on invalid values."""
        # these flags must all be 0 or 1
        argument_list = [
            'filter', 'original_pic_download', 'retweet_pic_download',
            'original_video_download', 'retweet_video_download'
        ]
        for argument in argument_list:
            if config[argument] != 0 and config[argument] != 1:
                logger.warning(u'%s值应为0或1,请重新输入', config[argument])
                sys.exit()
        # since_date must be yyyy-mm-dd or an int (days back from today)
        since_date = config['since_date']
        if (not self.is_date(str(since_date))) and (not isinstance(
                since_date, int)):
            logger.warning(u'since_date值应为yyyy-mm-dd形式或整数,请重新输入')
            sys.exit()
        # write_mode must be a list drawn from the supported sinks
        write_mode = ['csv', 'json', 'mongo', 'mysql']
        if not isinstance(config['write_mode'], list):
            sys.exit(u'write_mode值应为list类型')
        for mode in config['write_mode']:
            if mode not in write_mode:
                logger.warning(
                    u'%s为无效模式,请从csv、json、mongo和mysql中挑选一个或多个作为write_mode',
                    mode)
                sys.exit()
        # user_id_list must be a list or a path to an existing txt file
        user_id_list = config['user_id_list']
        if (not isinstance(user_id_list,
                           list)) and (not user_id_list.endswith('.txt')):
            logger.warning(u'user_id_list值应为list类型或txt文件路径')
            sys.exit()
        if not isinstance(user_id_list, list):
            if not os.path.isabs(user_id_list):
                user_id_list = os.path.split(
                    os.path.realpath(__file__))[0] + os.sep + user_id_list
            if not os.path.isfile(user_id_list):
                logger.warning(u'不存在%s文件', user_id_list)
                sys.exit()
def is_date(self, since_date):
"""判断日期格式是否正确"""
try:
datetime.strptime(since_date, '%Y-%m-%d')
return True
except ValueError:
return False
def get_json(self, params):
"""获取网页中json数据"""
url = 'https://m.weibo.cn/api/container/getIndex?'
r = requests.get(url, params=params, cookies=self.cookie)
return r.json()
def get_weibo_json(self, page):
"""获取网页中微博json数据"""
params = {
'containerid': '107603' + str(self.user_config['user_id']),
'page': page
}
js = self.get_json(params)
return js
    def user_to_csv(self):
        """Write the crawled user info to weibo/users.csv next to this file."""
        file_dir = os.path.split(
            os.path.realpath(__file__))[0] + os.sep + 'weibo'
        if not os.path.isdir(file_dir):
            os.makedirs(file_dir)
        file_path = file_dir + os.sep + 'users.csv'
        # column headers, in the same order as the values in self.user
        result_headers = [
            '用户id', '昵称', '性别', '生日', '所在地', '学习经历', '公司', '注册时间', '阳光信用',
            '微博数', '粉丝数', '关注数', '简介', '主页', '头像', '高清头像', '微博等级', '会员等级',
            '是否认证', '认证类型', '认证信息'
        ]
        # utf-8-encode any Python-2 unicode values for the csv writer
        result_data = [[
            v.encode('utf-8') if 'unicode' in str(type(v)) else v
            for v in self.user.values()
        ]]
        self.csv_helper(result_headers, result_data, file_path)
def user_to_mongodb(self):
"""将爬取的用户信息写入MongoDB数据库"""
user_list = [self.user]
self.info_to_mongodb('user', user_list)
logger.info(u'%s信息写入MongoDB数据库完毕', self.user['screen_name'])
    def user_to_mysql(self):
        """Write the crawled user info into the MySQL 'weibo' database."""
        # NOTE(review): '<PASSWORD>' appears to be a scrubbed placeholder —
        # the real password must be configured before this can connect
        mysql_config = {
            'host': 'localhost',
            'port': 3306,
            'user': 'root',
            'password': '<PASSWORD>',
            'charset': 'utf8mb4'
        }
        # create the 'weibo' database if it does not exist yet
        create_database = """CREATE DATABASE IF NOT EXISTS weibo DEFAULT
                         CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci"""
        self.mysql_create_database(mysql_config, create_database)
        # create the 'user' table if it does not exist yet
        create_table = """
                CREATE TABLE IF NOT EXISTS user (
                id varchar(20) NOT NULL,
                screen_name varchar(30),
                gender varchar(10),
                statuses_count INT,
                followers_count INT,
                follow_count INT,
                registration_time varchar(20),
                sunshine varchar(20),
                birthday varchar(40),
                location varchar(200),
                education varchar(200),
                company varchar(200),
                description varchar(140),
                profile_url varchar(200),
                profile_image_url varchar(200),
                avatar_hd varchar(200),
                urank INT,
                mbrank INT,
                verified BOOLEAN DEFAULT 0,
                verified_type INT,
                verified_reason varchar(140),
                PRIMARY KEY (id)
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4"""
        self.mysql_create_table(mysql_config, create_table)
        self.mysql_insert(mysql_config, 'user', [self.user])
        logger.info(u'%s信息写入MySQL数据库完毕', self.user['screen_name'])
def user_to_database(self):
"""将用户信息写入文件/数据库"""
self.user_to_csv()
if 'mysql' in self.write_mode:
self.user_to_mysql()
if 'mongo' in self.write_mode:
self.user_to_mongodb()
    def get_user_info(self):
        """Fetch, normalize and persist the target user's profile info."""
        params = {'containerid': '100505' + str(self.user_config['user_id'])}
        js = self.get_json(params)
        if js['ok']:
            info = js['data']['userInfo']
            user_info = OrderedDict()
            user_info['id'] = self.user_config['user_id']
            user_info['screen_name'] = info.get('screen_name', '')
            user_info['gender'] = info.get('gender', '')
            # a second request retrieves the detailed profile card
            params = {
                'containerid':
                '230283' + str(self.user_config['user_id']) + '_-_INFO'
            }
            # labels shown on the profile page (Chinese) ...
            zh_list = [
                u'生日', u'所在地', u'小学', u'初中', u'高中', u'大学', u'公司', u'注册时间',
                u'阳光信用'
            ]
            # ... and the keys they map to; the several school levels
            # all map onto the single 'education' key
            en_list = [
                'birthday', 'location', 'education', 'education', 'education',
                'education', 'company', 'registration_time', 'sunshine'
            ]
            for i in en_list:
                user_info[i] = ''
            js = self.get_json(params)
            if js['ok']:
                cards = js['data']['cards']
                if isinstance(cards, list) and len(cards) > 1:
                    card_list = cards[0]['card_group'] + cards[1]['card_group']
                    for card in card_list:
                        if card.get('item_name') in zh_list:
                            user_info[en_list[zh_list.index(
                                card.get('item_name'))]] = card.get(
                                    'item_content', '')
            user_info['statuses_count'] = info.get('statuses_count', 0)
            user_info['followers_count'] = info.get('followers_count', 0)
            user_info['follow_count'] = info.get('follow_count', 0)
            user_info['description'] = info.get('description', '')
            user_info['profile_url'] = info.get('profile_url', '')
            user_info['profile_image_url'] = info.get('profile_image_url', '')
            user_info['avatar_hd'] = info.get('avatar_hd', '')
            user_info['urank'] = info.get('urank', 0)
            user_info['mbrank'] = info.get('mbrank', 0)
            user_info['verified'] = info.get('verified', False)
            user_info['verified_type'] = info.get('verified_type', -1)
            user_info['verified_reason'] = info.get('verified_reason', '')
            user = self.standardize_info(user_info)
            self.user = user
            self.user_to_database()
            return user
    def get_long_weibo(self, id):
        """Fetch the full text of a truncated ("long") weibo post.

        Tries up to 5 times with a random sleep between attempts;
        returns the parsed weibo dict, or None if every attempt fails.
        """
        for i in range(5):
            url = 'https://m.weibo.cn/detail/%s' % id
            html = requests.get(url, cookies=self.cookie).text
            # carve the embedded "status" JSON object out of the page source
            html = html[html.find('"status":'):]
            html = html[:html.rfind('"hotScheme"')]
            html = html[:html.rfind(',')]
            html = '{' + html + '}'
            js = json.loads(html, strict=False)
            weibo_info = js.get('status')
            if weibo_info:
                weibo = self.parse_weibo(weibo_info)
                return weibo
            sleep(random.randint(6, 10))
def get_pics(self, weibo_info):
"""获取微博原始图片url"""
if weibo_info.get('pics'):
pic_info = weibo_info['pics']
pic_list = [pic['large']['url'] for pic in pic_info]
pics = ','.join(pic_list)
else:
pics = ''
return pics
def get_live_photo(self, weibo_info):
"""获取live photo中的视频url"""
live_photo_list = []
live_photo = weibo_info.get('pic_video')
if live_photo:
prefix = 'https://video.weibo.com/media/play?livephoto=//us.sinaimg.cn/'
for i in live_photo.split(','):
if len(i.split(':')) == 2:
url = prefix + i.split(':')[1] + '.mov'
live_photo_list.append(url)
return live_photo_list
def get_video_url(self, weibo_info):
"""获取微博视频url"""
video_url = ''
video_url_list = []
if weibo_info.get('page_info'):
if weibo_info['page_info'].get('media_info') and weibo_info[
'page_info'].get('type') == 'video':
media_info = weibo_info['page_info']['media_info']
video_url = media_info.get('mp4_720p_mp4')
if not video_url:
video_url = media_info.get('mp4_hd_url')
if not video_url:
video_url = media_info.get('mp4_sd_url')
if not video_url:
video_url = media_info.get('stream_url_hd')
if not video_url:
video_url = media_info.get('stream_url')
if video_url:
video_url_list.append(video_url)
live_photo_list = self.get_live_photo(weibo_info)
if live_photo_list:
video_url_list += live_photo_list
return ';'.join(video_url_list)
    def download_one_file(self, url, file_path, type, weibo_id):
        """Download one image/video; log failures to not_downloaded.txt."""
        try:
            # skip files that were already downloaded on a previous run
            if not os.path.isfile(file_path):
                s = requests.Session()
                # retry transient network errors up to 5 times
                s.mount(url, HTTPAdapter(max_retries=5))
                downloaded = s.get(url, cookies=self.cookie, timeout=(5, 10))
                with open(file_path, 'wb') as f:
                    f.write(downloaded.content)
        except Exception as e:
            # record the failed url so it can be retried manually later
            error_file = self.get_filepath(
                type) + os.sep + 'not_downloaded.txt'
            with open(error_file, 'ab') as f:
                url = str(weibo_id) + ':' + file_path + ':' + url + '\n'
                f.write(url.encode(sys.stdout.encoding))
            logger.exception(e)
def handle_download(self, file_type, file_dir, urls, w):
"""处理下载相关操作"""
file_prefix = w['created_at'][:11].replace('-', '') + '_' + str(
w['id'])
if file_type == 'img':
if ',' in urls:
url_list = urls.split(',')
for i, url in enumerate(url_list):
index = url.rfind('.')
if len(url) - index >= 5:
file_suffix = '.jpg'
else:
file_suffix = url[index:]
file_name = file_prefix + '_' + str(i + 1) + file_suffix
file_path = file_dir + os.sep + file_name
self.download_one_file(url, file_path, file_type, w['id'])
else:
index = urls.rfind('.')
if len(urls) - index > 5:
file_suffix = '.jpg'
else:
file_suffix = urls[index:]
file_name = file_prefix + file_suffix
file_path = file_dir + os.sep + file_name
self.download_one_file(urls, file_path, file_type, w['id'])
else:
file_suffix = '.mp4'
if ';' in urls:
url_list = urls.split(';')
if url_list[0].endswith('.mov'):
file_suffix = '.mov'
for i, url in enumerate(url_list):
file_name = file_prefix + '_' + str(i + 1) + file_suffix
file_path = file_dir + os.sep + file_name
self.download_one_file(url, file_path, file_type, w['id'])
else:
if urls.endswith('.mov'):
file_suffix = '.mov'
file_name = file_prefix + file_suffix
file_path = file_dir + os.sep + file_name
self.download_one_file(urls, file_path, file_type, w['id'])
    def download_files(self, file_type, weibo_type, wrote_count):
        """Download all media of the newly crawled posts.

        :param file_type: 'img' for pictures, anything else for videos
        :param weibo_type: 'original' or 'retweet'
        :param wrote_count: index of the first post not yet handled
        """
        try:
            describe = ''
            if file_type == 'img':
                describe = u'图片'
                key = 'pics'
            else:
                describe = u'视频'
                key = 'video_url'
            if weibo_type == 'original':
                describe = u'原创微博' + describe
            else:
                describe = u'转发微博' + describe
            logger.info(u'即将进行%s下载', describe)
            # media goes into a per-category subdirectory
            file_dir = self.get_filepath(file_type)
            file_dir = file_dir + os.sep + describe
            if not os.path.isdir(file_dir):
                os.makedirs(file_dir)
            for w in tqdm(self.weibo[wrote_count:], desc='Download progress'):
                if weibo_type == 'retweet':
                    # for retweets, download the retweeted post's media
                    if w.get('retweet'):
                        w = w['retweet']
                    else:
                        continue
                if w.get(key):
                    self.handle_download(file_type, file_dir, w.get(key), w)
            logger.info(u'%s下载完毕,保存路径:', describe)
            logger.info(file_dir)
        except Exception as e:
            logger.exception(e)
def get_location(self, selector):
"""获取微博发布位置"""
location_icon = 'timeline_card_small_location_default.png'
span_list = selector.xpath('//span')
location = ''
for i, span in enumerate(span_list):
if span.xpath('img/@src'):
if location_icon in span.xpath('img/@src')[0]:
location = span_list[i + 1].xpath('string(.)')
break
return location
def get_article_url(self, selector):
"""获取微博中头条文章的url"""
article_url = ''
text = selector.xpath('string(.)')
if text.startswith(u'发布了头条文章'):
url = selector.xpath('//a/@data-url')
if url and url[0].startswith('http://t.cn'):
article_url = url[0]
return article_url
def get_topics(self, selector):
"""获取参与的微博话题"""
span_list = selector.xpath("//span[@class='surl-text']")
topics = ''
topic_list = []
| |
res += C12[u, v].dot(x.dot(AA34[u, v].conj().T))
return res
def eps_r_op_2s_AA12_C34(x, AA12, C34):
    """Apply a two-site transfer operation from the right.

    Computes sum_{u,v} AA12[u, v] . x . C34[u, v]^dagger.
    Uses ``range`` instead of the Python-2-only ``xrange`` so the helper
    also runs under Python 3 (behavior under Python 2 is unchanged).
    """
    res = np.zeros((AA12.shape[2], C34.shape[2]), dtype=C34.dtype)
    for u in range(C34.shape[0]):
        for v in range(C34.shape[1]):
            res += AA12[u, v].dot(x.dot(C34[u, v].conj().T))
    return res
def eps_l_op_2s_AA12_C34(x, AA12, C34):
    """Apply a two-site transfer operation from the left.

    Computes sum_{u,v} AA12[u, v]^dagger . x . C34[u, v].
    Uses ``range`` instead of the Python-2-only ``xrange`` for
    Python 2/3 compatibility.
    """
    res = np.zeros((AA12.shape[3], C34.shape[3]), dtype=C34.dtype)
    for u in range(C34.shape[0]):
        for v in range(C34.shape[1]):
            res += AA12[u, v].conj().T.dot(x.dot(C34[u, v]))
    return res
def eps_l_op_2s_A1_A2_C34(x, A1, A2, C34):
    """Left two-site transfer operation with the AA product formed on the fly.

    Computes sum_{u,v} (A1[u] . A2[v])^dagger . x . C34[u, v].
    Uses ``range`` instead of the Python-2-only ``xrange`` for
    Python 2/3 compatibility.
    """
    res = np.zeros((A2.shape[2], C34.shape[3]), dtype=C34.dtype)
    for u in range(C34.shape[0]):
        for v in range(C34.shape[1]):
            res += (A1[u].dot(A2[v])).conj().T.dot(x.dot(C34[u, v]))
    return res
def eps_r_op_3s_C123_AAA456(x, C123, AAA456):
    """Apply a three-site transfer operation from the right.

    Computes sum_{u,v,w} C123[u, v, w] . x . AAA456[u, v, w]^dagger.
    Uses ``range`` instead of the Python-2-only ``xrange`` for
    Python 2/3 compatibility.
    """
    res = np.zeros((C123.shape[3], AAA456.shape[3]), dtype=AAA456.dtype)
    for u in range(AAA456.shape[0]):
        for v in range(AAA456.shape[1]):
            for w in range(AAA456.shape[2]):
                res += C123[u, v, w].dot(x.dot(AAA456[u, v, w].conj().T))
    return res
def eps_l_op_3s_AAA123_C456(x, AAA123, C456):
    """Apply a three-site transfer operation from the left.

    Computes sum_{u,v,w} AAA123[u, v, w]^dagger . x . C456[u, v, w].
    Uses ``range`` instead of the Python-2-only ``xrange`` for
    Python 2/3 compatibility.
    """
    res = np.zeros((AAA123.shape[4], C456.shape[4]), dtype=C456.dtype)
    for u in range(C456.shape[0]):
        for v in range(C456.shape[1]):
            for w in range(C456.shape[2]):
                res += AAA123[u, v, w].conj().T.dot(x.dot(C456[u, v, w]))
    return res
def calc_AA(A, Ap1):
    """Pre-compute the pairwise products AA[u, v] = A[u] . Ap1[v].

    :param A: site tensor, indexed A[u] with matrices of shape (Dm1, D)
    :param Ap1: next-site tensor, indexed Ap1[v] with shape (D, Dp1)
    :return: array of shape (q, qp1, Dm1, Dp1)

    Uses np.zeros and ``range`` (instead of the deprecated scipy alias
    sp.zeros and Python-2-only ``xrange``) for consistency with the
    eps_* helpers and Python 3 compatibility. The old debug prints and
    the commented-out reference implementation were removed.
    """
    Dp1 = Ap1.shape[2]
    Dm1 = A.shape[1]
    q = A.shape[0]
    qp1 = Ap1.shape[0]
    AA = np.zeros((q, qp1, Dm1, Dp1), dtype=A.dtype)
    for u in range(q):
        for v in range(qp1):
            AA[u, v] = A[u].dot(Ap1[v])
    return AA
def calc_AAA(A, Ap1, Ap2):
    """Pre-compute triple products AAA[u, v, w] = A[u] . Ap1[v] . Ap2[w].

    :return: array of shape (q, qp1, qp2, Dm1, Dp2)

    Uses np.zeros and ``range`` (instead of sp.zeros and the
    Python-2-only ``xrange``) for consistency with the eps_* helpers
    and Python 3 compatibility.
    """
    Dp2 = Ap2.shape[2]
    Dm1 = A.shape[1]
    q = A.shape[0]
    qp1 = Ap1.shape[0]
    qp2 = Ap2.shape[0]
    AAA = np.zeros((q, qp1, qp2, Dm1, Dp2), dtype=A.dtype)
    for u in range(q):
        for v in range(qp1):
            for w in range(qp2):
                AAA[u, v, w] = A[u].dot(Ap1[v]).dot(Ap2[w])
    return AAA
def calc_C_mat_op_AA(op, AA):
    """Contract a two-site operator with the AA products.

    Computes C[s, t] = sum_{u,v} op[s, t, u, v] * AA[u, v].
    Uses np.tensordot (the sp.* alias relies on scipy re-exporting
    numpy functions, which modern scipy no longer does); the leftover
    debug-print comments were removed.
    """
    return np.tensordot(op, AA, ((2, 3), (0, 1)))
def calc_C_3s_mat_op_AAA(op, AAA):
    """Contract a three-site operator with the AAA products.

    Computes C[s, t, r] = sum_{u,v,w} op[s, t, r, u, v, w] * AAA[u, v, w].
    Uses np.tensordot instead of the deprecated scipy alias.
    """
    return np.tensordot(op, AAA, ((3, 4, 5), (0, 1, 2)))
def calc_C_conj_mat_op_AA(op, AA):
    """Contract the conjugated operator with AA over the physical indices.

    Computes C[u, v] = sum_{s,t} conj(op)[s, t, u, v] * AA[s, t].
    Uses np.tensordot instead of the deprecated scipy alias.
    """
    return np.tensordot(op.conj(), AA, ((0, 1), (0, 1)))
def calc_C_func_op(op, A, Ap1):
    """Like calc_C_mat_op_AA, but with the operator given as a callable.

    :param op: callable op(s, t, u, v) returning a scalar matrix element
    Zero elements are skipped to avoid needless matrix additions.
    Uses np.zeros and ``range`` for consistency and Python 3 support.
    """
    q = A.shape[0]
    qp1 = Ap1.shape[0]
    C = np.zeros((A.shape[0], Ap1.shape[0], A.shape[1], Ap1.shape[2]), dtype=A.dtype)
    for u in range(q):
        for v in range(qp1):
            AAuv = A[u].dot(Ap1[v])
            for s in range(q):
                for t in range(qp1):
                    h_nn_stuv = op(s, t, u, v)
                    if h_nn_stuv != 0:
                        C[s, t] += h_nn_stuv * AAuv
    return C
def calc_C_func_op_AA(op, AA):
    """Like calc_C_func_op, but with the AA products already computed.

    :param op: callable op(s, t, u, v) returning a scalar matrix element
    Uses np.zeros_like and ``range`` for consistency and Python 3 support.
    """
    q = AA.shape[0]
    qp1 = AA.shape[1]
    C = np.zeros_like(AA)
    for u in range(q):
        for v in range(qp1):
            AAuv = AA[u, v]
            for s in range(q):
                for t in range(qp1):
                    h_nn_stuv = op(s, t, u, v)
                    if h_nn_stuv != 0:
                        C[s, t] += h_nn_stuv * AAuv
    return C
def calc_K(Kp1, C, lm1, rp1, A, Ap1, sanity_checks=False):
    """One step of the right-to-left K recursion.

    Returns (K, op_expect) where K accumulates the transferred Kp1 plus
    the local Hamiltonian term Hr, and op_expect = mm.adot(lm1, Hr).

    Fix: the two bare ``print`` statements were Python-2-only syntax
    (a SyntaxError under Python 3); they now use the function form,
    which behaves identically on both versions. np.zeros/``range``
    replace the deprecated sp.zeros and ``xrange``.
    """
    Dm1 = A.shape[1]
    q = A.shape[0]
    qp1 = Ap1.shape[0]
    K = np.zeros((Dm1, Dm1), dtype=A.dtype)
    Hr = np.zeros_like(K)
    for s in range(q):
        Ash = A[s].conj().T
        for t in range(qp1):
            if len(Ap1[t].shape) > 2:
                # debug aid left from development (German: "wrong
                # dimension detected")
                print("Falsche dimension erkannt:")
                print(mm.H(Ap1[t]).shape)
            Hr += C[s, t].dot(rp1.dot(mm.H(Ap1[t]).dot(Ash)))
        K += A[s].dot(Kp1.dot(Ash))
    op_expect = mm.adot(lm1, Hr)
    K += Hr
    return K, op_expect
def calc_K_l(Km1, Cm1, lm2, r, A, Am1, sanity_checks=False):
    """Calculates the K_left using the recursive definition.

    This is the "bra-vector" K_left, which means (K_left.dot(r)).trace() = <K_left|r>.
    In other words, K_left ~ <K_left| and K_left.conj().T ~ |K_left>.

    Uses np.zeros and ``range`` (instead of the deprecated sp alias and
    Python-2-only ``xrange``) so the helper also runs under Python 3.
    """
    D = A.shape[2]
    q = A.shape[0]
    qm1 = Am1.shape[0]
    K = np.zeros((D, D), dtype=A.dtype)
    Hl = np.zeros_like(K)
    for s in range(qm1):
        Am1sh = Am1[s].conj().T
        for t in range(q):
            Hl += A[t].conj().T.dot(Am1sh).dot(lm2.dot(Cm1[s, t]))
        K += A[s].conj().T.dot(Km1.dot(A[s]))
    op_expect = mm.adot_noconj(Hl, r)
    K += Hl
    return K, op_expect
def calc_K_3s(Kp1, C, lm1, rp2, A, Ap1, Ap2, sanity_checks=False):
    """Three-site variant of the right-to-left K recursion.

    Returns (K, op_expect) with the local term Hr built from the
    three-site C tensor; op_expect = mm.adot(lm1, Hr).
    Uses np.zeros and ``range`` (instead of the deprecated sp alias and
    Python-2-only ``xrange``) so the helper also runs under Python 3.
    """
    Dm1 = A.shape[1]
    q = A.shape[0]
    qp1 = Ap1.shape[0]
    qp2 = Ap2.shape[0]
    K = np.zeros((Dm1, Dm1), dtype=A.dtype)
    Hr = np.zeros_like(K)
    for s in range(q):
        Ash = A[s].conj().T
        for t in range(qp1):
            Ath = Ap1[t].conj().T
            for u in range(qp2):
                Hr += C[s, t, u].dot(rp2.dot(mm.H(Ap2[u]).dot(Ath).dot(Ash)))
        K += A[s].dot(Kp1.dot(Ash))
    op_expect = mm.adot(lm1, Hr)
    K += Hr
    return K, op_expect
def herm_sqrt_inv(x, zero_tol=1E-15, sanity_checks=False, return_rank=False, sc_data=''):
    """Compute the square root and pseudo-inverse square root of a
    Hermitian matrix.

    Eigenvalues at or below zero_tol are treated as exact zeros, so the
    "inverse" is a pseudo-inverse on the remaining eigenspace.

    Returns (x_sqrt, x_sqrt_i), plus the numerical rank if return_rank.
    """
    if isinstance(x, mm.eyemat):
        # the identity is its own root and inverse root
        x_sqrt = x
        x_sqrt_i = x
        rank = x.shape[0]
    else:
        try:
            ev = x.diag #simple_diag_matrix
            EV = None
        except AttributeError:
            ev, EV = la.eigh(x)
        zeros = ev <= zero_tol #throw away negative results too!
        ev_sqrt = sp.sqrt(ev)
        # silence divide-by-zero warnings while inverting; the zero
        # entries are overwritten below anyway
        err = sp.seterr(divide='ignore', invalid='ignore')
        try:
            ev_sqrt_i = 1 / ev_sqrt
            ev_sqrt[zeros] = 0
            ev_sqrt_i[zeros] = 0
        finally:
            sp.seterr(divide=err['divide'], invalid=err['invalid'])
        if EV is None:
            # already diagonal: wrap the transformed eigenvalues directly
            x_sqrt = mm.simple_diag_matrix(ev_sqrt, dtype=x.dtype)
            x_sqrt_i = mm.simple_diag_matrix(ev_sqrt_i, dtype=x.dtype)
        else:
            # reassemble EV . diag(ev_sqrt) . EV^dagger
            B = mm.mmul_diag(ev_sqrt, EV.conj().T)
            x_sqrt = EV.dot(B)
            B = mm.mmul_diag(ev_sqrt_i, EV.conj().T)
            x_sqrt_i = EV.dot(B)
        rank = x.shape[0] - np.count_nonzero(zeros)
        if sanity_checks:
            if ev.min() < -zero_tol:
                log.warning("Sanity Fail in herm_sqrt_inv(): Throwing away negative eigenvalues! %s %s",
                            ev.min(), sc_data)
            if not np.allclose(x_sqrt.dot(x_sqrt), x):
                log.warning("Sanity Fail in herm_sqrt_inv(): x_sqrt is bad! %s %s",
                            la.norm(x_sqrt.dot(x_sqrt) - x), sc_data)
            if EV is None:
                nulls = sp.zeros(x.shape[0])
                nulls[zeros] = 1
                nulls = sp.diag(nulls)
            else: #if we did an EVD then we use the eigenvectors
                nulls = EV.copy()
                nulls[:, sp.invert(zeros)] = 0
                nulls = nulls.dot(nulls.conj().T)
            eye = np.eye(x.shape[0])
            if not np.allclose(x_sqrt.dot(x_sqrt_i), eye - nulls):
                log.warning("Sanity Fail in herm_sqrt_inv(): x_sqrt_i is bad! %s %s",
                            la.norm(x_sqrt.dot(x_sqrt_i) - eye + nulls), sc_data)
    if return_rank:
        return x_sqrt, x_sqrt_i, rank
    else:
        return x_sqrt, x_sqrt_i
def calc_l_r_roots(lm1, r, zero_tol=1E-15, sanity_checks=False, sc_data=''):
    """Compute Hermitian square roots and inverse roots of lm1 and r."""
    roots = []
    for mat, tag in ((lm1, 'l'), (r, 'r')):
        roots.extend(herm_sqrt_inv(mat, zero_tol=zero_tol,
                                   sanity_checks=sanity_checks,
                                   sc_data=(sc_data, tag)))
    # (l_sqrt, l_sqrt_i, r_sqrt, r_sqrt_i)
    return tuple(roots)
def calc_Vsh(A, r_s, sanity_checks=False):
    """Compute the (conjugated, transposed) null-space tensor Vsh of A
    with respect to r_s.

    Vconj spans the null space of the matrix R built from r_s and A, so
    that eps_r_noop(r_s, V, A) vanishes (checked when sanity_checks).
    """
    D = A.shape[2]
    Dm1 = A.shape[1]
    q = A.shape[0]
    R = sp.zeros((D, q, Dm1), dtype=A.dtype, order='C')
    for s in xrange(q):
        R[:,s,:] = r_s.dot(A[s].conj().T)
    # flatten the (D, q) indices so the null space can be found by QR
    R = R.reshape((q * D, Dm1))
    Vconj = ns.nullspace_qr(R.conj().T).T
    if sanity_checks:
        if not sp.allclose(mm.mmul(Vconj.conj(), R), 0):
            log.warning("Sanity Fail in calc_Vsh!: VR != 0")
        if not sp.allclose(mm.mmul(Vconj, Vconj.conj().T), sp.eye(Vconj.shape[0])):
            log.warning("Sanity Fail in calc_Vsh!: V H(V) != eye")
    # the null space has dimension q*D - Dm1
    Vconj = Vconj.reshape((q * D - Dm1, D, q))
    Vsh = Vconj.T
    Vsh = sp.asarray(Vsh, order='C')
    if sanity_checks:
        Vs = sp.transpose(Vsh, axes=(0, 2, 1)).conj()
        M = eps_r_noop(r_s, Vs, A)
        if not sp.allclose(M, 0):
            log.warning("Sanity Fail in calc_Vsh!: Bad Vsh")
    return Vsh
def calc_Vsh_l(A, lm1_sqrt, sanity_checks=False):
    """Left-canonical counterpart of calc_Vsh.

    Builds L from lm1_sqrt and A and returns the conjugate-transposed
    null-space tensor Vsh, so that eps_l_noop(lm1_sqrt, A, V) vanishes
    (checked when sanity_checks).
    """
    D = A.shape[2]
    Dm1 = A.shape[1]
    q = A.shape[0]
    L = sp.zeros((D, q, Dm1), dtype=A.dtype, order='C')
    for s in xrange(q):
        L[:,s,:] = lm1_sqrt.dot(A[s]).conj().T
    # flatten the (q, Dm1) indices so the null space can be found by QR
    L = L.reshape((D, q * Dm1))
    V = ns.nullspace_qr(L)
    if sanity_checks:
        if not sp.allclose(L.dot(V), 0):
            log.warning("Sanity Fail in calc_Vsh_l!: LV != 0")
        if not sp.allclose(V.conj().T.dot(V), sp.eye(V.shape[1])):
            log.warning("Sanity Fail in calc_Vsh_l!: V H(V) != eye")
    # the null space has dimension q*Dm1 - D
    V = V.reshape((q, Dm1, q * Dm1 - D))
    Vsh = sp.transpose(V.conj(), axes=(0, 2, 1))
    Vsh = sp.asarray(Vsh, order='C')
    if sanity_checks:
        M = eps_l_noop(lm1_sqrt, A, V)
        if not sp.allclose(M, 0):
            log.warning("Sanity Fail in calc_Vsh_l!: Bad Vsh")
    return Vsh
def calc_x(Kp1, C, Cm1, rp1, lm2, Am1, A, Ap1, lm1_s, lm1_si, r_s, r_si, Vsh):
    """Compute the tangent-space parameter matrix x for one site.

    Combines the right-acting contributions (C with rp1, and Kp1) and
    the left-acting contribution (Cm1 with lm2), each projected through
    Vsh. Any of C, Kp1, lm2 may be None, in which case the corresponding
    term is skipped.
    """
    D = A.shape[2]
    Dm1 = A.shape[1]
    q = A.shape[0]
    x = np.zeros((Dm1, q * D - Dm1), dtype=A.dtype)
    x_part = np.empty_like(x)
    x_subpart = np.empty_like(A[0])
    assert not (C is None and not Kp1 is None) #existence of Kp1 implies existence of C
    if not C is None:
        x_part.fill(0)
        for s in xrange(q):
            x_subpart = eps_r_noop_inplace(rp1, C[s], Ap1, x_subpart) #~1st line
            if not Kp1 is None:
                x_subpart += A[s].dot(Kp1) #~3rd line
            x_part += x_subpart.dot(r_si.dot(Vsh[s]))
        x += lm1_s.dot(x_part)
    if not lm2 is None:
        x_part.fill(0)
        for s in xrange(q): #~2nd line
            x_subpart = eps_l_noop_inplace(lm2, Am1, Cm1[:, s], x_subpart)
            x_part += x_subpart.dot(r_s.dot(Vsh[s]))
        x += lm1_si.dot(x_part)
    return x
def calc_x_3s(Kp1, C, Cm1, Cm2, rp1, rp2, lm2, lm3, Am2, Am1, A, Ap1, Ap2,
lm1_s, lm1_si, r_s, r_si, Vsh):
D = A.shape[2]
Dm1 = A.shape[1]
q = A.shape[0]
x = np.zeros((Dm1, q * D - Dm1), dtype=A.dtype)
x_part = np.empty_like(x)
x_subpart = np.empty_like(A[0])
H = mm.H
assert not (C is None and not Kp1 is None)
if not C is None:
x_part.fill(0)
for s in | |
# File: voltha/adapters/adtran_olt/test/resources/test_adtranolt_platform.py
# Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import voltha.adapters.adtran_olt.resources.adtranolt_platform as platform
import pytest
"""
These test functions test the function "mk_uni_port_num()"
in the class "adtran_platform()". For the tests with simple inputs,
answers are reached by simply adding the bit shifted arguments
because there aren't any overlapping 1 bits, so this should be the
same as a logical OR. For the rest of the tests I manually calculated
what the answers should be for a variety of scenarios.
"""
@pytest.fixture()
def test():
    """Provide a fresh adtran_platform instance to each test."""
    plat = platform.adtran_platform()
    return plat
# simple args, relying on the defaulted third parameter
def test_adtran_platform_mk_uni_port_num(test):
    """Non-overlapping shifted bits: the OR equals the plain sum."""
    assert test.mk_uni_port_num(1, 1) == 2**11 + 2**4
# simple args with the defaulted parameter supplied explicitly
def test_adtran_platform_mk_uni_port_num_not_default(test):
    """All three arguments given; bits still do not overlap."""
    assert test.mk_uni_port_num(1, 1, 1) == 2**11 + 2**4 + 1
# scenario where the logical OR does not equal the sum
def test_adtran_platform_mk_uni_port_num_sum_dne_bitwise_or(test):
    """An onu value whose shifted bits overlap the intf bits."""
    assert test.mk_uni_port_num(1, 128, 1) == 128 * 2**4 + 1
#tests what happens when a negative number is introduced
def test_adtran_platform_mk_uni_port_num_negative(test):
output = test.mk_uni_port_num(-1, 1, 1)
assert output == -2031
#tests what happens when 2 negative numbers are introduced
def test_adtran_platform_mk_uni_port_num_negatives(test):
output = test.mk_uni_port_num(-1, -1, 1)
assert output == -15
#tests what happens when strings are passed as parameters
def test_adtran_platform_mk_uni_port_num_strings(test):
with pytest.raises(TypeError):
output = test.mk_uni_port_num("test", "test", "test")
#tests what happens when nothing is passed as a parameter
def test_adtran_platform_mk_uni_port_num_no_args(test):
with pytest.raises(TypeError):
output = test.mk_uni_port_num()
"""
These test the function "uni_id_from_uni_port()" in the class
"adtran_platform()". Several of these tests pass in a number between 0
and 15 which should all return the same number back. Two more pass in huge
numbers with the same last four digits. The one with all 1's returns 15 and
the one with all 0's returns 0. Finally, I made sure that passing
in either the wrong type of argument or none at all threw the
appropriate type error.
"""
# --- adtran_platform.uni_id_from_uni_port() method tests ---

# Logical AND of 15 and 15, which should stay the same.
def test_adtran_platform_uni_id_from_uni_port_same_num(test):
    assert test.uni_id_from_uni_port(15) == 15

# Logical AND of 15 with a huge number that is all 1's in binary.
def test_adtran_platform_uni_id_from_uni_port_huge_num_ones(test):
    assert test.uni_id_from_uni_port(17179869183) == 15

# Logical AND of 15 and 0.
def test_adtran_platform_uni_id_from_uni_port_zero(test):
    assert test.uni_id_from_uni_port(0) == 0

# Logical AND of 15 with a huge number whose last four bits are 0.
# (Function name keeps the original "platfrom" typo to preserve the test id.)
def test_adtran_platfrom_uni_id_from_uni_port_huge_num_zeros(test):
    assert test.uni_id_from_uni_port(17179869168) == 0

# Logical AND of 12 and 15.
def test_adtran_platfrom_uni_id_from_uni_port_twelve(test):
    assert test.uni_id_from_uni_port(12) == 12

# Logical AND of 9 and 15.
def test_adtran_platform_uni_id_from_uni_port_nine(test):
    assert test.uni_id_from_uni_port(9) == 9

# Logical AND of 3 and 15.
def test_adtran_platform_uni_id_from_uni_port_three(test):
    assert test.uni_id_from_uni_port(3) == 3

# A string argument raises a TypeError.
def test_adtran_platform_uni_id_from_uni_port_string(test):
    with pytest.raises(TypeError):
        test.uni_id_from_uni_port("test")

# No argument at all raises a TypeError.
def test_adtran_platform_uni_id_from_uni_port_no_args(test):
    with pytest.raises(TypeError):
        test.uni_id_from_uni_port()
"""
These test functions test the function "mk_uni_port_num()"
For the tests with simple inputs, answers are reached by simply
adding the bit shifted arguments because there aren't any
overlapping 1 bits, so this should be the same as a logical OR.
For the rest of the tests I manually calculated what the answers
should be for a variety of scenarios.
"""
# --- module-level mk_uni_port_num() tests ---

# Simple args, relying on the parameter that has a default value.
def test_mk_uni_port_num_default():
    assert platform.mk_uni_port_num(1, 1) == 2**11 + 2**4

# Simple args, including one for the parameter that has a default value.
def test_mk_uni_port_num_not_default():
    assert platform.mk_uni_port_num(1, 1, 1) == 2**11 + 2**4 + 1

# Scenario where the logical OR does not equal the plain sum.
def test_mk_uni_port_num_sum_dne_bitwise_or():
    assert platform.mk_uni_port_num(1, 128, 1) == 128 * 2**4 + 1

# A single negative argument.
def test_mk_uni_port_num_negative():
    assert platform.mk_uni_port_num(-1, 1, 1) == -2031

# Two negative arguments.
def test_mk_uni_port_num_negatives():
    assert platform.mk_uni_port_num(-1, -1, 1) == -15

# String arguments are rejected with a TypeError.
def test_mk_uni_port_num_strings():
    with pytest.raises(TypeError):
        platform.mk_uni_port_num("test", "test", "test")

# Missing arguments raise a TypeError.
def test_mk_uni_port_num_no_args():
    with pytest.raises(TypeError):
        platform.mk_uni_port_num()
"""
Several of these tests pass in a number between 0 and 15 which
should all return the same number back. Two more pass in huge numbers
with the same last four digits. The one with all 1's returns 15 and
the one with all 0's returns 0. Finally, I made sure that passing
in either the wrong type of argument or none at all threw the
appropriate type error.
"""
# --- module-level uni_id_from_uni_port() tests ---

# Logical AND of 15 and 15, which should stay the same.
def test_uni_id_from_uni_port_same_num():
    assert platform.uni_id_from_uni_port(15) == 15

# Logical AND of 15 with a huge number that is all 1's in binary.
def test_uni_id_from_uni_port_huge_num_ones():
    assert platform.uni_id_from_uni_port(17179869183) == 15

# Logical AND of 15 and 0.
def test_uni_id_from_uni_port_zero():
    assert platform.uni_id_from_uni_port(0) == 0

# Logical AND of 15 with a huge number whose last four bits are 0.
def test_uni_id_from_uni_port_huge_num_zeros():
    assert platform.uni_id_from_uni_port(17179869168) == 0

# Logical AND of 12 and 15.
def test_uni_id_from_uni_port_twelve():
    assert platform.uni_id_from_uni_port(12) == 12

# Logical AND of 9 and 15.
def test_uni_id_from_uni_port_nine():
    assert platform.uni_id_from_uni_port(9) == 9

# Logical AND of 3 and 15.
def test_uni_id_from_uni_port_three():
    assert platform.uni_id_from_uni_port(3) == 3

# A string argument raises a TypeError.
def test_uni_id_from_uni_port_string():
    with pytest.raises(TypeError):
        platform.uni_id_from_uni_port("test")

# No argument at all raises a TypeError.
def test_uni_id_from_uni_port_no_args():
    with pytest.raises(TypeError):
        platform.uni_id_from_uni_port()
"""
The first few tests try a few different scenarios to make sure that the bit shifting
and logical AND are working as expected. There should never be a result that is
larger than 15. Then I checked to make sure that passing the wrong argument or no
arguments at all throws the expected type error.
"""
# --- module-level intf_id_from_uni_port_num() tests ---

# Smallest value that remains non-zero after the bit shift (2048 -> 1).
def test_intf_id_from_uni_port_num_smallest():
    assert platform.intf_id_from_uni_port_num(2048) == 1

# Extra bits in positions 1 through 11 must not affect the result.
def test_intf_id_from_uni_port_num_big():
    assert platform.intf_id_from_uni_port_num(3458) == 1

# Massive number whose bits 15 through 12 are 1010 -> result 10.
def test_intf_id_from_uni_port_num_massive():
    assert platform.intf_id_from_uni_port_num(22459) == 10

# Smallest value that stays positive after the shift but ANDs down to zero.
def test_intf_id_from_uni_port_num_big_zero():
    assert platform.intf_id_from_uni_port_num(32768) == 0

# Largest value that the bit shift collapses all the way to zero.
def test_intf_id_from_uni_port_num_bit_shift_to_zero():
    assert platform.intf_id_from_uni_port_num(2047) == 0

# A string argument raises a TypeError.
def test_intf_id_from_uni_port_num_string():
    with pytest.raises(TypeError):
        platform.intf_id_from_uni_port_num("test")

# No argument raises a TypeError.
def test_intf_id_from_uni_port_num_no_args():
    with pytest.raises(TypeError):
        platform.intf_id_from_uni_port_num()
"""
i did the standard tests to make sure that it returned the expected values
for random normal cases and the max and min cases. I also checked args
that were too big and too small. Then I made sure that the first arg truly
didn't matter and that the default value of the last parameter worked.
Finally, I checked to make sure string args and no args at all threw the
appropriate errors.
"""
# --- module-level mk_alloc_id() tests ---

# All args 0 should return just the 1024 base offset.
def test_mk_alloc_id_all_zeros():
    assert platform.mk_alloc_id(0, 0, 0) == 1024

# onu_id out of bounds trips the function's internal assert.
def test_mk_alloc_id_onu_id_too_big():
    with pytest.raises(AssertionError):
        platform.mk_alloc_id(0, 128, 0)

# idx out of bounds trips the internal assert.
def test_mk_alloc_id_idx_idx_too_big():
    with pytest.raises(AssertionError):
        platform.mk_alloc_id(0, 0, 5)

# Negative values for both bounded args are rejected as well.
def test_mk_alloc_id_both_args_negative():
    with pytest.raises(AssertionError):
        platform.mk_alloc_id(0, -1, -1)

# Both bounded parameters at their respective maxima.
def test_mk_alloc_id_both_max():
    assert platform.mk_alloc_id(0, 127, 4) == 2175

# Mid-range values; the first arg doesn't matter, so a string is accepted.
def test_mk_alloc_id_random_args():
    assert platform.mk_alloc_id("test", 100, 2) == 1636

# Exercise the default value of the last parameter.
def test_mk_alloc_id_default_value():
    assert platform.mk_alloc_id(0, 100) == 1124

# Strings for the bounded args trip the internal assert.
def test_mk_alloc_id_strings():
    with pytest.raises(AssertionError):
        platform.mk_alloc_id("test", "test", "test")

# No args at all raises a TypeError.
def test_mk_alloc_id_no_args():
    with pytest.raises(TypeError):
        platform.mk_alloc_id()
"""
Just some basic tests to get coverage here.This function probably only
exists to support backwards compatibility.
"""
#inputing a negative number
def test_intf_id_from_nni_port_num_negative():
output = platform.intf_id_from_nni_port_num(-1)
assert | |
"""
An implementation of the OpenAI Transformer Language Model.
Mostly just a slightly modified version of
https://github.com/huggingface/pytorch-openai-transformer-lm
so thanks to them!
Some of these modules duplicate code elsewhere in AllenNLP,
but the serialized weights depend on the exact parameter setup
here, so it's easiest to just reimplement them.
"""
# pylint: disable=invalid-name,arguments-differ
from typing import NamedTuple, List
import copy
import io
import json
import logging
import math
import pathlib
import re
import tarfile
import numpy as np
import torch
from torch.nn import Parameter
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.common.from_params import FromParams
logger = logging.getLogger(__name__)
def gelu(x: torch.Tensor) -> torch.Tensor:
    """Gaussian Error Linear Unit activation (tanh approximation)."""
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)
    return 0.5 * x * (1 + torch.tanh(inner))
def swish(x: torch.Tensor) -> torch.Tensor:
    """Swish activation: ``x * sigmoid(x)``."""
    return torch.sigmoid(x) * x
# Map activation names to element-wise tensor callables (used by MLP).
# NOTE: this previously stored the ``torch.nn.ReLU`` *class*, which is not
# directly callable on a tensor the way ``swish``/``gelu`` are — selecting
# 'relu' would have constructed a module instead of applying the activation.
# The functional form behaves like the other two entries.
_ACTIVATION_FUNCTIONS = {
        'relu': torch.nn.functional.relu,
        'swish': swish,
        'gelu': gelu
}
# The serialized OpenAI weights are stored under these TensorFlow variable
# names, in exactly this order: the token/position embedding table, then
# twelve identical per-layer groups (h0..h11), then the classifier weights.
_PARAMETER_NAMES = (
        ["model/we:0"] +
        [f"model/h{layer}/{suffix}"
         for layer in range(12)
         for suffix in ("attn/c_attn/w:0", "attn/c_attn/b:0",
                        "attn/c_proj/w:0", "attn/c_proj/b:0",
                        "ln_1/g:0", "ln_1/b:0",
                        "mlp/c_fc/w:0", "mlp/c_fc/b:0",
                        "mlp/c_proj/w:0", "mlp/c_proj/b:0",
                        "ln_2/g:0", "ln_2/b:0")] +
        ["model/clf/w:0", "model/clf/b:0"]
)
class TransformerConfig(NamedTuple):
    """
    The transformer has to pass a bunch of params to its submodules,
    this bundles them together to make things easier.
    """
    # Defaults correspond to the published pretrained OpenAI model.
    embedding_dim: int = 768                    # hidden size of the model
    num_heads: int = 12                         # attention heads per layer
    embedding_dropout_probability: float = 0.1  # dropout for the embedding layer
    attention_dropout_probability: float = 0.1  # dropout on the attention weights
    residual_dropout_probability: float = 0.1   # dropout on residual-branch outputs
    activation_function: str = 'gelu'           # key into _ACTIVATION_FUNCTIONS
class LayerNorm(torch.nn.Module):
    "Construct a layernorm module in the OpenAI style (epsilon inside the square root)."
    def __init__(self, n_state, e=1e-5):
        super().__init__()
        # Learnable per-feature gain and bias; names 'g' and 'b' must match
        # the serialized OpenAI weights.
        self.g = torch.nn.Parameter(torch.ones(n_state))
        self.b = torch.nn.Parameter(torch.zeros(n_state))
        self.e = e

    def forward(self, x):
        # Normalize over the last (feature) dimension.
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = centered / torch.sqrt(variance + self.e)
        return self.g * normalized + self.b
class Conv1D(torch.nn.Module):
    """A 1x1 "convolution": an affine projection of the last dimension from
    ``nx`` to ``nf`` features. Only ``rf == 1`` is supported."""
    def __init__(self, nf: int, rf: int, nx: int) -> None:
        super().__init__()
        self.rf = rf
        self.nf = nf
        if rf != 1:
            # Wider receptive fields are not used by this model.
            raise NotImplementedError
        weight = torch.empty(nx, nf)
        torch.nn.init.normal_(weight, std=0.02)
        self.w = Parameter(weight)
        self.b = Parameter(torch.zeros(nf))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.rf != 1:
            raise NotImplementedError
        # Flatten all leading dims, apply b + x @ w, then restore the shape
        # with the feature dimension replaced by nf.
        out_shape = x.size()[:-1] + (self.nf,)
        flat = torch.addmm(self.b, x.view(-1, x.size(-1)), self.w)
        return flat.view(*out_shape)
class Attention(torch.nn.Module):
    """Causally-masked multi-head self-attention in the OpenAI style."""
    def __init__(self,
                 nx: int,
                 n_ctx: int,
                 config: TransformerConfig,
                 scale: bool = False) -> None:
        super().__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.num_heads == 0
        # Lower-triangular causal mask, shaped to broadcast over
        # (batch, head, query, key). Buffer name 'b' matches the TF weights.
        self.register_buffer('b', torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.num_heads
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(n_state * 3, 1, nx)
        self.c_proj = Conv1D(n_state, 1, nx)
        self.attn_dropout = torch.nn.Dropout(config.attention_dropout_probability)
        self.resid_dropout = torch.nn.Dropout(config.residual_dropout_probability)

    def _attn(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
        scores = torch.matmul(q, k)
        if self.scale:
            scores = scores / math.sqrt(v.size(-1))
        # TF implem method: mask_attn_weights — push masked positions to -1e9.
        scores = scores * self.b + -1e9 * (1 - self.b)
        weights = self.attn_dropout(torch.nn.Softmax(dim=-1)(scores))
        return torch.matmul(weights, v)

    def merge_heads(self, x: torch.Tensor):
        # pylint: disable=no-self-use
        x = x.permute(0, 2, 1, 3).contiguous()
        merged_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*merged_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x: torch.Tensor, k: bool = False):
        per_head_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*per_head_shape)  # in Tensorflow implem: fct split_states
        # Keys get an extra transpose so _attn can matmul(q, k) directly.
        return x.permute(0, 2, 3, 1) if k else x.permute(0, 2, 1, 3)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        projected = self.c_attn(x)
        query, key, value = projected.split(self.split_size, dim=2)
        attended = self._attn(self.split_heads(query),
                              self.split_heads(key, k=True),
                              self.split_heads(value))
        return self.resid_dropout(self.c_proj(self.merge_heads(attended)))
class MLP(torch.nn.Module):
    """Position-wise feed-forward sublayer: expand, activate, project back."""
    def __init__(self, n_state: int, config: TransformerConfig) -> None:  # in MLP: n_state=3072 (4 * n_embd)
        super().__init__()
        self.c_fc = Conv1D(n_state, 1, config.embedding_dim)
        self.c_proj = Conv1D(config.embedding_dim, 1, n_state)
        self.act = _ACTIVATION_FUNCTIONS[config.activation_function]
        self.dropout = torch.nn.Dropout(config.residual_dropout_probability)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        expanded = self.act(self.c_fc(x))
        return self.dropout(self.c_proj(expanded))
class Block(torch.nn.Module):
    """One transformer layer: attention then MLP, each followed by an
    additive residual connection and a layer norm (post-norm)."""
    def __init__(self,
                 n_ctx: int,
                 config: TransformerConfig,
                 scale: bool = False) -> None:
        super().__init__()
        nx = config.embedding_dim
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_1 = LayerNorm(nx)
        self.mlp = MLP(4 * nx, config)
        self.ln_2 = LayerNorm(nx)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        attended = self.ln_1(x + self.attn(x))
        return self.ln_2(attended + self.mlp(attended))
class OpenaiTransformer(torch.nn.Module, FromParams):
"""
Openai transformer, as per https://blog.openai.com/language-unsupervised/.
Default parameters are the ones for their pretrained model.
Parameters
----------
vocab_size: ``int`` (optional, default: 40990)
The size of the vocabulary, including the positional encodings.
n_ctx: ``int`` (optional, default: 512)
The number of positional encodings.
embedding_dim: ``int`` (optional, default: 768)
The dimension of the output embeddings.
num_heads: ``int`` (optional, default: 12)
How many "heads" the attention has.
num_layers: ``int`` (optional, default: 12)
How many layers of "blocks" the transformer has.
embedding_dropout_probability: ``float`` (optional, default: 0.1)
Dropout for the embedding.
attention_dropout_probability: ``float`` (optional, default: 0.1)
Dropout for attention.
residual_dropout_probability: ``float`` (optional, default: 0.1)
Dropout for residual
activation_function: ``str`` (optional, default: ``'gelu'``)
Activation function for the multi-layer perceptron.
model_path: ``str`` (optional, default: ``None``)
A tar.gz file containing serialized model weights. If supplied,
the weights will be loaded from that file.
requires_grad: ``bool`` (optional, default: ``False``)
If true, the transformer will be fine-tuneable.
"""
    def __init__(self,
                 vocab_size: int = 40990,
                 n_ctx: int = 512,
                 embedding_dim: int = 768,
                 num_heads: int = 12,
                 num_layers: int = 12,
                 embedding_dropout_probability: float = 0.1,
                 attention_dropout_probability: float = 0.1,
                 residual_dropout_probability: float = 0.1,
                 activation_function: str = 'gelu',
                 model_path: str = None,
                 requires_grad: bool = False) -> None:
        super().__init__()

        # Bundle the hyperparameters that every submodule needs.
        config = TransformerConfig(
                embedding_dim,
                num_heads,
                embedding_dropout_probability,
                attention_dropout_probability,
                residual_dropout_probability,
                activation_function,
        )

        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        # One output per block, plus the initial embedding layer.
        self.num_output_layers = 1 + num_layers

        self.embed = torch.nn.Embedding(vocab_size, embedding_dim)
        self.drop = torch.nn.Dropout(embedding_dropout_probability)

        # deepcopy gives each of the num_layers blocks its own parameters.
        block = Block(n_ctx, config, scale=True)
        self.h = torch.nn.ModuleList([copy.deepcopy(block) for _ in range(num_layers)])
        self.decoder = torch.nn.Linear(embedding_dim, vocab_size, bias=False)
        self.decoder.weight = self.embed.weight  # Tied weights
        # To reproduce the noise_shape parameter of TF implementation
        torch.nn.init.normal_(self.embed.weight, std=0.02)

        # Freeze all parameters unless the caller asked for a fine-tuneable model.
        for parameter in self.parameters():
            parameter.requires_grad = requires_grad

        if model_path:
            self.load_weights(model_path)
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
#x = x.view(-1, x.size(2), x.size(3))
# x is (batch_size, sequence_length) tensor of byte-pair ids
# e is (batch_size, sequence_length, 2, embedding_dim) tensor of embeddings
e = self.embed(x)
# h is (batch_size, sequence_length, embedding_dim)
h = e.sum(dim=2)
all_layers = [h]
for block in self.h:
h = block(h)
all_layers.append(h)
# result is list of (batch_size, sequence_length, embedding_dim)
return all_layers
def load_weights(self,
transformer_model_path: str,
n_ctx: int = -1,
n_special: int = -1,
n_transfer: int = 12,
n_embd: int = 768,
names: List[str] = _PARAMETER_NAMES) -> None:
# pylint: disable=dangerous-default-value
logger.info(f"loading weights from {transformer_model_path}")
# if `file_path` is a URL, redirect to the cache
transformer_model_path = cached_path(transformer_model_path)
with tarfile.open(transformer_model_path) as tmp:
num_params_files = len([member for member in tmp.getmembers() if member.name.endswith('.npy')])
shapesfile = tmp.extractfile('model/params_shapes.json')
if shapesfile:
shapes = json.loads(shapesfile.read())
else:
raise ConfigurationError("unable to find model/params_shapes.json in the archive")
# numpy can't read from a tarfile directly, so we need a workaround
# https://github.com/numpy/numpy/issues/7989#issuecomment-341656702
init_params: List[np.ndarray] = []
for n in range(num_params_files):
array_file = io.BytesIO()
array_file.write(tmp.extractfile(f'model/params_{n}.npy').read())
array_file.seek(0)
# each np.load is a (11653478,) numpy array
init_params.append(np.load(array_file))
# init_params is a list of 10 arrays of size (11653578,)
# shapes are [[512, 768], [40478, 768], [1, 768, 2304], [2304], ... # 146 elts
# products are [512 * 768, 40478 * 768, ...]
# offsets is [512 * 768, 512 * 768 + 40478 * 768, ...]
offsets = np.cumsum([np.prod(shape) for shape in shapes])
# split into the 146 subarrays corresponding to shapes
| |
<reponame>Tseplyaev/aiida-fleur
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the AiiDA-FLEUR package. #
# #
# The code is hosted on GitHub at https://github.com/broeder-j/aiida-fleur #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# http://aiida-fleur.readthedocs.io/en/develop/ #
###############################################################################
"""
This is the worklfow 'initial_cls' using the Fleur code calculating
corelevel shifts with different methods.
"""
#TODO parsing of eigenvalues of LOs!
#TODO error handling of scf
#TODO Check if calculations failed, and terminate the workflow without raising an exception
# currently the result extraction part will fail if calculations failed
#TODO USE SAME PARAMETERS! (maybe an extract method for fleurinp is needed)
# TODO: Allow for providing references as scf output-parameter nodes
# TODO: maybe launch all scfs at the same time
# TODO: currently only a warning is given if a reference is not found,
# but it should lead to an error if no reference is found for what should be calculated
from string import digits
from aiida.work.run import submit
from aiida.work.workchain import ToContext, WorkChain, if_
from aiida.work import workfunction as wf
from aiida.orm import Code, DataFactory, CalculationFactory, load_node, Group
from aiida.orm.querybuilder import QueryBuilder
from aiida.common.exceptions import NotExistent
from aiida_fleur.calculation.fleur import FleurCalculation
from aiida_fleur.workflows.scf import fleur_scf_wc
from aiida_fleur.tools.common_fleur_wf_util import get_natoms_element
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
RemoteData = DataFactory('remote')
FleurinpData = DataFactory('fleur.fleurinp')
FleurProcess = FleurCalculation.process()
FleurCalc = CalculationFactory('fleur.fleur')
class fleur_initial_cls_wc(WorkChain):
"""
Turn key solution for the calculation of core level shift
'method' : ['initial', 'full_valence ch', 'half_valence_ch', 'ch', ...]
'Bes' : [W4f, Be1s]
'CLS' : [W4f, Be1s]
toms' : ['all', 'postions' : []]
#'references' : ['calculate', and use # calculate : 'all' , or 'calculate' : ['W', 'Be']
'references' : { 'W': [calc/ouputnode or fleurinp, or structure data or
structure data + Parameter ], 'Be' : }
'scf_para' : {...}, 'default'
'relax' : True
'relax_mode': ['Fleur', 'QE Fleur', 'QE']
'relax_para' : {...}, 'default'
'calculate_doses' : False
'dos_para' : {...}, 'default'
# defaults
default wf_Parameters::
'method' : 'initial'
'atoms' : 'all
'references' : 'calculate'
'scf_para' : 'default'
'relax' : True
'relax_mode': 'QE Fleur'
'relax_para' : 'default'
'calculate_doses' : False
'dos_para' : 'default'
"""
_workflowversion = "0.3.4"
_default_wf_para = {'references' : {},
'relax' : True,
'relax_mode': 'Fleur',
'relax_para' : 'default',
'scf_para' : 'default',
'same_para' : True,
'serial' : False}
_default_options = {'resources' : {"num_machines": 1},
'max_wallclock_seconds' : 6*60*60,
'queue_name' : '',
'custom_scheduler_commands' : '',
#'max_memory_kb' : None,
'import_sys_environment' : False,
'environment_variables' : {}}
ERROR_INVALID_INPUT_RESOURCES = 1
ERROR_INVALID_INPUT_RESOURCES_UNDERSPECIFIED = 2
ERROR_INVALID_CODE_PROVIDED = 3
ERROR_INPGEN_CALCULATION_FAILED = 4
ERROR_CHANGING_FLEURINPUT_FAILED = 5
ERROR_CALCULATION_INVALID_INPUT_FILE = 6
ERROR_FLEUR_CALCULATION_FALIED = 7
ERROR_CONVERGENCE_NOT_ARCHIVED = 8
ERROR_REFERENCE_MISSING = 9
    @classmethod
    def define(cls, spec):
        """Declare the inputs and the step outline of this workchain."""
        super(fleur_initial_cls_wc, cls).define(spec)
        # Optional workflow-control parameters; defaults from _default_wf_para.
        spec.input("wf_parameters", valid_type=ParameterData, required=False,
                   default=ParameterData(dict=cls._default_wf_para))
        # Either a fleurinp or a structure (plus the inpgen code) must be
        # supplied; this is enforced in check_input().
        spec.input("fleurinp", valid_type=FleurinpData, required=False)
        spec.input("fleur", valid_type=Code, required=True)
        spec.input("inpgen", valid_type=Code, required=False)
        spec.input("structure", valid_type=StructureData, required=False)
        spec.input("calc_parameters", valid_type=ParameterData, required=False)
        # Scheduler / resource options; defaults from _default_options.
        spec.input("options", valid_type=ParameterData, required=False,
                   default=ParameterData(dict=cls._default_options))

        spec.outline(
            cls.check_input,
            cls.get_references,
            cls.run_fleur_scfs,
            # Structure relaxation only runs when relaxation_needed() says so.
            if_(cls.relaxation_needed)(
                cls.relax),
            cls.find_parameters,
            cls.run_scfs_ref,
            cls.return_results
        )
def check_input(self):
"""
Init same context and check what input is given if it makes sence
"""
### input check ### ? or done automaticly, how optional?
msg = ("INFO: Started initial_state_CLS workflow version {} "
"Workchain node identifiers: "#{}"
"".format(self._workflowversion))#, ProcessRegistry().current_calc_node))
self.report(msg)
# init
self.ctx.last_calc = None
self.ctx.eximated_jobs = 0
self.ctx.run_jobs = 0
self.ctx.calcs_res = []
self.ctx.labels = []
self.ctx.ref_labels = []
self.ctx.calcs_torun = []
self.ctx.ref_calcs_torun = []
self.ctx.ref_calcs_res = []
self.ctx.struc_to_relax = []
self.ctx.successful = True
self.ctx.warnings = []
self.ctx.errors = []
self.ctx.ref = {}
self.ctx.calculate_formation_energy = True
#Style: {atomtype : listof all corelevel, atomtype_coresetup... }
#ie: { 'W-1' : [shift_1s, ... shift 7/2 4f],
# 'W-1_coreconfig' : ['1s','2s',...],
# 'W-2' : [...], 'Be-1': [], ...} #all in eV!
self.ctx.CLS = {}
self.ctx.cl_energies = {}# same style as CLS only energy <-> shift
self.ctx.ref_cl_energies = {}
#Style: {'Compound' : energy, 'ref_x' : energy , ...}
#i.e {'Be12W' : 0.0, 'Be' : 0.104*htr_eV , 'W' : 0.12*htr_eV} # all in eV!
self.ctx.fermi_energies = {}
self.ctx.bandgaps = {}
self.ctx.atomtypes = {}
# set values, or defaults for Wf_para
wf_dict = self.inputs.wf_parameters.get_dict()
default = self._default_wf_para
self.ctx.serial = wf_dict.get('serial', default.get('serial'))
self.ctx.same_para = wf_dict.get('same_para', default.get('same_para'))
self.ctx.scf_para = wf_dict.get('scf_para', default.get('scf_para'))
self.ctx.relax = wf_dict.get('relax', default.get('relax'))
self.ctx.relax_mode = wf_dict.get('relax_mode', default.get('relax_mode'))
self.ctx.relax_para = wf_dict.get('relax_para', default.get('dos_para'))
defaultoptions = self._default_options
if self.inputs.options:
options = self.inputs.options.get_dict()
else:
options = defaultoptions
for key, val in defaultoptions.iteritems():
options[key] = options.get(key, val)
self.ctx.options = options
# check if inputs given make sense # TODO sort this out in common wc
inputs = self.inputs
if 'fleurinp' in inputs:
#TODO make a check if an extracted structure exists, since get_structuredata is wf
structure = inputs.fleurinp.get_structuredata(inputs.fleurinp)
self.ctx.elements = list(structure.get_composition().keys())
self.ctx.calcs_torun.append(inputs.get('fleurinp'))
#print('here1')
if 'structure' in inputs:
warning = 'WARNING: Ignoring Structure input, because Fleurinp was given'
self.ctx.warnings.append(warning)
self.report(warning)
if 'calc_parameters' in inputs:
warning = 'WARNING: Ignoring parameter input, because Fleurinp was given'
self.ctx.warnings.append(warning)
self.report(warning)
elif 'structure' in inputs:
self.ctx.elements = list(inputs.structure.get_composition().keys())
#self.ctx.elements = list(s.get_symbols_set())
if 'inpgen' not in inputs:
error = 'ERROR: StructureData was provided, but no inpgen code was provided'
self.ctx.errors.append(error)
self.control_end_wc(error)
return self.ERROR_INVALID_INPUT_RESOURCES
if 'calc_parameters' in inputs:
self.ctx.calcs_torun.append(
[inputs.get('structure'), inputs.get('calc_parameters')])
else:
self.ctx.calcs_torun.append(inputs.get('structure'))
else:
error = 'ERROR: No StructureData nor FleurinpData was provided'
self.ctx.errors.append(error)
self.control_end_wc(error)
return self.ERROR_INVALID_INPUT_RESOURCES
self.report('INFO: elements in structure: {}'.format(self.ctx.elements))
def get_references(self):
"""
To calculate a CLS in initial state approx, we need reference calculations
to the Elemental crystals. First it is checked if the user has provided them
Second the database is checked, if there are structures with certain extras.
Third the COD database is searched for the elemental Cystal structures.
If some referneces are not found stop here.
Are there already calculation of these 'references', ggf use them.
We do not put these calculation in the calculation queue yet because we
need specific parameters for them
"""
self.report('INFO: In Get_references initial_state_CLS workflow')
references = self.inputs.wf_parameters.get_dict().get('references', {})
# should be of the form of
#'references' : { 'W': calc, outputnode of workflow or fleurinp,
# or structure data or (structure data + Parameter),
# 'Be' : ...}
self.ctx.ref_calcs_torun = []
self.ctx.ref = {}
self.ctx.abort = False
struc_group = references.get('group', None)
para_group = references.get('para_group', None)
#TODO better checks if ref makes sense?
# get specific element reference if given override
#print(self.ctx.elements)
elements = self.ctx.elements # ggf copy because ctx.elements will be modified
for elem in elements:
#to_calc[elem] = 'find'
ref_el = references.get(elem, None)
#print ref_el
if ref_el:
# loading nodes
if isinstance(ref_el, list):
ref_el_node = []
for ref_el_el in ref_el:
try:
ref_el_nodes = load_node(ref_el_el)
except:
ref_el_node = None
self.report('ERROR: The reference node in the list '
'(id or uuid) provided: {} for element: '
'{} could not be loaded with load_node'
''.format(ref_el_el, elem))
self.ctx.abort = True
ref_el_node.append(ref_el_nodes)
else:
try:
ref_el_node = load_node(ref_el)
except:# NotExistent: No node was found
ref_el_node = None
self.report('ERROR: The reference node (id or uuid) '
'provided: {} for element: {} could'
'not be loaded with load_node'
''.format(ref_el, elem))
self.ctx.abort = True
# expecting nodes and filling ref_calcs_torun
if isinstance(ref_el_node, list):#(StructureData, ParameterData)):
#enforced parameters, add directly to run queue
# TODO: if a scf with these parameters was already done link to it
# and extract the results instead of running the calculation again....
if len(ref_el_node) == 2:
if isinstance(ref_el_node[0], StructureData) and isinstance(ref_el_node[1], ParameterData):
self.ctx.ref_calcs_torun.append(ref_el_node)
else:
self.report('WARNING: I did not undestand the list with length 2 '
'you gave me as reference input')
else:
self.report('WARNING: I did not undestand the list {} with length {} '
'you gave me as reference input'
''.format(ref_el_node, len(ref_el_node)))
elif isinstance(ref_el_node, FleurCalc):
#extract from fleur calc TODO
self.ctx.ref_cl_energies[elem] = {}
elif isinstance(ref_el_node, ParameterData):
#extract from workflow output TODO
self.ctx.ref_cl_energies[elem] = {}
elif isinstance(ref_el_node, FleurinpData):
# add to calculations
#enforced parameters, add directly to run queue
self.ctx.ref_calcs_torun.append(ref_el_node)
#self.ctx.ref[elem] = ref_el
elif isinstance(ref_el_node, StructureData):
self.ctx.ref[elem] = ref_el_node
self.ctx.ref_calcs_torun.append(ref_el_node)
#elif isinstance(ref_el, initial_state_CLS):
# extract TODO
else:
error = ("ERROR: I do not know what to do with this given "
"reference {} for element {}".format(ref_el, elem))
#print(error)
self.report(error)
self.ctx.errors.append(error)
self.ctx.abort = True
elif struc_group:
#print('here, looking in group')
#print(elem, struc_group)
structure, report = get_ref_from_group(elem, struc_group)
if report:
self.report(report)
parameter, report = get_para_from_group(elem, para_group)
if structure | |
<filename>pymc3/tests/test_step.py<gh_stars>1-10
import shutil
import tempfile
from .checks import close_to
from .models import (simple_categorical, mv_simple, mv_simple_discrete,
mv_prior_simple, simple_2model_continuous)
from pymc3.sampling import assign_step_methods, sample
from pymc3.model import Model
from pymc3.step_methods import (NUTS, BinaryGibbsMetropolis, CategoricalGibbsMetropolis,
Metropolis, Slice, CompoundStep, NormalProposal,
MultivariateNormalProposal, HamiltonianMC,
EllipticalSlice, SMC, DEMetropolis)
from pymc3.theanof import floatX
from pymc3.distributions import (
Binomial, Normal, Bernoulli, Categorical, Beta, HalfNormal)
from numpy.testing import assert_array_almost_equal
import numpy as np
import numpy.testing as npt
import pytest
import theano
import theano.tensor as tt
from .helpers import select_by_precision
class TestStepMethods(object):  # yield test doesn't work subclassing object
    """Regression tests pinning each step method's exact sample stream.

    ``master_samples`` maps a step-method class to the values of ``x`` drawn
    on master with ``random_seed=1`` (100 draws, or 200 for SMC).  Any change
    to how random numbers are consumed shifts these values, so updates to the
    table must be deliberate (see ``check_trace``).
    """
    # Reference traces recorded on master; compared bit-for-bit (to 6 decimals
    # on float64) in check_trace() below.
    master_samples = {
        Slice: np.array([ 0.10233528, 0.40458486, 0.17329217, 0.46281232, 0.22556278,
                          1.52632836, -0.27823807, 0.02539625, 1.02711735, 0.03686346,
                          -0.62841281, -0.27125083, 0.31989505, 0.84031155, -0.18949138,
                          1.60550262, 1.01375291, -0.29742941, 0.35312738, 0.43363622,
                          1.18898078, 0.80063888, 0.38445644, 0.90184395, 1.69150017,
                          2.05452171, -0.13334755, 1.61265408, 1.36579345, 1.3216292 ,
                          -0.59487037, -0.34648927, 1.05107285, 0.42870305, 0.61552257,
                          0.55239884, 0.13929271, 0.26213809, -0.2316028 , 0.19711046,
                          1.42832629, 1.93641434, -0.81142379, -0.31059485, -0.3189694 ,
                          1.43542534, 0.40311093, 1.63103768, 0.24034874, 0.33924866,
                          0.94951616, 0.71700185, 0.79273056, -0.44569146, 1.91974783,
                          0.84673795, 1.12411833, -0.83123811, -0.54310095, -0.00721347,
                          0.9925055 , 1.04015058, -0.34958074, -0.14926302, -0.47990225,
                          -0.75629446, -0.95942067, 1.68179204, 1.20598073, 1.39675733,
                          1.22755935, 0.06728757, 1.05184231, 1.01126791, -0.67327093,
                          0.21429651, 1.33730461, -1.56174184, -0.64348764, 0.98050636,
                          0.25923049, 0.58622631, 0.46589069, 1.44367347, -0.43141573,
                          1.08293374, -0.5563204 , 1.46287904, 1.26019815, 0.52972104,
                          1.08792687, 1.10064358, 1.84881549, 0.91179647, 0.69316592,
                          -0.47657064, 2.22747063, 0.83388935, 0.84680716, -0.10556406]),
        HamiltonianMC: np.array([ 0.43733634, 0.43733634, 0.15955614, -0.44355329, 0.21465731,
                                  0.30148244, 0.45527282, 0.45527282, 0.41753005, -0.03480236,
                                  1.16599611, 0.565306 , 0.565306 , 0.0077143 , -0.18291321,
                                  -0.14577946, -0.00703353, -0.00703353, 0.14345194, -0.12345058,
                                  0.76875516, 0.76875516, 0.84289506, 0.24596225, 0.95287087,
                                  1.3799335 , 1.1493899 , 1.1493899 , 2.0255982 , -0.77850273,
                                  0.11604115, 0.11604115, 0.39296557, 0.34826491, 0.5951183 ,
                                  0.63097341, 0.57938784, 0.57938784, 0.76570029, 0.63516046,
                                  0.23667784, 2.0151377 , 1.92064966, 1.09125654, -0.43716787,
                                  0.61939595, 0.30566853, 0.30566853, 0.3690641 , 0.3690641 ,
                                  0.3690641 , 1.26497542, 0.90890334, 0.01482818, 0.01482818,
                                  -0.15542473, 0.26475651, 0.32687263, 1.21902207, 0.6708017 ,
                                  -0.18867695, -0.18867695, -0.07141329, -0.04631175, -0.16855462,
                                  -0.16855462, 1.05455573, 0.47371825, 0.47371825, 0.86307077,
                                  0.86307077, 0.51484125, 1.0022533 , 1.0022533 , 1.02370316,
                                  0.71331829, 0.71331829, 0.71331829, 0.40758664, 0.81307434,
                                  -0.46269741, -0.60284666, 0.06710527, 0.06710527, -0.35055053,
                                  0.36727629, 0.36727629, 0.69350367, 0.11268647, 0.37681301,
                                  1.10168386, 0.49559472, 0.49559472, 0.06193658, -0.07947103,
                                  0.01969434, 1.28470893, -0.13536813, -0.13536813, 0.6575966 ]),
        Metropolis: np.array([ 1.62434536, 1.01258895, 0.4844172 , 0.4844172 , 0.4844172 ,
                               0.4844172 , 0.4844172 , 0.4844172 , 0.4844172 , 0.4844172 ,
                               0.31198899, 0.31198899, 0.31198899, 0.31198899, 1.21284494,
                               0.52911708, 0.261229 , 0.79158447, 0.10441177, -0.74079387,
                               -0.74079387, -0.50637818, -0.50637818, -0.50637818, -0.45557042,
                               -0.45557042, -0.33541147, 0.28179164, 0.58196196, 0.22971211,
                               0.02081788, 0.60744107, 0.8930284 , 0.8930284 , 1.40595822,
                               1.10786538, 1.10786538, 1.10786538, 1.10786538, -0.28863095,
                               -0.12859388, 0.74757504, 0.74757504, 0.74757504, 0.97766977,
                               0.97766977, 0.75534163, 0.55458356, 0.75288328, 0.87189193,
                               0.9937132 , 0.9937132 , 0.61842825, 0.61842825, 0.27457457,
                               0.31817143, 0.31817143, 0.31817143, -0.77674042, -0.60735798,
                               0.13319847, -0.82050213, -0.82050213, -0.50534274, -0.15479676,
                               -0.15479676, -0.19349227, -0.19349227, -0.21810923, -0.21810923,
                               -0.21810923, 1.0180548 , -0.18121323, 0.68213209, 0.68213209,
                               1.23266958, 1.23266958, 0.60913885, 1.41099989, 1.45756718,
                               1.45756718, 1.45756718, 1.45756718, 1.59526839, 1.82776295,
                               1.82776295, 1.82776295, 1.82776295, 2.2691274 , 2.16897216,
                               2.18638157, 1.06436284, 0.54726838, 0.54726838, 1.04247971,
                               0.86777655, 0.86777655, 0.86777655, 0.86777655, 0.61914177]),
        NUTS: np.array([ 0.550575 , 0.550575 , 0.80046332, 0.91590059, 1.34621916,
                         1.34621916, -0.63917773, -0.65770809, -0.65770809, -0.64512868,
                         -1.05448153, -0.5225666 , 0.14335153, -0.0034499 , -0.0034499 ,
                         0.05309212, -0.53186371, 0.29325825, 0.43210854, 0.56284837,
                         0.56284837, 0.38041767, 0.47322034, 0.49937368, 0.49937368,
                         0.44424258, 0.44424258, -0.02790848, -0.40470145, -0.35725567,
                         -0.43744228, 0.41955432, 0.31099421, 0.31099421, 0.65811717,
                         0.66649398, 0.38493786, 0.54114658, 0.54114658, 0.68222408,
                         0.66404942, 1.44143108, 1.15638799, -0.06775775, -0.06775775,
                         0.30418561, 0.23543403, 0.57934404, -0.5435111 , -0.47938915,
                         -0.23816662, 0.36793792, 0.36793792, 0.64980016, 0.52150456,
                         0.64643321, 0.26130179, 1.10569077, 1.10569077, 1.23662797,
                         -0.36928735, -0.14303069, 0.85298904, 0.85298904, 0.31422085,
                         0.32113762, 0.32113762, 1.0692238 , 1.0692238 , 1.60127576,
                         1.49249738, 1.09065107, 0.84264371, 0.84264371, -0.08832343,
                         0.04868027, -0.02679449, -0.02679449, 0.91989101, 0.65754478,
                         -0.39220625, 0.08379492, 1.03055634, 1.03055634, 1.71071332,
                         1.58740483, 1.67905741, 0.77744868, 0.15050587, 0.15050587,
                         0.73979127, 0.15445515, 0.13134717, 0.85068974, 0.85068974,
                         0.6974799 , 0.16170472, 0.86405959, 0.86405959, -0.22032854]),
        SMC: np.array([ 5.10950205e-02, 1.09811720e+00, 1.78330202e-01, 6.85938766e-01,
                        1.42354476e-01, -1.59630758e+00, 1.57176810e+00, -4.01398917e-01,
                        1.14567871e+00, 1.14954938e+00, 4.94399840e-01, 1.16253017e+00,
                        1.17432244e+00, 7.79195162e-01, 1.29017945e+00, 2.53722905e-01,
                        5.38589898e-01, 3.52121216e-01, 1.35795966e+00, 1.02086933e-01,
                        1.58845251e+00, 6.76852927e-01, -1.04716592e-02, -1.01613324e-01,
                        1.37680965e+00, 7.40036542e-01, 2.89069320e-01, 1.48153741e+00,
                        9.58156958e-01, 5.73623782e-02, 7.68850721e-01, 3.68643390e-01,
                        1.47645964e+00, 2.32596780e-01, -1.85008158e-01, 3.71335958e-01,
                        2.68600102e+00, -4.89504443e-01, 6.54265561e-02, 3.80455349e-01,
                        1.17875338e+00, 2.30233324e-01, 6.90960231e-01, 8.81668685e-01,
                        -2.19754340e-01, 1.27686862e-01, 3.28444250e-01, 1.34820635e-01,
                        5.29725257e-01, 1.43783915e+00, -1.64754264e-01, 7.41446719e-01,
                        -1.17733186e+00, 6.01215658e-02, 1.82638158e-01, -2.23232214e-02,
                        -1.79877583e-02, 8.37949150e-01, 4.41964955e-01, -8.66524743e-01,
                        4.90738093e-01, 2.42056488e-01, 4.67699626e-01, 2.91075351e-01,
                        1.49541153e+00, 8.30730845e-01, 1.03956404e+00, -5.16162910e-01,
                        2.84338859e-01, 1.72305888e+00, 9.52445566e-01, 1.48831718e+00,
                        8.03455325e-01, 1.48840970e+00, 6.98122664e-01, 3.30187139e-01,
                        7.88029712e-01, 9.31510828e-01, 1.01326878e+00, 2.26637755e-01,
                        1.70703646e-01, -8.54429841e-01, 2.97254590e-01, -2.77843274e-01,
                        -2.25544207e-01, 1.98862826e-02, 5.05953885e-01, 4.98203941e-01,
                        1.20897382e+00, -6.32958669e-05, -7.22425896e-01, 1.60930869e+00,
                        -5.02773645e-01, 2.46405678e+00, 9.16039706e-01, 1.14146060e+00,
                        -1.95781984e-01, -2.44653942e-01, 2.67851290e-01, 2.37462012e-01,
                        6.71471950e-01, 1.18319765e+00, 1.29146530e+00, -3.14177753e-01,
                        -1.31041215e-02, 1.05029405e+00, 1.31202399e+00, 7.40532839e-02,
                        9.15510041e-01, 7.71054604e-01, 9.83483263e-01, 9.03032142e-01,
                        9.14191160e-01, 9.32285366e-01, 1.13937607e+00, -4.29155928e-01,
                        3.44609229e-02, -5.46423555e-02, 1.34625982e+00, -1.28287047e-01,
                        -1.55214879e-02, 3.25294234e-01, 1.06120585e+00, -5.09891282e-01,
                        1.25789335e+00, 1.01808348e+00, -9.92590713e-01, 1.72832932e+00,
                        1.12232980e+00, 8.54801892e-01, 1.41534752e+00, 3.50798405e-01,
                        3.69381623e-01, 1.48608411e+00, -1.15506310e-02, 1.57066360e+00,
                        2.00747378e-01, 4.47219763e-01, 5.57720524e-01, -7.74295353e-02,
                        1.79192501e+00, 7.66510475e-01, 1.38852488e+00, -4.06055122e-01,
                        2.73203156e-01, 3.61014687e-01, 1.23574043e+00, 1.64565746e-01,
                        -9.89896480e-02, 9.26130265e-02, 1.06440134e+00, -1.55890408e-01,
                        4.47131846e-01, -7.59186008e-01, -1.50881256e+00, -2.13928005e-01,
                        -4.19160151e-01, 1.75815544e+00, 7.45423008e-01, 6.94781506e-01,
                        1.58596346e+00, 1.75508724e+00, 4.56070434e-01, 2.94128709e-02,
                        1.17703970e+00, -9.90230827e-02, 8.42796845e-01, 1.79154944e+00,
                        5.92779197e-01, 2.73562285e-01, 1.61597907e+00, 1.23514403e+00,
                        4.86261080e-01, -3.10434934e-01, 5.57873722e-01, 6.50365217e-01,
                        -3.41009850e-01, 9.26851109e-01, 8.28936486e-01, 9.16180689e-02,
                        1.30226405e+00, 3.73945789e-01, 6.04560122e-02, 6.00698708e-01,
                        9.68764731e-02, 1.41904148e+00, 6.94182961e-03, 3.17504138e-01,
                        5.90956041e-01, -5.78113887e-01, 5.26615565e-01, -4.19715252e-01,
                        8.92891364e-01, 1.30207363e-01, 4.19899637e-01, 7.10275704e-01,
                        9.27418179e-02, 1.85758044e+00, 4.76988907e-01, -1.36341398e-01]),
    }

    def setup_class(self):
        # pytest invokes this with the class object; create a scratch dir
        # shared by every test in the class.
        self.temp_dir = tempfile.mkdtemp()

    def teardown_class(self):
        shutil.rmtree(self.temp_dir)

    @pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
    def test_sample_exact(self):
        # Bit-exact reproducibility is only expected at float64 precision.
        for step_method in self.master_samples:
            self.check_trace(step_method)

    def check_trace(self, step_method):
        """Tests whether the trace for step methods is exactly the same as on master.

        Code changes that effect how random numbers are drawn may change this, and require
        `master_samples` to be updated, but such changes should be noted and justified in the
        commit.

        This method may also be used to benchmark step methods across commits, by running, for
        example

        ```
        BENCHMARK=100000 ./scripts/test.sh -s pymc3/tests/test_step.py:TestStepMethods
        ```

        on multiple commits.
        """
        n_steps = 100
        with Model() as model:
            x = Normal('x', mu=0, sd=1)
            y = Normal('y', mu=x, sd=1, observed=1)
            # SMC has its own draw semantics; NUTS needs explicit scaling so
            # that tuning consumes random numbers deterministically.
            if step_method.__name__ == 'SMC':
                trace = sample(draws=200,
                               random_seed=1,
                               progressbar=False,
                               step=step_method())
            elif step_method.__name__ == 'NUTS':
                step = step_method(scaling=model.test_point)
                trace = sample(0, tune=n_steps,
                               discard_tuned_samples=False,
                               step=step, random_seed=1, chains=1)
            else:
                trace = sample(0, tune=n_steps,
                               discard_tuned_samples=False,
                               step=step_method(), random_seed=1, chains=1)

        assert_array_almost_equal(
            trace['x'],
            self.master_samples[step_method],
            decimal=select_by_precision(float64=6, float32=4))

    def check_stat(self, check, trace, name):
        # `check` is an iterable of (varname, statistic, expected, tolerance);
        # the first 2000 draws are discarded as burn-in.
        for (var, stat, value, bound) in check:
            s = stat(trace[var][2000:], axis=0)
            close_to(s, value, bound)

    def test_step_continuous(self):
        # Sanity-check mean/std recovery on a simple MVN target for every
        # continuous sampler, blocked and non-blocked.
        start, model, (mu, C) = mv_simple()
        unc = np.diag(C) ** .5
        check = (('x', np.mean, mu, unc / 10.),
                 ('x', np.std, unc, unc / 10.))
        with model:
            steps = (
                Slice(),
                HamiltonianMC(scaling=C, is_cov=True, blocked=False),
                NUTS(scaling=C, is_cov=True, blocked=False),
                Metropolis(S=C, proposal_dist=MultivariateNormalProposal, blocked=True),
                Slice(blocked=True),
                HamiltonianMC(scaling=C, is_cov=True),
                NUTS(scaling=C, is_cov=True),
                CompoundStep([
                    HamiltonianMC(scaling=C, is_cov=True),
                    HamiltonianMC(scaling=C, is_cov=True, blocked=False)]),
            )
        for step in steps:
            trace = sample(0, tune=8000, chains=1,
                           discard_tuned_samples=False, step=step,
                           start=start, model=model, random_seed=1)
            self.check_stat(check, trace, step.__class__.__name__)

    def test_step_discrete(self):
        if theano.config.floatX == "float32":
            return  # Cannot use @skip because it only skips one iteration of the yield
        start, model, (mu, C) = mv_simple_discrete()
        unc = np.diag(C) ** .5
        check = (('x', np.mean, mu, unc / 10.),
                 ('x', np.std, unc, unc / 10.))
        with model:
            steps = (
                Metropolis(S=C, proposal_dist=MultivariateNormalProposal),
            )
        for step in steps:
            trace = sample(20000, tune=0, step=step, start=start, model=model,
                           random_seed=1, chains=1)
            self.check_stat(check, trace, step.__class__.__name__)

    def test_step_categorical(self):
        start, model, (mu, C) = simple_categorical()
        unc = C ** .5
        check = (('x', np.mean, mu, unc / 10.),
                 ('x', np.std, unc, unc / 10.))
        with model:
            steps = (
                CategoricalGibbsMetropolis(model.x, proposal='uniform'),
                CategoricalGibbsMetropolis(model.x, proposal='proportional'),
            )
        for step in steps:
            trace = sample(8000, tune=0, step=step, start=start, model=model, random_seed=1)
            self.check_stat(check, trace, step.__class__.__name__)

    def test_step_elliptical_slice(self):
        # Elliptical slice sampling accepts either the prior covariance or
        # its Cholesky factor; both parameterizations must recover the target.
        start, model, (K, L, mu, std, noise) = mv_prior_simple()
        unc = noise ** 0.5
        check = (('x', np.mean, mu, unc / 10.),
                 ('x', np.std, std, unc / 10.))
        with model:
            steps = (
                EllipticalSlice(prior_cov=K),
                EllipticalSlice(prior_chol=L),
            )
        for step in steps:
            trace = sample(5000, tune=0, step=step, start=start, model=model,
                           random_seed=1, chains=1)
            self.check_stat(check, trace, step.__class__.__name__)
class TestMetropolisProposal(object):
    """Checks that Metropolis selects a proposal matching the shape of S."""

    def test_proposal_choice(self):
        _, model, _ = mv_simple()
        with model:
            scale = np.ones(model.ndim)
            # A 1-D scale vector should yield the independent normal proposal.
            assert isinstance(Metropolis(S=scale).proposal_dist, NormalProposal)
            scale = np.diag(scale)
            # A full matrix should yield the multivariate normal proposal.
            assert isinstance(Metropolis(S=scale).proposal_dist,
                              MultivariateNormalProposal)
            # A matrix that is not positive definite must fail Cholesky.
            scale[0, 0] = -scale[0, 0]
            with pytest.raises(np.linalg.LinAlgError):
                Metropolis(S=scale)

    def test_mv_proposal(self):
        # Draw many proposals and verify their empirical covariance matches
        # the requested one (loose tolerance: 10000 samples).
        np.random.seed(42)
        rand = np.random.randn(5, 5)
        cov = rand.dot(rand.T)
        proposal = MultivariateNormalProposal(cov)
        draws = np.array([proposal() for _ in range(10000)])
        npt.assert_allclose(np.cov(draws.T), cov, rtol=0.2)
class TestCompoundStep(object):
    """Verifies blocked/non-blocked step construction for standard samplers."""

    samplers = (Metropolis, Slice, HamiltonianMC, NUTS, DEMetropolis)

    @pytest.mark.skipif(theano.config.floatX == "float32",
                        reason="Test fails on 32 bit due to linalg issues")
    def test_non_blocked(self):
        """Test that samplers correctly create non-blocked compound steps."""
        _, model = simple_2model_continuous()
        with model:
            for make_step in self.samplers:
                step = make_step(blocked=False)
                # Two free variables, stepped separately -> compound step.
                assert isinstance(step, CompoundStep)

    @pytest.mark.skipif(theano.config.floatX == "float32",
                        reason="Test fails on 32 bit due to linalg issues")
    def test_blocked(self):
        # Blocked construction should produce a single joint step instead.
        _, model = simple_2model_continuous()
        with model:
            for make_step in self.samplers:
                blocked_step = make_step(blocked=True)
                assert not isinstance(blocked_step, CompoundStep)
| |
<filename>tests/fake_data.py
from scipy.signal import find_peaks
from astropy.io import fits
from astropy import constants as c
from astropy import units as u
import numpy as np
import pylab as plt
from astropy.table import Table
import os
from src.BAGLE import model
from src.BAGLE import model_fitter
from src.BAGLE import plot_models
import time
import pdb
import pytest
from astropy.time import Time
from astropy.coordinates import solar_system_ephemeris, EarthLocation, spherical_to_cartesian, cartesian_to_spherical
from astropy.coordinates import get_body_barycentric, get_body, get_moon, get_body_barycentric_posvel
# Always generate the same fake data.
# NOTE: this seeds NumPy's *global* RNG at import time, so every fake_*
# helper below is deterministic across runs (and run order matters).
np.random.seed(0)
def fake_lightcurve_parallax_bulge(outdir='./casey_testing_stuff/'):
    """Run the parallax vs. no-parallax lightcurve comparison for a Bulge lens."""
    fake_lightcurve_parallax(raL_in=17.30 * 15.,  # Bulge R.A. (deg)
                             decL_in=-29.0,
                             mL_in=1.0,  # msun
                             t0_in=57100.0,
                             xS0_in=np.array([0.000, 0.088e-3]),  # arcsec
                             beta_in=2.0,  # mas  same as p=0.4
                             muS_in=np.array([-5.0, 0.0]),
                             muL_in=np.array([5.0, 0.0]),
                             dL_in=4000.0,  # pc
                             dS_in=8000.0,  # pc
                             b_sff_in=1.0,
                             mag_src_in=19.0,
                             outdir=outdir)
    return
def fake_lightcurve_parallax(raL_in, decL_in, mL_in, t0_in, xS0_in, beta_in,
                             muS_in, muL_in, dL_in, dS_in, b_sff_in,
                             mag_src_in,
                             outdir=''):
    """Compare a PSPL model with and without annual parallax.

    Builds two models sharing the same microlensing parameters -- one with
    the parallax effect (``PSPL_PhotAstrom_Par_Param1``) and one without
    (``PSPL_PhotAstrom_noPar_Param1``) -- evaluates both on a dense time
    grid, and saves three comparison figures (magnitude, position axis 0,
    position axis 1) into ``outdir``.

    Parameters
    ----------
    raL_in, decL_in : float
        Lens R.A. and Dec. in degrees (used only by the parallax model).
    mL_in : float
        Lens mass in msun.
    t0_in : float
        Time of closest approach (MJD).
    xS0_in : array-like of 2 floats
        Source position at t0 in arcsec.
    beta_in : float
        Impact parameter in mas.
    muS_in, muL_in : array-like of 2 floats
        Source / lens proper motion in mas/yr.
    dL_in, dS_in : float
        Lens / source distance in pc.
    b_sff_in : float
        Source flux fraction (blending).
    mag_src_in : float
        Source baseline magnitude.
    outdir : str, optional
        Output directory, created if needed.  Figure paths are built by
        string concatenation, so include a trailing '/'.
    """
    # Empty string or None both mean "save into the current directory".
    if outdir:
        os.makedirs(outdir, exist_ok=True)

    pspl_par_in = model.PSPL_PhotAstrom_Par_Param1(mL_in,
                                                   t0_in,
                                                   beta_in,
                                                   dL_in,
                                                   dL_in / dS_in,
                                                   xS0_in[0],
                                                   xS0_in[1],
                                                   muL_in[0],
                                                   muL_in[1],
                                                   muS_in[0],
                                                   muS_in[1],
                                                   [b_sff_in],
                                                   [mag_src_in],
                                                   raL=raL_in,
                                                   decL=decL_in)

    pspl_in = model.PSPL_PhotAstrom_noPar_Param1(mL_in,
                                                 t0_in,
                                                 beta_in,
                                                 dL_in,
                                                 dL_in / dS_in,
                                                 xS0_in[0],
                                                 xS0_in[1],
                                                 muL_in[0],
                                                 muL_in[1],
                                                 muS_in[0],
                                                 muS_in[1],
                                                 [b_sff_in],
                                                 [mag_src_in])

    # Simulate both models on a dense, regular time grid spanning the event.
    t = np.linspace(55000, 59000, 2000)

    imag_obs_par = pspl_par_in.get_photometry(t)
    pos_obs_par = pspl_par_in.get_astrometry(t)

    imag_obs = pspl_in.get_photometry(t)
    pos_obs = pspl_in.get_astrometry(t)

    _plot_parallax_comparison(1, t, imag_obs_par, imag_obs,
                              'mag', outdir + 'fig1.png')
    _plot_parallax_comparison(2, t, pos_obs_par[:, 0], pos_obs[:, 0],
                              'pos, 0', outdir + 'fig2.png')
    _plot_parallax_comparison(3, t, pos_obs_par[:, 1], pos_obs[:, 1],
                              'pos, 1', outdir + 'fig3.png')
    return


def _plot_parallax_comparison(fignum, t, y_par, y_nopar, ylabel, outfile):
    """Plot parallax vs. no-parallax curves (top) and their difference (bottom)."""
    plt.figure(fignum)
    plt.clf()
    ax1 = plt.subplot(2, 1, 1)
    ax2 = plt.subplot(2, 1, 2)
    ax1.plot(t, y_par)
    ax1.plot(t, y_nopar)
    ax2.plot(t, y_par - y_nopar)
    plt.xlabel('time')
    plt.ylabel(ylabel)
    # BUGFIX: save *before* show().  With an interactive backend, show()
    # blocks until the window is closed, after which savefig() can write
    # an empty figure.
    plt.savefig(outfile)
    plt.show()
def fake_data_parallax_bulge(outdir='test_mnest_bulge/'):
    """Generate noiseless fake photometry + astrometry for a Bulge microlens."""
    data, params = fake_data_parallax(raL_in=17.30 * 15.,  # Bulge R.A.
                                      decL_in=-29.0,
                                      mL_in=10.0,  # msun
                                      t0_in=57000.0,
                                      xS0_in=np.array([0.000, 0.088e-3]),  # arcsec
                                      beta_in=2.0,  # mas  same as p=0.4
                                      muS_in=np.array([-5.0, 0.0]),
                                      muL_in=np.array([0.0, 0.0]),
                                      dL_in=4000.0,  # pc
                                      dS_in=8000.0,  # pc
                                      b_sff_in=1.0,
                                      mag_src_in=19.0,
                                      outdir=outdir,
                                      target='Bulge',
                                      noise=False)
    return data, params
def fake_data_parallax_lmc(outdir='test_mnest_lmc/'):
    """Generate noisy fake photometry + astrometry for an LMC-direction event."""
    # NOTE(review): decL_in=-29.0 is kept from the original; the comment
    # there described it as "sin beta = -0.99 where beta = ecliptic lat",
    # not the true LMC declination -- confirm before changing.
    data, params = fake_data_parallax(raL_in=80.89375,  # LMC R.A.
                                      decL_in=-29.0,
                                      mL_in=10.0,  # msun
                                      t0_in=57000.0,
                                      xS0_in=np.array([0.000, 0.088e-3]),  # arcsec
                                      beta_in=2.0,  # mas  same as p=0.4
                                      muS_in=np.array([-5.0, 0.0]),
                                      muL_in=np.array([0.0, 0.0]),
                                      dL_in=4000.0,  # pc
                                      dS_in=8000.0,  # pc
                                      b_sff_in=1.0,
                                      mag_src_in=19.0,
                                      outdir=outdir,
                                      target='LMC')
    return data, params
def fake_data_parallax(raL_in, decL_in, mL_in, t0_in, xS0_in, beta_in,
                       muS_in, muL_in, dL_in, dS_in, b_sff_in, mag_src_in,
                       outdir='', target='Unknown', noise=True):
    """Simulate a PSPL (with parallax) photometric + astrometric data set.

    Builds a ``PSPL_PhotAstrom_Par_Param1`` model, samples it on a realistic
    observing cadence (photometry daily in a 240-day season, astrometry every
    14 days in a 120-day season, over 56000-58000 MJD), optionally adds
    Poisson-like photometric noise and Gaussian astrometric noise, and
    returns two dicts: ``data`` (time series + metadata) and ``params``
    (the input truth values plus derived quantities such as tE and piE).

    NOTE: noise draws use NumPy's global RNG (seeded at module import).
    """
    pspl_par_in = model.PSPL_PhotAstrom_Par_Param1(mL_in,
                                                   t0_in,
                                                   beta_in,
                                                   dL_in,
                                                   dL_in / dS_in,
                                                   xS0_in[0],
                                                   xS0_in[1],
                                                   muL_in[0],
                                                   muL_in[1],
                                                   muS_in[0],
                                                   muS_in[1],
                                                   b_sff=[b_sff_in],
                                                   mag_src=[mag_src_in],
                                                   raL=raL_in,
                                                   decL=decL_in)

    # Simulate
    # photometric observations every 1 day and
    # astrometric observations every 14 days
    # for the bulge observing window. Observations missed
    # for 125 days out of 365 days for photometry and missed
    # for 245 days out of 365 days for astrometry.
    t_phot = np.array([], dtype=float)
    t_ast = np.array([], dtype=float)
    for year_start in np.arange(56000, 58000, 365.25):
        # Photometry: 240-day season centered within each year.
        phot_win = 240.0
        phot_start = (365.25 - phot_win) / 2.0
        t_phot_new = np.arange(year_start + phot_start,
                               year_start + phot_start + phot_win, 1)
        t_phot = np.concatenate([t_phot, t_phot_new])

        # Astrometry: shorter 120-day season, one epoch every 14 days.
        ast_win = 120.0
        ast_start = (365.25 - ast_win) / 2.0
        t_ast_new = np.arange(year_start + ast_start,
                              year_start + ast_start + ast_win, 14)
        t_ast = np.concatenate([t_ast, t_ast_new])

    # Make the photometric observations.
    # NOTE(review): the original comment claimed "Signal = 400 e- at I=19"
    # but flux0 is 4000 here -- i.e. ~0.017 mag errors at I=19; confirm
    # which was intended.
    flux0 = 4000.0
    imag0 = 19.0

    imag_obs = pspl_par_in.get_photometry(t_phot)
    imag_obs_err = np.zeros(len(t_phot))

    if noise:
        # Poisson-like noise: sigma_flux = sqrt(flux); the magnitude error
        # uses 1.087 ~= 2.5 / ln(10).
        flux_obs = flux0 * 10 ** ((imag_obs - imag0) / -2.5)
        flux_obs_err = flux_obs ** 0.5
        flux_obs += np.random.randn(len(t_phot)) * flux_obs_err
        imag_obs = -2.5 * np.log10(flux_obs / flux0) + imag0
        imag_obs_err = 1.087 / flux_obs_err

    # Make the astrometric observations.
    # NOTE(review): the original comment said 0.15 mas errors, but the code
    # uses 0.01 mas (0.01e-3 arcsec) per axis -- confirm intended value.
    if noise:
        pos_obs_tmp = pspl_par_in.get_astrometry(t_ast)
        pos_obs_err = np.ones((len(t_ast), 2), dtype=float) * 0.01 * 1e-3
        pos_obs = pos_obs_tmp + pos_obs_err * np.random.randn(len(t_ast), 2)
    else:
        pos_obs = pspl_par_in.get_astrometry(t_ast)
        pos_obs_err = np.zeros((len(t_ast), 2))

    # Observed time series and metadata, keyed per data set ("1").
    data = {}
    data['t_phot1'] = t_phot
    data['mag1'] = imag_obs
    data['mag_err1'] = imag_obs_err
    data['phot_files'] = ['fake_data_parallax_phot1']
    data['ast_files'] = ['fake_data_parallax_ast1']

    data['t_ast1'] = t_ast
    data['xpos1'] = pos_obs[:, 0]
    data['ypos1'] = pos_obs[:, 1]
    data['xpos_err1'] = pos_obs_err[:, 0]
    data['ypos_err1'] = pos_obs_err[:, 1]

    data['raL'] = raL_in
    data['decL'] = decL_in
    data['target'] = target
    data['phot_data'] = 'sim'
    data['ast_data'] = 'sim'

    # Truth values used to generate the data.
    params = {}
    params['raL'] = raL_in
    params['decL'] = decL_in
    params['mL'] = mL_in
    params['t0'] = t0_in
    params['xS0_E'] = xS0_in[0]
    params['xS0_N'] = xS0_in[1]
    params['beta'] = beta_in
    params['muS_E'] = muS_in[0]
    params['muS_N'] = muS_in[1]
    params['muL_E'] = muL_in[0]
    params['muL_N'] = muL_in[1]
    params['dL'] = dL_in
    params['dS'] = dS_in
    params['b_sff'] = b_sff_in
    params['mag_src'] = mag_src_in

    # Extra parameters derived by the model (fit parameterization).
    params['dL_dS'] = params['dL'] / params['dS']
    params['tE'] = pspl_par_in.tE
    params['thetaE'] = pspl_par_in.thetaE_amp
    params['piE_E'] = pspl_par_in.piE[0]
    params['piE_N'] = pspl_par_in.piE[1]
    params['u0_amp'] = pspl_par_in.u0_amp
    params['muRel_E'] = pspl_par_in.muRel[0]
    params['muRel_N'] = pspl_par_in.muRel[1]

    # model_fitter.plot_photometry(data, pspl_par_in, dense_time=True)
    # plt.figure(1)
    # plt.title('Input Data and Model')
    # plt.savefig(outdir + 'fake_data_phot.png')
    #
    # model_fitter.plot_astrometry(data, pspl_par_in, dense_time=True)
    # plt.figure(2)
    # plt.title('Input Data and Model')
    # plt.savefig(outdir + 'fake_data_ast.png')
    #
    # plt.figure(3)
    # plt.title('Input Data and Model')
    # plt.savefig(outdir + 'fake_data_t_vs_E.png')
    #
    # plt.figure(4)
    # plt.title('Input Data and Model')
    # plt.savefig(outdir + 'fake_data_t_vs_N.png')

    return data, params
def fake_data1(beta_sign=-1, plot=False, verbose=False):
# Input parameters
mL_in = 10.0 # msun
t0_in = 57000.00
xS0_in = np.array([0.000, 0.000])
beta_in = beta_sign * 0.4 # Einstein radii
muL_in = np.array([0.0, -7.0]) # Strong
# muL_in = np.array([-7.0, 0.0]) # Weak
muS_in = np.array([1.5, -0.5]) # mas/yr
dL_in = 4000.0
dS_in = 8000.0
b_sff_in = 1.0
mag_src_in = 19.0
pspl_in = model.PSPL_PhotAstrom_noPar_Param1(mL_in,
t0_in,
beta_in,
dL_in,
dL_in / dS_in,
xS0_in[0],
xS0_in[1],
muL_in[0],
muL_in[1],
muS_in[0],
muS_in[1],
[b_sff_in],
[mag_src_in])
if verbose:
print('Photometry Parameters: ')
print('t0 = ', pspl_in.t0)
print('u0 = ', pspl_in.u0_amp)
print('tE = ', pspl_in.tE)
print('piE_E = ', pspl_in.piE[0])
print('piE_N = ', pspl_in.piE[1])
print('b_sff = ', pspl_in.b_sff)
print('mag_src = ', pspl_in.mag_src)
print('Astrometry Parameters: ')
print('mL = ', pspl_in.t0)
print('beta = ', pspl_in.u0_amp)
print('dL = ', pspl_in.tE)
print('dS = ', pspl_in.piE[0])
print('xS0_E = ', pspl_in.xS0[0])
print('xS0_N = ', pspl_in.xS0[1])
print('muL_E = ', pspl_in.muL[0])
print('muL_N = ', pspl_in.muL[1])
print('muS_E = ', pspl_in.muS[0])
print('muS_N = ', pspl_in.muS[1])
print('muRel_E = ', pspl_in.muRel[0])
print('muRel_N = ', pspl_in.muRel[1])
# Simulate
# photometric observations every 1 day and
# astrometric observations every 14 days
# for the bulge observing window. Observations missed
# for 125 days out of 365 days for photometry and missed
# for 245 days out of 365 days for astrometry.
t_phot = np.array([], dtype=float)
t_ast = np.array([], dtype=float)
for year_start in np.arange(54000, 60000, 365.25):
phot_win = 240.0
phot_start = (365.25 - phot_win) / 2.0
t_phot_new = np.arange(year_start + phot_start,
year_start + phot_start + phot_win, 1)
t_phot = np.concatenate([t_phot, t_phot_new])
ast_win = 120.0
ast_start = (365.25 - ast_win) / 2.0
t_ast_new = np.arange(year_start + ast_start,
year_start + ast_start + ast_win, 14)
t_ast = np.concatenate([t_ast, t_ast_new])
t_mod = np.arange(t_phot.min(), t_phot.max(), 1)
A = pspl_in.get_amplification(t_phot)
shift = pspl_in.get_centroid_shift(t_ast)
dt_phot = t_phot - pspl_in.t0
dt_ast = t_ast - pspl_in.t0
# Make the photometric observations.
# Assume 0.05 mag photoemtric errors at I=19.
# This means Signal = 400 e- at I=19.
flux0 = 400.0
imag0 = 19.0
# flux_in = flux0 * 10**((imag_in - imag0) / -2.5)
# flux_obs = flux_in * A
# flux_obs_err = flux_obs**0.5
# flux_obs += np.random.randn(len(t_phot)) * flux_obs_err
# imag_obs = -2.5 * np.log10(flux_obs / flux0) + imag0
# imag_obs_err = 1.087 / flux_obs_err
| |
f'be read. Invalidating rotor.'
logger.error(message)
break
trajectory = parser.parse_1d_scan_coords(path=job.local_path_to_output_file) \
if self.species_dict[label].is_ts else None
invalidate, invalidation_reason, message, actions = scan_quality_check(
label=label,
pivots=job.pivots,
energies=energies,
scan_res=job.scan_res,
used_methods=self.species_dict[label].rotors_dict[i]['trsh_methods'],
log_file=job.local_path_to_output_file,
species=self.species_dict[label],
preserve_params=self.species_dict[label].preserve_param_in_scan,
trajectory=trajectory,
original_xyz=self.species_dict[label].final_xyz,
)
if len(actions):
# the rotor scan is problematic, troubleshooting is required
logger.info(f'Trying to troubleshoot rotor {job.pivots} of {label} ...')
# Try to troubleshoot the rotor. sometimes, troubleshooting cannot yield solutions
# actions from scan_quality_check() is not the actual actions applied,
# they will be post-processed by trsh_scan_job. If troubleshooting fails,
# The actual actions will be an empty list, indicating invalid rotor.
trsh_success, actions = self.troubleshoot_scan_job(job=job, methods=actions)
if not trsh_success:
# Detailed reasons are logged in the troubleshoot_scan_job()
invalidation_reason += ' But unable to propose troubleshooting methods.'
else:
# record actions, only if the method is valid
self.species_dict[label].rotors_dict[i]['trsh_methods'].append(actions)
if not invalidate:
# the rotor scan is good, calculate the symmetry number
self.species_dict[label].rotors_dict[i]['success'] = True
self.species_dict[label].rotors_dict[i]['symmetry'] = determine_rotor_symmetry(
label=label, pivots=self.species_dict[label].rotors_dict[i]['pivots'],
rotor_path=job.local_path_to_output_file)[0]
logger.info('Rotor scan {scan} between pivots {pivots} for {label} has symmetry {symmetry}'.format(
scan=self.species_dict[label].rotors_dict[i]['scan'],
pivots=self.species_dict[label].rotors_dict[i]['pivots'],
label=label, symmetry=self.species_dict[label].rotors_dict[i]['symmetry']))
break
else:
raise SchedulerError(f'Could not match rotor with pivots {job.pivots} in species {label}')
# This is a bad rotor scan
if invalidate:
self.species_dict[label].rotors_dict[i]['success'] = None if len(actions) else False
# Better to save the path and invalidation reason for debugging and tracking the file
# if ``success`` is None, it means that the job is being troubleshooted
self.species_dict[label].rotors_dict[i]['scan_path'] = job.local_path_to_output_file
self.species_dict[label].rotors_dict[i]['invalidation_reason'] += invalidation_reason
# If energies were obtained, draw the scan curve
if energies is not None and len(energies):
folder_name = 'rxns' if job.is_ts else 'Species'
rotor_path = os.path.join(self.project_directory, 'output', folder_name, job.species_name, 'rotors')
plotter.plot_1d_rotor_scan(angles=angles,
energies=energies,
path=rotor_path,
scan=job.scan,
comment=message,
label=label,
original_dihedral=self.species_dict[label].rotors_dict[i]['original_dihedrals'],
)
# Save the restart dictionary
self.save_restart_dict()
def check_directed_scan(self, label, pivots, scan, energies):
    """
    Checks (QA) whether the directed scan is relatively "smooth",
    and whether the optimized geometry indeed represents the minimum energy conformer.
    Recommends whether or not to use this rotor using the 'successful_rotors' and 'unsuccessful_rotors' attributes.
    This method differs from check_directed_scan_job(), since here we consider the entire scan.

    Args:
        label (str): The species label.
        pivots (list): The rotor pivots.
        scan (list): The four atoms defining the dihedral.
        energies (list): The rotor scan energies in kJ/mol.

    Todo:
        - Not used!!
        - adjust to ND, merge with check_directed_scan_job (this one isn't being called)
    """
    # If the job has not converged, troubleshoot
    invalidate, invalidation_reason, message, actions = scan_quality_check(label=label,
                                                                           pivots=pivots,
                                                                           energies=energies)
    if actions:
        # the rotor scan is problematic, troubleshooting is required
        if 'change conformer' in actions:
            # a lower conformation was found
            # NOTE(review): assumes actions[1] holds the dihedral increment when
            # 'change conformer' is requested — confirm against scan_quality_check().
            deg_increment = actions[1]
            self.species_dict[label].set_dihedral(scan=scan, deg_increment=deg_increment)
            is_isomorphic = self.species_dict[label].check_xyz_isomorphism(
                allow_nonisomorphic_2d=self.allow_nonisomorphic_2d,
                xyz=self.species_dict[label].initial_xyz)
            if is_isomorphic:
                # The lower conformer is the same species: restart all of its jobs from it.
                self.delete_all_species_jobs(label)
                # Remove all completed rotor calculation information
                for rotor_dict in self.species_dict[label].rotors_dict.values():
                    # don't initialize all parameters, e.g., `times_dihedral_set` needs to remain as is
                    rotor_dict['scan_path'] = ''
                    rotor_dict['invalidation_reason'] = ''
                    rotor_dict['success'] = None
                    rotor_dict.pop('symmetry', None)
                # re-run opt (or composite) on the new initial_xyz with the desired dihedral
                if not self.composite_method:
                    self.run_opt_job(label, fine=self.fine_only)
                else:
                    self.run_composite_job(label)
            else:
                # The conformer is wrong, and changing the dihedral resulted in a non-isomorphic species.
                self.output[label]['errors'] += f'A lower conformer was found for {label} via a torsion mode, ' \
                                                f'but it is not isomorphic with the 2D graph representation ' \
                                                f'{self.species_dict[label].mol.copy(deep=True).to_smiles()}. ' \
                                                f'Not calculating this species.'
                self.output[label]['conformers'] += 'Unconverged'
                self.output[label]['convergence'] = False
        else:
            # Any other troubleshooting action is unsupported for directed scans:
            # mark the matching rotor as failed and record why.
            logger.error(f'Directed scan for species {label} for pivots {pivots} failed with: '
                         f'{invalidation_reason}. Currently rotor troubleshooting methods do not apply for '
                         f'directed scans. Not troubleshooting rotor.')
            for rotor_dict in self.species_dict[label].rotors_dict.values():
                if rotor_dict['pivots'] == pivots:
                    rotor_dict['scan_path'] = ''
                    rotor_dict['invalidation_reason'] = invalidation_reason
                    rotor_dict['success'] = False
    else:
        # the rotor scan is good, calculate the symmetry number
        for rotor_dict in self.species_dict[label].rotors_dict.values():
            if rotor_dict['pivots'] == pivots:
                if not invalidate:
                    rotor_dict['success'] = True
                    rotor_dict['symmetry'] = determine_rotor_symmetry(label=label,
                                                                      pivots=pivots,
                                                                      energies=energies)[0]
                    logger.info(f'Rotor scan {scan} between pivots {pivots} for {label} has symmetry '
                                f'{rotor_dict["symmetry"]}')
                else:
                    rotor_dict['success'] = False
    # Save the restart dictionary
    self.save_restart_dict()
def check_directed_scan_job(self, label, job):
    """
    Check that a directed scan job for a specific dihedral angle converged successfully,
    otherwise troubleshoot.

    rotors_dict structure (attribute of ARCSpecies)::

        rotors_dict: {1: {'pivots': ``list``,
                          'top': ``list``,
                          'scan': ``list``,
                          'number_of_running_jobs': ``int``,
                          'success': ``bool``,
                          'invalidation_reason': ``str``,
                          'times_dihedral_set': ``int``,
                          'scan_path': <path to scan output file>,
                          'max_e': ``float``,  # in kJ/mol,
                          'symmetry': ``int``,
                          'dimensions': ``int``,
                          'original_dihedrals': ``list``,
                          'cont_indices': ``list``,
                          'directed_scan_type': ``str``,
                          'directed_scan': ``dict``,  # keys: tuples of dihedrals as strings,
                                                      # values: dicts of energy, xyz, is_isomorphic, trsh
                          }
                     2: {}, ...
                     }

    Args:
        label (str): The species label.
        job (Job): The rotor scan job object.
    """
    if job.job_status[1]['status'] != 'done':
        # The job did not converge; hand it over to the generic ESS troubleshooter.
        self.troubleshoot_ess(label=label,
                              job=job,
                              level_of_theory=self.scan_level)
        return
    # Converged: record geometry, energy and isomorphism for this dihedral point.
    geometry = parser.parse_geometry(path=job.local_path_to_output_file)
    isomorphic = self.species_dict[label].check_xyz_isomorphism(xyz=geometry, verbose=False)
    dihedral_key = tuple(f'{dihedral:.2f}' for dihedral in job.directed_dihedrals)
    for rotor_dict in self.species_dict[label].rotors_dict.values():
        if rotor_dict['pivots'] == job.pivots:
            rotor_dict['directed_scan'][dihedral_key] = {
                'energy': parser.parse_e_elect(path=job.local_path_to_output_file),
                'xyz': geometry,
                'is_isomorphic': isomorphic,
                'trsh': job.ess_trsh_methods,
            }
def check_all_done(self, label):
    """
    Check that we have all required data for the species/TS.

    Args:
        label (str): The species label.
    """
    all_converged = True
    for job_type, spawn_job_type in self.job_types.items():
        # A job type counts against convergence only if it was requested, did not
        # succeed, and is not exempt for this species. Exemptions: scans/conformers
        # for TSs, most job types for monoatomics, 'bde' when no BDEs were defined,
        # and 'conformers' in general.
        if spawn_job_type and not self.output[label]['job_types'][job_type] \
                and not ((self.species_dict[label].is_ts and job_type in ['scan', 'conformers'])
                         or (self.species_dict[label].number_of_atoms == 1
                             and job_type in ['conformers', 'opt', 'fine', 'freq', 'rotors', 'bde'])
                         or job_type == 'bde' and self.species_dict[label].bdes is None
                         or job_type == 'conformers'):
            logger.debug(f'Species {label} did not converge')
            all_converged = False
            break
    if all_converged:
        self.output[label]['convergence'] = True
        if self.species_dict[label].is_ts:
            self.species_dict[label].make_ts_report()
            logger.info(self.species_dict[label].ts_report + '\n')
        # Aggregate run times: max over conformer jobs, sum over opt and composite
        # jobs, and max over the per-type sums of all remaining job types.
        zero_delta = datetime.timedelta(0)
        conf_time = extremum_list([job.run_time for job in self.job_dict[label]['conformers'].values()],
                                  return_min=False) \
            if 'conformers' in self.job_dict[label] else zero_delta
        opt_time = sum_time_delta([job.run_time for job in self.job_dict[label]['opt'].values()]) \
            if 'opt' in self.job_dict[label] else zero_delta
        comp_time = sum_time_delta([job.run_time for job in self.job_dict[label]['composite'].values()]) \
            if 'composite' in self.job_dict[label] else zero_delta
        other_time = extremum_list([sum_time_delta([job.run_time for job in job_dictionary.values()])
                                    for job_type, job_dictionary in self.job_dict[label].items()
                                    if job_type not in ['conformers', 'opt', 'composite']], return_min=False) \
            if any([job_type not in ['conformers', 'opt', 'composite']
                    for job_type in self.job_dict[label].keys()]) else zero_delta
        # Keep a previously recorded run time if one exists (e.g. from a restart).
        self.species_dict[label].run_time = self.species_dict[label].run_time \
            or (conf_time or zero_delta) + (opt_time or zero_delta) \
            + (comp_time or zero_delta) + (other_time or zero_delta)
        logger.info(f'\nAll jobs for species {label} successfully converged. '
                    f'Run time: {self.species_dict[label].run_time}')
    else:
        # Report which requested job types did (not) succeed.
        job_type_status = {key: val for key, val in self.output[label]['job_types'].items()
                           if key in self.job_types and self.job_types[key]}
        logger.error(f'Species {label} did not converge. Job type status is: {job_type_status}')
    # Update restart dictionary and save the yaml restart file:
    self.save_restart_dict()
def get_servers_jobs_ids(self):
    """
    Check status on all active servers, return a list of relevant running job IDs.

    Populates ``self.servers_jobs_ids`` with the IDs of jobs currently running
    on each server (queried locally or over SSH).
    """
    self.servers_jobs_ids = list()
    for server in self.servers:
        if server == 'local':
            # The local machine is queried directly, no SSH session needed.
            self.servers_jobs_ids.extend(check_running_jobs_ids())
        else:
            with SSHClient(server) as ssh:
                self.servers_jobs_ids.extend(ssh.check_running_jobs_ids())
def troubleshoot_negative_freq(self, label, job):
    """
    Troubleshooting cases where non-TS species have negative frequencies.
    Run newly generated conformers.

    Args:
        label (str): The species label.
        job (Job): The frequency job object.
    """
    # Delegate the analysis: trsh_negative_freq() returns the modes that were
    # perturbed, new conformer candidates, and any error/warning messages.
    current_neg_freqs_trshed, confs, output_errors, output_warnings = trsh_negative_freq(
        label=label, log_file=job.local_path_to_output_file,
        neg_freqs_trshed=self.species_dict[label].neg_freqs_trshed, job_types=self.job_types)
    self.species_dict[label].neg_freqs_trshed.extend(current_neg_freqs_trshed)
    for output_error in output_errors:
        self.output[label]['errors'] += output_error
        if 'Invalidating species' in output_error:
            # The species cannot be salvaged; stop all of its running jobs.
            logger.info(f'Deleting all currently running jobs for species {label}...')
            self.delete_all_species_jobs(label)
            self.output[label]['convergence'] = False
    for output_warning in output_warnings:
        self.output[label]['warnings'] += output_warning
    if len(confs):
        # New conformers were proposed: restart conformer jobs from scratch.
        logger.info(f'Deleting all currently running jobs for species {label} before troubleshooting for '
                    f'negative frequency...')
        self.delete_all_species_jobs(label)
        self.species_dict[label].conformers = confs
        self.species_dict[label].conformer_energies = [None] * len(confs)
        self.job_dict[label]['conformers'] = dict()  # initialize the conformer job dictionary
        for i, xyz in enumerate(self.species_dict[label].conformers):
            self.run_job(label=label, xyz=xyz, level_of_theory=self.conformer_level, job_type='conformer',
                         conformer=i)
def troubleshoot_scan_job(self,
job: Job,
methods: Optional[dict] = None,
) -> Tuple[bool, dict]:
"""
Troubleshooting rotor scans
Using the following methods:
1. freeze: freezing specific internal coordinates or all torsions other than the scan's pivots
2. inc_res: increasing the scan resolution.
3. change conformer: changing to a conformer with a lower energy
Args:
job (Job): The scan Job object.
methods (dict): The troubleshooting method/s to try::
{'freeze': <a list of problematic internal coordinates>,
'inc_res': ``None``,
'change conformer': <a xyz dict>}
Returns: Tuple[bool, dict]:
- ``True`` if the troubleshooting is valid.
- The actions are actual applied in the troubleshooting.
"""
label = job.species_name
trsh_success = False
actual_actions = dict() # If troubleshooting fails, there will be no action
# Read used troubleshooting methods
for rotor in | |
#! /usr/bin/env python
#
# mosaic.py -- Example of quick and dirty mosaicing of FITS images
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
Usage:
$ ./mosaic.py -o output.fits input1.fits input2.fits ... inputN.fits
"""
import sys
import os
import math
import numpy as np
from ginga import AstroImage, trcalc
from ginga.util import wcs, loader, dp, iqcalc
from ginga.util import io_fits
from ginga.misc import log
def mosaic_inline(baseimage, imagelist, bg_ref=None, trim_px=None,
                  merge=False, allow_expand=True, expand_pad_deg=0.01,
                  max_expand_pct=None,
                  update_minmax=True, suppress_callback=False):
    """Drops new images into the image `baseimage` (if there is room),
    relocating them according the WCS between the two images.

    Each piece is trimmed, background-matched, scaled, rotated and flipped
    as needed to match the mosaic's WCS, then pasted at the pixel position
    of its sky center.  The mosaic array is expanded when a piece does not
    fit and ``allow_expand`` is True.  Returns a list of ``(xlo, ylo, xhi,
    yhi)`` placement tuples, one per inserted piece.

    NOTE(review): mutates ``baseimage`` in place (data array, min/max,
    WCS keywords on expansion).
    """
    # Get our own (mosaic) rotation and scale
    header = baseimage.get_header()
    ((xrot_ref, yrot_ref),
     (cdelt1_ref, cdelt2_ref)) = wcs.get_xy_rotation_and_scale(header)

    scale_x, scale_y = math.fabs(cdelt1_ref), math.fabs(cdelt2_ref)

    # drop each image in the right place in the new data array
    mydata = baseimage._get_data()

    count = 1
    res = []
    for image in imagelist:
        name = image.get('name', 'image%d' % (count))
        count += 1

        data_np = image._get_data()
        if 0 in data_np.shape:
            baseimage.logger.info("Skipping image with zero length axis")
            continue

        # Calculate sky position at the center of the piece
        ctr_x, ctr_y = trcalc.get_center(data_np)
        ra, dec = image.pixtoradec(ctr_x, ctr_y)

        # User specified a trim?  If so, trim edge pixels from each
        # side of the array
        ht, wd = data_np.shape[:2]
        if trim_px:
            xlo, xhi = trim_px, wd - trim_px
            ylo, yhi = trim_px, ht - trim_px
            data_np = data_np[ylo:yhi, xlo:xhi, ...]
            ht, wd = data_np.shape[:2]

        # If caller asked us to match background of pieces then
        # get the median of this piece
        if bg_ref is not None:
            bg = iqcalc.get_median(data_np)
            bg_inc = bg_ref - bg
            data_np = data_np + bg_inc

        # Determine max/min to update our values
        if update_minmax:
            maxval = np.nanmax(data_np)
            minval = np.nanmin(data_np)
            baseimage.maxval = max(baseimage.maxval, maxval)
            baseimage.minval = min(baseimage.minval, minval)

        # Get rotation and scale of piece
        header = image.get_header()
        ((xrot, yrot),
         (cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)
        baseimage.logger.debug("image(%s) xrot=%f yrot=%f cdelt1=%f "
                               "cdelt2=%f" % (name, xrot, yrot, cdelt1, cdelt2))

        # scale if necessary
        # TODO: combine with rotation?
        if (not np.isclose(math.fabs(cdelt1), scale_x) or
                not np.isclose(math.fabs(cdelt2), scale_y)):
            nscale_x = math.fabs(cdelt1) / scale_x
            nscale_y = math.fabs(cdelt2) / scale_y
            baseimage.logger.debug("scaling piece by x(%f), y(%f)" % (
                nscale_x, nscale_y))
            data_np, (ascale_x, ascale_y) = trcalc.get_scaled_cutout_basic(
                data_np, 0, 0, wd - 1, ht - 1, nscale_x, nscale_y,
                logger=baseimage.logger)

        # Rotate piece into our orientation, according to wcs
        rot_dx, rot_dy = xrot - xrot_ref, yrot - yrot_ref

        flip_x = False
        flip_y = False

        # Optimization for 180 rotations
        if (np.isclose(math.fabs(rot_dx), 180.0) or
                np.isclose(math.fabs(rot_dy), 180.0)):
            rotdata = trcalc.transform(data_np,
                                       flip_x=True, flip_y=True)
            rot_dx = 0.0
            rot_dy = 0.0
        else:
            rotdata = data_np

        # Finish with any necessary rotation of piece
        if not np.isclose(rot_dy, 0.0):
            rot_deg = rot_dy
            baseimage.logger.debug("rotating %s by %f deg" % (name, rot_deg))
            rotdata = trcalc.rotate(rotdata, rot_deg,
                                    #rotctr_x=ctr_x, rotctr_y=ctr_y
                                    logger=baseimage.logger)

        # Flip X due to negative CDELT1
        if np.sign(cdelt1) != np.sign(cdelt1_ref):
            flip_x = True

        # Flip Y due to negative CDELT2
        if np.sign(cdelt2) != np.sign(cdelt2_ref):
            flip_y = True

        if flip_x or flip_y:
            rotdata = trcalc.transform(rotdata,
                                       flip_x=flip_x, flip_y=flip_y)

        # Get size and data of new image
        ht, wd = rotdata.shape[:2]
        ctr_x, ctr_y = trcalc.get_center(rotdata)

        # Find location of image piece (center) in our array
        x0, y0 = baseimage.radectopix(ra, dec)

        # Merge piece as closely as possible into our array
        # Unfortunately we lose a little precision rounding to the
        # nearest pixel--can't be helped with this approach
        x0, y0 = int(np.round(x0)), int(np.round(y0))
        baseimage.logger.debug("Fitting image '%s' into mosaic at %d,%d" % (
            name, x0, y0))

        # This is for useful debugging info only
        my_ctr_x, my_ctr_y = trcalc.get_center(mydata)
        off_x, off_y = x0 - my_ctr_x, y0 - my_ctr_y
        baseimage.logger.debug("centering offsets: %d,%d" % (off_x, off_y))

        # Sanity check piece placement
        xlo, xhi = x0 - ctr_x, x0 + wd - ctr_x
        ylo, yhi = y0 - ctr_y, y0 + ht - ctr_y
        assert (xhi - xlo == wd), \
            Exception("Width differential %d != %d" % (xhi - xlo, wd))
        assert (yhi - ylo == ht), \
            Exception("Height differential %d != %d" % (yhi - ylo, ht))

        mywd, myht = baseimage.get_size()
        if xlo < 0 or xhi > mywd or ylo < 0 or yhi > myht:
            if not allow_expand:
                raise Exception("New piece doesn't fit on image and "
                                "allow_expand=False")

            # <-- Resize our data array to allow the new image

            # determine amount to pad expansion by
            expand_x = max(int(expand_pad_deg / scale_x), 0)
            expand_y = max(int(expand_pad_deg / scale_y), 0)

            nx1_off, nx2_off = 0, 0
            if xlo < 0:
                nx1_off = abs(xlo) + expand_x
            if xhi > mywd:
                nx2_off = (xhi - mywd) + expand_x
            xlo, xhi = xlo + nx1_off, xhi + nx1_off

            ny1_off, ny2_off = 0, 0
            if ylo < 0:
                ny1_off = abs(ylo) + expand_y
            if yhi > myht:
                ny2_off = (yhi - myht) + expand_y
            ylo, yhi = ylo + ny1_off, yhi + ny1_off

            new_wd = mywd + nx1_off + nx2_off
            new_ht = myht + ny1_off + ny2_off

            # sanity check on new mosaic size
            old_area = mywd * myht
            new_area = new_wd * new_ht
            expand_pct = new_area / old_area
            if ((max_expand_pct is not None) and
                    (expand_pct > max_expand_pct)):
                raise Exception("New area exceeds current one by %.2f %%;"
                                "increase max_expand_pct (%.2f) to allow" %
                                (expand_pct * 100, max_expand_pct))

            # go for it!
            new_data = np.zeros((new_ht, new_wd))
            # place current data into new data
            new_data[ny1_off:ny1_off + myht, nx1_off:nx1_off + mywd] = \
                mydata
            baseimage._data = new_data
            mydata = new_data

            if (nx1_off > 0) or (ny1_off > 0):
                # Adjust our WCS for relocation of the reference pixel
                crpix1, crpix2 = baseimage.get_keywords_list('CRPIX1', 'CRPIX2')
                kwds = dict(CRPIX1=crpix1 + nx1_off,
                            CRPIX2=crpix2 + ny1_off,
                            NAXIS1=new_wd, NAXIS2=new_ht)
                baseimage.update_keywords(kwds)

        # fit image piece into our array
        try:
            if merge:
                mydata[ylo:yhi, xlo:xhi, ...] += rotdata[0:ht, 0:wd, ...]
            else:
                # Only fill pixels that are still at the zero fill value,
                # so earlier pieces are not overwritten.
                idx = (mydata[ylo:yhi, xlo:xhi, ...] == 0.0)
                mydata[ylo:yhi, xlo:xhi, ...][idx] = \
                    rotdata[0:ht, 0:wd, ...][idx]

        except Exception as e:
            baseimage.logger.error("Error fitting tile: %s" % (str(e)))
            raise

        res.append((xlo, ylo, xhi, yhi))

    # TODO: recalculate min and max values
    # Can't use usual techniques because it adds too much time to the
    # mosacing
    #baseimage._set_minmax()

    # Notify watchers that our data has changed
    if not suppress_callback:
        baseimage.make_callback('modified')
    return res
def mosaic(logger, itemlist, fov_deg=None):
    """
    Build a mosaic image from a list of inputs.

    Parameters
    ----------
    logger : logger object
        a logger object passed to created AstroImage instances
    itemlist : sequence like
        a sequence of either filenames or AstroImage instances
    fov_deg : float, optional
        field of view of the output mosaic in degrees; if None the blank
        mosaic is allowed to expand as pieces are added

    Returns
    -------
    AstroImage
        the assembled mosaic image
    """
    # The first item defines the mosaic's center, scale and rotation.
    if isinstance(itemlist[0], AstroImage.AstroImage):
        image0 = itemlist[0]
        name = image0.get('name', 'image0')
    else:
        # Assume it is a file and load it
        filepath = itemlist[0]
        logger.info("Reading file '%s' ..." % (filepath))
        image0 = loader.load_data(filepath, logger=logger)
        name = filepath

    ra_deg, dec_deg = image0.get_keywords_list('CRVAL1', 'CRVAL2')
    header = image0.get_header()
    (rot_deg, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header)
    logger.debug("image0 rot=%f cdelt1=%f cdelt2=%f" % (rot_deg,
                                                        cdelt1, cdelt2))

    # Prepare a blank image into which to fit the mosaic
    px_scale = math.fabs(cdelt1)
    expand = False
    if fov_deg is None:
        # TODO: calculate fov?
        expand = True

    cdbase = [np.sign(cdelt1), np.sign(cdelt2)]
    img_mosaic = dp.create_blank_image(ra_deg, dec_deg,
                                       fov_deg, px_scale, rot_deg,
                                       cdbase=cdbase,
                                       logger=logger)
    header = img_mosaic.get_header()
    (rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header)
    logger.debug("mosaic rot=%f cdelt1=%f cdelt2=%f" % (rot, cdelt1, cdelt2))

    logger.debug("Processing '%s' ..." % (name))
    tup = mosaic_inline(img_mosaic, [image0], allow_expand=expand)
    logger.debug("placement %s" % (str(tup)))

    count = 1
    for item in itemlist[1:]:
        if isinstance(item, AstroImage.AstroImage):
            image = item
        else:
            # Create and load the image
            filepath = item
            logger.info("Reading file '%s' ..." % (filepath))
            # NOTE(review): subsequent files use io_fits.load_file while the
            # first uses loader.load_data — verify both yield equivalent images.
            image = io_fits.load_file(filepath, logger=logger)

        name = image.get('name', 'image%d' % (count))

        logger.debug("Inlining '%s' ..." % (name))
        tup = mosaic_inline(img_mosaic, [image])
        logger.debug("placement %s" % (str(tup)))
        count += 1

    logger.info("Done.")
    return img_mosaic
def main(options, args):
    """Assemble the FITS files in *args* into a mosaic; optionally write it out.

    ``options.fov`` sets the output field of view, ``options.outfile`` the
    destination file (overwritten if it exists).
    """
    logger = log.get_logger(name="mosaic", options=options)

    img_mosaic = mosaic(logger, args, fov_deg=options.fov)

    if not options.outfile:
        return
    outfile = options.outfile
    io_fits.use('astropy')
    logger.info("Writing output to '%s'..." % (outfile))
    # Remove any stale output first; ignore it if the file is absent.
    try:
        os.remove(outfile)
    except OSError:
        pass
    img_mosaic.save_as_file(outfile)
if __name__ == "__main__":
# Parse command line options
from argparse import ArgumentParser
argprs = ArgumentParser()
argprs.add_argument("--debug", dest="debug", default=False,
action="store_true",
help="Enter the pdb debugger on main()")
argprs.add_argument("--fov", dest="fov", metavar="DEG",
type=float,
help="Set output field of view")
argprs.add_argument("--log", dest="logfile", metavar="FILE",
help="Write logging output to FILE")
argprs.add_argument("--loglevel", dest="loglevel", metavar="LEVEL",
type=int,
help="Set logging level to LEVEL")
argprs.add_argument("-o", "--outfile", | |
'concentration'), **kwargs)
def plot_displacement(self, recording_step, file_name=None, **kwargs):
    # Plot the displacement subspace at `recording_step`; extra kwargs are
    # forwarded to self.plot() (but the label/title below take precedence,
    # since plot_params overwrites kwargs).
    if file_name is None:
        file_name = "displacement_%04d.png"%recording_step
    plot_params = {"label" : "displacement",
                   "title" : "displacement @ step %04d"%recording_step}
    kwargs.update(plot_params)
    self.plot(recording_step=recording_step, file_name=file_name, subspace_name="displacement",
              output_dir=os.path.join(self.output_dir, 'displacement'), **kwargs)
def plot_all(self, recording_step):
    # Plot every solution subspace (e.g. concentration, displacement) at the
    # given recording step, one output directory per subspace.
    if self._results._functionspace.has_subspaces:
        for subspace_name in self._results._functionspace.subspaces.get_subspace_names():
            plot_name = subspace_name+"_%04d.png"%recording_step
            plot_params = {"label": subspace_name,
                           "title": "%s @ step %04d" % (subspace_name, recording_step)}
            self.plot(recording_step=recording_step, file_name=plot_name, subspace_name=subspace_name,
                      output_dir=os.path.join(self.output_dir, subspace_name), **plot_params)
class PostProcess(ABC):
    """Base class for post-processing simulation results.

    Gives access to the recorded solution fields (concentration,
    displacement), derived mechanical quantities (strain, stress, pressure,
    von Mises stress, forces) and plotting helpers. Model-specific derived
    quantities are supplied by subclasses via the abstract methods.
    """

    def __init__(self, results, params, output_dir=config.output_dir_simulation_tmp, plot_params=None):
        """
        Init routine.

        :param results: Instance of Results.
        :param params: Model parameter object (E, poisson, coupling, ...).
        :param output_dir: Directory into which plots are written.
        :param plot_params: Optional dict of plot settings overriding the defaults.
        """
        self.logger = logging.getLogger(__name__)
        self._results = results
        self._params = params
        self._functionspace = self._results._functionspace
        self._subdomains = self._results._subdomains
        self._mesh = self._functionspace._mesh
        self._projection_parameters = self._functionspace._projection_parameters
        self.set_output_dir(output_dir)
        # Default plot settings; individual plot calls may override via kwargs.
        self.plot_params = {"showmesh": False,
                            "contour": False,
                            "exclude_min_max": False,
                            "colormap": 'viridis',
                            "n_cmap_levels": 20,
                            "dpi": 300,
                            "alpha": 1,
                            "alpha_f": 1,
                            "shading": "gouraud"}
        # None sentinel instead of a mutable `{}` default argument.
        self.update_plot_params(plot_params or {})

    def update_plot_params(self, plot_params=None):
        """Merge ``plot_params`` into the stored default plot settings."""
        self.plot_params.update(plot_params or {})

    def set_output_dir(self, output_dir):
        """Set the plot output directory, creating it if needed."""
        self.output_dir = output_dir
        fu.ensure_dir_exists(self.output_dir)

    def get_output_dir(self):
        """Return the configured output directory, or warn if unset."""
        if hasattr(self, 'output_dir'):
            return self.output_dir
        else:
            self.logger.warning("No output directory has been defined. Specify 'output_dir'")

    def get_solution_displacement(self, recording_step=None):
        """Return the recorded displacement function at ``recording_step``."""
        return self._results.get_solution_function(subspace_name='displacement', recording_step=recording_step)

    def get_solution_concentration(self, recording_step=None):
        """Return the recorded concentration function at ``recording_step``."""
        return self._results.get_solution_function(subspace_name='concentration', recording_step=recording_step)

    def get_strain_tensor(self, recording_step=None):
        """Project the strain tensor of the displacement field onto a P1 tensor space."""
        VT = fenics.TensorFunctionSpace(self._mesh, "Lagrange", 1)
        displacement = self.get_solution_displacement(recording_step=recording_step)
        strain_tensor = mle.compute_strain(displacement)
        strain_tensor_fct = fenics.project(strain_tensor, VT, **self._projection_parameters)
        strain_tensor_fct.rename("strain_tensor", "")
        return strain_tensor_fct

    @abstractmethod
    def get_stress_tensor(self, recording_step=None):
        """Return the (model-specific) stress tensor as a projected function."""
        pass

    @abstractmethod
    def get_logistic_growth(self, recording_step=None):
        """Return the (model-specific) logistic growth term as a projected function."""
        pass

    @abstractmethod
    def get_mech_expansion(self, recording_step=None):
        """Return the (model-specific) growth-induced strain as a projected function."""
        pass

    def get_pressure(self, recording_step=None):
        """Compute the scalar pressure field from the stress tensor."""
        stress = self.get_stress_tensor(recording_step=recording_step)
        pressure = mle.compute_pressure_from_stress_tensor(stress)
        F = fenics.FunctionSpace(self._mesh, "Lagrange", 1)
        pressure_fct = fenics.project(pressure, F, **self._projection_parameters)
        pressure_fct.rename("pressure", '')
        return pressure_fct

    def get_van_mises_stress(self, recording_step=None):
        """Compute the scalar von Mises stress field from the stress tensor."""
        stress = self.get_stress_tensor(recording_step=recording_step)
        van_mises_stress = mle.compute_van_mises_stress(stress, self._functionspace.dim_geo)
        F = fenics.FunctionSpace(self._mesh, "Lagrange", 1)
        van_mises_stress_fct = fenics.project(van_mises_stress, F, **self._projection_parameters)
        # BUGFIX: was rename("pressure", '') -- a copy-paste error from get_pressure().
        van_mises_stress_fct.rename("van_mises_stress", '')
        return van_mises_stress_fct

    def compute_force(self, recording_step=None, subdomain_id=None):
        """Integrate the traction over the boundary (optionally one subdomain).

        :returns: list of force components, one per geometric dimension.
        """
        n = fenics.FacetNormal(self._mesh)
        stress_tensor = self.get_stress_tensor(recording_step=recording_step)
        traction = fenics.dot(stress_tensor, n)
        dss = self._results._subdomains.ds
        if (subdomain_id is not None):
            # Restrict the surface measure to the requested boundary subdomain.
            dss = dss(subdomain_id)
        force = [fenics.assemble(traction[i] * dss) for i in range(traction.ufl_shape[0])]
        return force

    def get_displacement_norm(self, recording_step=None):
        """Project the pointwise Euclidean norm of the displacement onto a P1 space."""
        displacement = self.get_solution_displacement(recording_step=recording_step)
        disp_norm = fenics.inner(displacement, displacement)**0.5
        F = fenics.FunctionSpace(self._mesh, "Lagrange", 1)
        disp_norm_fct = fenics.project(disp_norm, F, **self._projection_parameters)
        disp_norm_fct.rename("displacement_norm", '')
        return disp_norm_fct

    def plot_function(self, function, recording_step, name, file_name=None, units=None, output_dir=None,
                      show_labels=False, **kwargs):
        """Plot ``function`` to ``output_dir/file_name``.

        :param name: Quantity name, used for label, title and default file name.
        :param units: Optional unit string appended to the colorbar label.
        :param show_labels: If True, underlay the subdomain label function.
        Extra kwargs override the default plot settings.
        """
        if output_dir is None:
            output_dir = self.get_output_dir()
        if file_name is None:
            file_name = "%s_%04d.png" % (name.replace(" ", "_"), recording_step)
        if units is not None:
            label_name = "%s [%s]" % (name, units)
        else:
            label_name = "%s" % (name)
        plot_params = self.plot_params.copy()
        plot_params.update({"label": label_name})
        title = "%s @ step %04d" % (name, recording_step)
        # An explicit title=None in the stored settings suppresses the title;
        # otherwise the computed title is used.
        if plot_params.get('title', '') is not None:
            plot_params['title'] = title
        plot_params.update(kwargs)
        save_path = os.path.join(output_dir, file_name)
        if not show_labels:
            plott.show_img_seg_f(function=function, path=save_path, **plot_params)
        else:
            labels = self.get_label_function()
            # Settings for the label underlay that is drawn before the function.
            plot_obj_label = {'object': labels,
                              'cbar_label': None,
                              'exclude_below': None,
                              'exclude_above': None,
                              'exclude_min_max': False,
                              'exclude_around': None,
                              'cmap': 'Greys_r',
                              'n_cmap_levels': None,
                              'range_f': None,
                              'showmesh': False,
                              'shading': "gouraud",
                              'alpha': 1,
                              'norm': None,
                              'norm_ref': None,
                              'color': None
                              }
            plott.show_img_seg_f(function=function, path=save_path,
                                 add_plot_object_pre=plot_obj_label,
                                 **plot_params)

    def plot_concentration(self, recording_step, **kwargs):
        """Plot the concentration field (fixed color range [0, 1])."""
        conc = self.get_solution_concentration(recording_step=recording_step)
        plot_params = {"range_f": [0.000, 1.0]}
        plot_params.update(kwargs)
        self.plot_function(conc, recording_step=recording_step, name="concentration",
                           file_name=None, units=None,
                           output_dir=os.path.join(self.get_output_dir(), 'concentration'), **plot_params)

    def plot_displacement(self, recording_step, **kwargs):
        """Plot the displacement field."""
        disp = self.get_solution_displacement(recording_step=recording_step)
        plot_params = {"range_f": [0.000, None]}
        plot_params.update(kwargs)
        # BUGFIX: previously passed **kwargs here, silently dropping the
        # range_f default merged into plot_params above.
        self.plot_function(disp, recording_step=recording_step, name="displacement",
                           file_name=None, units="mm",
                           output_dir=os.path.join(self.get_output_dir(), 'displacement'), **plot_params)

    def plot_pressure(self, recording_step, **kwargs):
        """Plot the pressure field derived from the stress tensor."""
        pressure = self.get_pressure(recording_step=recording_step)
        self.plot_function(pressure, recording_step=recording_step, name="pressure",
                           file_name=None, units="Pa",
                           output_dir=os.path.join(self.get_output_dir(), 'pressure'), **kwargs)

    def plot_displacement_norm(self, recording_step, **kwargs):
        """Plot the Euclidean norm of the displacement field."""
        disp_norm = self.get_displacement_norm(recording_step=recording_step)
        self.plot_function(disp_norm, recording_step=recording_step, name="displacement norm",
                           file_name=None, units="mm",
                           output_dir=os.path.join(self.get_output_dir(), 'displacement_norm'), **kwargs)

    def plot_van_mises_stress(self, recording_step, **kwargs):
        """Plot the von Mises stress field."""
        van_mises = self.get_van_mises_stress(recording_step=recording_step)
        self.plot_function(van_mises, recording_step=recording_step, name="van mises stress",
                           file_name=None, units=None,
                           output_dir=os.path.join(self.get_output_dir(), 'van_mises_stress'), **kwargs)

    def get_label_function(self):
        """Return a scalar function encoding the subdomain labels."""
        if hasattr(self._subdomains, 'label_function'):
            labelfunction = self._subdomains.label_function
        elif self._functionspace.dim_geo == 2:
            labelfunction = vh.convert_meshfunction_to_function(self._mesh, self._subdomains.subdomains)
        else:
            # BUGFIX: previously fell through and raised UnboundLocalError on
            # `return labelfunction`; fail explicitly with a clear message.
            raise NotImplementedError("No label function available for dim_geo != 2 "
                                      "without a precomputed 'label_function' attribute.")
        return labelfunction

    def plot_label_function(self, recording_step, **kwargs):
        """Plot the subdomain label function."""
        plot_params = {"colormap": 'Set1'}
        plot_params.update(kwargs)
        labelfunction = self.get_label_function()
        self.plot_function(labelfunction, recording_step=recording_step, name="label function",
                           file_name=None, units=None,
                           output_dir=os.path.join(self.get_output_dir(), 'label_function'), **plot_params)

    def _update_mesh_displacements(self, displacement):
        """
        Applies displacement function to mesh.

        .. warning:: This changes the current mesh! Multiple updates result in additive mesh deformations!
        """
        fenics.ALE.move(self._mesh, displacement)
        # Rebuild the spatial search structure for the moved mesh.
        self._mesh.bounding_box_tree().build(self._mesh)

    def update_mesh_displacement(self, recording_step=None, reverse=False):
        """
        Update mesh with simulated displacement from specified time-point.

        :param reverse: If True, apply the negated displacement (undo a deformation).

        .. warning:: This changes the current mesh! Multiple updates result in additive mesh deformations!
        """
        displacement = self.get_solution_displacement(recording_step)
        if reverse:
            neg_disp = fenics.project(-1 * displacement, displacement.function_space(),
                                      **self._projection_parameters)
            self._update_mesh_displacements(neg_disp)
        else:
            self._update_mesh_displacements(displacement)
class PostProcessTumorGrowth(PostProcess):
def get_stress_tensor(self, recording_step=None):
    """Project the linear-elastic stress tensor at ``recording_step`` onto a
    first-order Lagrange tensor space and return the resulting function."""
    u = self.get_solution_displacement(recording_step=recording_step)
    # Lamé parameters derived from Young's modulus and Poisson's ratio.
    lame_mu = mle.compute_mu(self._params.E, self._params.poisson)
    lame_lambda = mle.compute_lambda(self._params.E, self._params.poisson)
    stress_expr = mle.compute_stress(u, mu=lame_mu, lmbda=lame_lambda)
    tensor_space = fenics.TensorFunctionSpace(self._mesh, "Lagrange", 1)
    stress_fct = fenics.project(stress_expr, tensor_space, **self._projection_parameters)
    stress_fct.rename("stress_tensor", "")
    return stress_fct
def get_logistic_growth(self, recording_step=None):
    """Project the logistic growth source term (carrying capacity 1.0) at
    ``recording_step`` onto a scalar P1 space and return the function."""
    conc = self.get_solution_concentration(recording_step=recording_step)
    growth_expr = mrd.compute_growth_logistic(conc, self._params.proliferation, 1.0)
    scalar_space = fenics.FunctionSpace(self._mesh, "Lagrange", 1)
    growth_fct = fenics.project(growth_expr, scalar_space, **self._projection_parameters)
    growth_fct.rename("log_growth", '')
    return growth_fct
def get_mech_expansion(self, recording_step=None):
    # Growth-induced strain ("mechanical expansion") computed from the
    # concentration field and the coupling parameter, projected to P1 tensors.
    VT = fenics.TensorFunctionSpace(self._mesh, "Lagrange", 1)
    concentration = self.get_solution_concentration(recording_step=recording_step)
    mech_exp = mle.compute_growth_induced_strain(concentration, self._params.coupling, self._functionspace.dim_geo)
    mech_exp_fct = fenics.project(mech_exp, VT, **self._projection_parameters)
    mech_exp_fct.rename("mech_expansion", '')
    return mech_exp_fct
def get_total_jacobian(self, recording_step=None):
    # Jacobian determinant of the total deformation, projected to a scalar P1 space.
    V = fenics.FunctionSpace(self._mesh, "Lagrange", 1)
    displacement = self.get_solution_displacement(recording_step=recording_step)
    jac = mle.compute_total_jacobian(displacement)
    jac_fct = fenics.project(jac, V, **self._projection_parameters)
    jac_fct.rename("total_jacobian", '')
    return jac_fct
def get_growth_induced_jacobian(self, recording_step=None):
    # Jacobian determinant of the growth-induced part of the deformation,
    # derived from the growth strain, projected to a scalar P1 space.
    V = fenics.FunctionSpace(self._mesh, "Lagrange", 1)
    strain_growth = self.get_mech_expansion(recording_step)
    jac_growth = mle.compute_growth_induced_jacobian(strain_growth, self._functionspace.dim_geo)
    jac_growth_fct = fenics.project(jac_growth, V, **self._projection_parameters)
    jac_growth_fct.rename("growth_induced_jacobian", '')
    return jac_growth_fct
def get_concentration_deformed_configuration(self, recording_step=None):
    # Concentration field mapped to the deformed configuration, using the
    # displacement and coupling parameter; projected to a scalar P1 space.
    concentration = self.get_solution_concentration(recording_step=recording_step)
    displacement = self.get_solution_displacement(recording_step=recording_step)
    V = fenics.FunctionSpace(self._mesh, "Lagrange", 1)
    conc_def = mle.compute_concentration_deformed(concentration, displacement, self._params.coupling, self._functionspace.dim_geo)
    conc_def_fct = fenics.project(conc_def, V, **self._projection_parameters)
    conc_def_fct.rename("concentration_deformed_config", '')
    return conc_def_fct
def plot_concentration_deformed_configuration(self, recording_step, **kwargs):
    # Plot the deformed-configuration concentration, fixed color range [0, 1].
    conc_def = self.get_concentration_deformed_configuration(recording_step=recording_step)
    plot_params = {"range_f": [0.000, 1.0]}
    plot_params.update(kwargs)
    self.plot_function(conc_def, recording_step=recording_step, name="concentration deformed configuration",
                       file_name=None, units=None,
                       output_dir=os.path.join(self.get_output_dir(), 'concentration_deformed'), **plot_params)
def plot_log_growth(self, recording_step, **kwargs):
    # Plot the logistic growth term with a diverging colormap centered at 0.
    log_growth = self.get_logistic_growth(recording_step=recording_step)
    plot_params = {  # "exclude_around" :(0, 0.0001),
        "colormap": 'RdBu_r',
        "cmap_ref": 0.0}
    plot_params.update(kwargs)
    self.plot_function(log_growth, recording_step=recording_step, name="logistic growth term",
                       file_name=None, units=None,
                       output_dir=os.path.join(self.get_output_dir(), 'logistic_growth_term'), **plot_params)
def plot_total_jacobian(self, recording_step, **kwargs):
    """Plot the total deformation Jacobian for one recording step."""
    field = self.get_total_jacobian(recording_step=recording_step)
    # diverging colormap centred on 1 (no volume change); caller kwargs take precedence
    params = dict({"range_f": [0.8, 1.2], "colormap": 'RdBu_r', "cmap_ref": 1.0}, **kwargs)
    self.plot_function(field, recording_step=recording_step,
                       name="total jacobian",
                       file_name=None, units=None,
                       output_dir=os.path.join(self.get_output_dir(), 'total_jacobian'),
                       **params)
def plot_growth_induced_jacobian(self, recording_step, **kwargs):
    """Plot the growth-induced Jacobian for one recording step."""
    field = self.get_growth_induced_jacobian(recording_step=recording_step)
    # diverging colormap centred on 1 (no volume change); caller kwargs take precedence
    params = dict({"range_f": [0.8, 1.2], "colormap": 'RdBu_r', "cmap_ref": 1.0}, **kwargs)
    self.plot_function(field, recording_step=recording_step,
                       name="growth induced jacobian",
                       file_name=None, units=None,
                       output_dir=os.path.join(self.get_output_dir(), 'growth_induced_jacobian'),
                       **params)
def plot_all(self, deformed=False, selection=slice(None), output_dir=None, **kwargs):
    """Plot every output field for the selected recording steps.

    :param deformed: boolean flag for mesh deformation (plot on the displaced mesh)
    :param selection: slice object, e.g. slice(10,-1,5), or an explicit list of steps
    :param output_dir: optional override for the output directory
    :param kwargs: forwarded to the individual plot methods
    :return: None
    """
    if output_dir is not None:
        self.set_output_dir(output_dir)
    if deformed:
        self.set_output_dir(self.get_output_dir() + '_deformed')
    else:
        # undeformed mesh: the label function is static, plot it only once
        self.plot_label_function(recording_step=0)
    if isinstance(selection, slice):
        steps = self._results.get_recording_steps()[selection]
    elif isinstance(selection, list):
        steps = selection
    else:
        # Fix: 'steps' was previously left unbound here, raising NameError below;
        # treat an unsupported selection as an empty one instead.
        print("cannot handle selection '%s'" % selection)
        steps = []
    for recording_step in steps:
        if deformed:
            self.update_mesh_displacement(recording_step)
            # the mesh changes every step, so the label function must be replotted
            self.plot_label_function(recording_step, **kwargs)
        self.plot_concentration(recording_step, **kwargs)
        self.plot_displacement(recording_step, **kwargs)
        self.plot_pressure(recording_step, **kwargs)
        self.plot_displacement_norm(recording_step, **kwargs)
        self.plot_log_growth(recording_step, **kwargs)
        self.plot_total_jacobian(recording_step, **kwargs)
        self.plot_growth_induced_jacobian(recording_step, **kwargs)
        self.plot_van_mises_stress(recording_step, **kwargs)
        self.plot_concentration_deformed_configuration(recording_step, **kwargs)
        if deformed:
            # restore the reference configuration before the next step
            self.update_mesh_displacement(recording_step, reverse=True)
def plot_for_pub(self, deformed=False, selection=slice(None), output_dir=None, **kwargs):
    """Plot publication-ready figures (no axes/ticks/titles/colorbars) for the
    selected recording steps, then plot the colorbars separately.

    :param deformed: boolean flag for mesh deformation (plot on the displaced mesh)
    :param selection: slice object, e.g. slice(10,-1,5), or an explicit list of steps
    :param output_dir: optional override for the output directory (created if missing)
    :param kwargs: forwarded to the individual plot methods
    :return: None
    """
    if output_dir is not None:
        fu.ensure_dir_exists(output_dir)
        self.set_output_dir(output_dir)
    if deformed:
        self.set_output_dir(self.get_output_dir() + '_deformed')
    else:
        # undeformed mesh: the label function is static, plot it only once
        self.plot_label_function(recording_step=0)
    # plot without axes, cbar, etc
    plot_params = {'show_axes': False,
                   'show_ticks': False,
                   'show_title': False,
                   'show_cbar': False}
    plot_params.update(kwargs)
    if isinstance(selection, slice):
        steps = self._results.get_recording_steps()[selection]
    elif isinstance(selection, list):
        steps = selection
    else:
        # Fix: 'steps' was previously left unbound here, raising NameError below;
        # treat an unsupported selection as an empty one instead.
        print("cannot handle selection '%s'" % selection)
        steps = []
    for recording_step in steps:
        if deformed:
            self.update_mesh_displacement(recording_step)
            # the mesh changes every step, so the label function must be replotted
            self.plot_label_function(recording_step, n_cmap_levels=4, colormap='Greys_r',
                                     **plot_params)
        self.plot_concentration(recording_step, exclude_below=0.01, exclude_min_max=True, show_labels=True,
                                **plot_params)
        self.plot_displacement(recording_step, **plot_params)
        self.plot_pressure(recording_step, **plot_params)
        self.plot_displacement_norm(recording_step, **plot_params)
        self.plot_log_growth(recording_step, **plot_params)
        self.plot_total_jacobian(recording_step, **plot_params)
        self.plot_growth_induced_jacobian(recording_step, **plot_params)
        self.plot_van_mises_stress(recording_step, **plot_params)
        self.plot_concentration_deformed_configuration(recording_step, **plot_params)
        if deformed:
            # restore the reference configuration before the next step
            self.update_mesh_displacement(recording_step, reverse=True)
    # plot colorbars separately
    plot_params_2 = {'show_axes': False,
                     'show_ticks': False,
                     'show_title': False,
                     'show_cbar': True,
                     'dpi': 600,
                     'cbar_size': '20%',
                     'cbar_pad': 0.2,
                     'cbar_fontsize': 15}
    plot_params_2.update(kwargs)
    step = 1
    self.set_output_dir(os.path.join(self.get_output_dir(), 'cbar'))
    self.plot_label_function(step, n_cmap_levels=4, colormap='Greys_r', **plot_params_2)
    self.plot_concentration(step, **plot_params_2)
    self.plot_displacement(step, **plot_params_2, range_f=[0, 8])
    self.plot_displacement_norm(step, **plot_params_2, range_f=[0, 8])
    self.plot_total_jacobian(step, **plot_params_2)
def save_all(self, save_method='xdmf', clear_all=False, selection=slice(None), output_dir=None):
    """Save the solution of every selected recording step to disk.

    :param save_method: output format tag passed to the results container ('xdmf' or other)
    :param clear_all: forwarded to ``save_solution_start``
    :param selection: slice object, e.g. slice(10,-1,5), or an explicit list of steps
    :param output_dir: optional override for the output directory
    :return: None
    """
    if output_dir is not None:
        self.set_output_dir(output_dir)
    self._results.set_save_output_dir(self.get_output_dir())
    self._results.save_solution_start(method=save_method, clear_all=clear_all)
    if isinstance(selection, slice):
        steps = self._results.get_recording_steps()[selection]
    elif isinstance(selection, list):
        steps = selection
    else:
        # Fix: 'steps' was previously left unbound here, raising NameError below;
        # treat an unsupported selection as empty so save_solution_start/end stay paired.
        print("cannot handle selection '%s'" % selection)
        steps = []
    for recording_step in steps:
        current_sim_time = self._results.get_result(recording_step=recording_step).get_time_step()
        u = self._results.get_solution_function(recording_step=recording_step)
        self._results.save_solution(recording_step, current_sim_time, function=u, method=save_method)
        # try merging those files into single vtu
        if save_method != 'xdmf':
            dio.merge_vtus_timestep(self.get_output_dir(), recording_step, remove=False, reference_file_path=None)
    self._results.save_solution_end(method=save_method)
class PostProcessTumorGrowthBrain(PostProcessTumorGrowth):
def map_params(self):
"""
This function maps the parameters defined explicitly in the TumorGrowthBrain class into instances of DiscontinousScalar, so that they can be processed by function defined in PostProcessTumorGrowth.
:return:
"""
if not hasattr(self._params, 'E'):
youngmod = {'outside': 10E6,
'CSF': self._params.E_CSF,
'WM': self._params.E_WM,
'GM': self._params.E_GM,
'Ventricles': self._params.E_VENT}
self._params.set_parameter('E', youngmod)
if not hasattr(self._params, 'poisson'):
poisson | |
# modules/volterra/volterra_resource_site_create.py
#!/usr/bin/env python3
import os
import sys
import json
import argparse
import urllib.request
from urllib.error import HTTPError
def get_tenant_id(tenant, token):
    """Return the tenant ID from the Volterra 'system' namespace metadata.

    :param tenant: Volterra tenant name (console subdomain)
    :param token: API token placed in the ``Authorization`` header
    Exits the process with status 1 when the API call fails.
    """
    url = "https://%s.console.ves.volterra.io/api/web/namespaces/system" % tenant
    headers = {"Authorization": "APIToken %s" % token}
    try:
        req = urllib.request.Request(url, headers=headers, method='GET')
        resp = urllib.request.urlopen(req)
        return json.load(resp)['system_metadata']['tenant']
    except HTTPError as her:
        sys.stderr.write("Error retrieving tenant ID - %s\n" % her)
        sys.exit(1)
def assure_site_token(tenant, token, site_token_name):
    """Return the UID of the site registration token *site_token_name*,
    creating the token in the ``system`` namespace when it does not exist.

    :param tenant: Volterra tenant name (console subdomain)
    :param token: API token placed in the ``Authorization`` header
    :param site_token_name: name of the site token
    Exits the process with status 1 on any unexpected HTTP or parsing error.
    """
    # UTF-8 round-trip; presumably normalizes bytes/str input -- TODO confirm
    site_token_name = site_token_name.encode('utf-8').decode('utf-8')
    headers = {
        "Authorization": "APIToken %s" % token
    }
    # Does the site token exist
    try:
        url = "https://%s.console.ves.volterra.io/api/register/namespaces/system/tokens/%s" % (
            tenant, site_token_name)
        request = urllib.request.Request(
            url, headers=headers, method='GET')
        response = urllib.request.urlopen(request)
        return json.load(response)['system_metadata']['uid']
    except HTTPError as her:
        if her.code == 404:
            # token not found: create it via POST and return the new UID
            try:
                url = "https://%s.console.ves.volterra.io/api/register/namespaces/system/tokens" % tenant
                headers['volterra-apigw-tenant'] = tenant
                headers['content-type'] = 'application/json'
                data = {
                    "metadata": {
                        "annotations": {},
                        "description": "Site Authorization Token for %s" % site_token_name,
                        "disable": False,
                        "labels": {},
                        "name": site_token_name,
                        "namespace": "system"
                    },
                    "spec": {}
                }
                data = json.dumps(data)
                request = urllib.request.Request(
                    url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
                response = urllib.request.urlopen(request)
                site_token = json.load(response)
                return site_token['system_metadata']['uid']
            except HTTPError as err:
                sys.stderr.write(
                    "Error creating site token resources %s: %s\n" % (url, err))
                sys.exit(1)
        else:
            # any non-404 failure of the GET is fatal
            sys.stderr.write(
                "Error retrieving site token resources %s: %s\n" % (url, her))
            sys.exit(1)
    except Exception as er:
        # e.g. JSON decoding or missing keys in the response
        sys.stderr.write(
            "Error retrieving site token resources %s\n" % er)
        sys.exit(1)
def assure_k8s_cluster(tenant, token, site_name, k8sdomain):
    """Ensure a ``k8s_clusters`` object named *site_name* exists in the
    ``system`` namespace, creating it with local access on *k8sdomain*.

    :param tenant: Volterra tenant name (console subdomain)
    :param token: API token placed in the ``Authorization`` header
    :param site_name: name for the k8s cluster object
    :param k8sdomain: local domain for kubeconfig access
    Returns nothing; exits the process with status 1 on creation failure.
    """
    headers = {
        "Authorization": "APIToken %s" % token
    }
    # create K8s cluster object
    try:
        # GET first: if the object already exists there is nothing to do
        url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/k8s_clusters/%s" % (
            tenant, site_name)
        request = urllib.request.Request(
            url, headers=headers, method='GET')
        urllib.request.urlopen(request)
    except HTTPError as her:
        if her.code == 404:
            # not found: create the object via POST
            try:
                url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/k8s_clusters" % tenant
                headers['volterra-apigw-tenant'] = tenant
                headers['content-type'] = 'application/json'
                data = {
                    "namespace": "system",
                    "metadata": {
                        "name": site_name,
                        "namespace": None,
                        "labels": {},
                        "annotations": {},
                        "description": None,
                        "disable": None
                    },
                    "spec": {
                        "local_access_config": {
                            "local_domain": k8sdomain,
                            "default_port": {}
                        },
                        "global_access_enable": {},
                        "use_default_psp": {},
                        "use_default_cluster_roles": {},
                        "use_default_cluster_role_bindings": {},
                        "no_insecure_registries": {}
                    }
                }
                data = json.dumps(data)
                request = urllib.request.Request(
                    url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
                urllib.request.urlopen(request)
            except HTTPError as err:
                sys.stderr.write(
                    "Error creating k8s_clusters resources %s: %s\n" % (url, err))
                sys.exit(1)
        # NOTE(review): a non-404 GET failure is silently ignored here,
        # unlike the sibling assure_* functions -- confirm this is intended
def assure_voltstack_site(tenant, token, site_name, tenant_id, cluster_size, latitude, longitude, inside_networks, inside_gateways):
    """Return the UID of the Voltstack site *site_name*, creating it when absent.

    :param tenant: Volterra tenant name (console subdomain)
    :param token: API token placed in the ``Authorization`` header
    :param site_name: site (and k8s cluster) name
    :param tenant_id: tenant ID as returned by get_tenant_id
    :param cluster_size: total node count; the first min(cluster_size, 3)
        nodes become masters, the remainder workers
    :param latitude: site latitude for the console map
    :param longitude: site longitude for the console map
    :param inside_networks: list of IP prefixes reachable via the inside gateways
    :param inside_gateways: list of gateway IPs; one static-route entry is built per gateway
    Exits the process with status 1 on any unexpected HTTP or parsing error.
    """
    headers = {
        "Authorization": "APIToken %s" % token
    }
    # create Voltstack site
    try:
        # GET first: if the site exists, just return its UID
        url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/sites/%s" % (
            tenant, site_name)
        request = urllib.request.Request(
            url, headers=headers, method='GET')
        response = urllib.request.urlopen(request)
        return json.load(response)['system_metadata']['uid']
    except HTTPError as her:
        if her.code == 404:
            try:
                # one host+forwarding static route per inside gateway
                v_static_routes = []
                for gw in inside_gateways:
                    v_static_routes.append(
                        {
                            "ip_prefixes": inside_networks,
                            "ip_address": gw,
                            "attrs": ['ROUTE_ATTR_INSTALL_HOST', 'ROUTE_ATTR_INSTALL_FORWARDING']
                        })
                url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/voltstack_sites" % tenant
                headers['volterra-apigw-tenant'] = tenant
                headers['content-type'] = 'application/json'
                data = {
                    "metadata": {
                        "name": site_name,
                        "namespace": None,
                        "labels": {},
                        "annotations": {},
                        "description": None,
                        "disable": None
                    },
                    "spec": {
                        "volterra_certified_hw": "kvm-volstack-combo",
                        "master_nodes": [],
                        "worker_nodes": [],
                        "no_bond_devices": {},
                        "custom_network_config": {
                            "slo_config": {
                                "labels": {},
                                "static_routes": {
                                    "static_routes": v_static_routes
                                },
                                "no_dc_cluster_group": {}
                            },
                            "default_interface_config": {},
                            "no_network_policy": {},
                            "no_forward_proxy": {},
                            # connect the site-local outside network to the shared public network
                            "global_network_list": {
                                "global_network_connections": [
                                    {
                                        "slo_to_global_dr": {
                                            "global_vn": {
                                                "tenant": "ves-io",
                                                "namespace": "shared",
                                                "name": "public"
                                            }
                                        }
                                    }
                                ]
                            },
                        },
                        "default_storage_config": {},
                        "disable_gpu": {},
                        "address": None,
                        "coordinates": {
                            "latitude": latitude,
                            "longitude": longitude
                        },
                        "k8s_cluster": {
                            "tenant": tenant_id,
                            "namespace": "system",
                            "name": site_name
                        },
                        "logs_streaming_disabled": {},
                        "deny_all_usb": {}
                    },
                    "resource_version": None
                }
                # node names follow the "<site>-vce-<index>" convention
                masters = []
                for indx in range(min(cluster_size, 3)):
                    masters.append("%s-vce-%d" % (site_name, indx))
                data['spec']['master_nodes'] = masters
                workers = []
                for indx in range(cluster_size - 3):
                    workers.append("%s-vce-%d" % (site_name, indx + 3))
                data['spec']['worker_nodes'] = workers
                data = json.dumps(data)
                request = urllib.request.Request(
                    url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
                response = urllib.request.urlopen(request)
                site = json.load(response)
                return site['system_metadata']['uid']
            except HTTPError as err:
                sys.stderr.write(
                    "Error creating volstack site resources %s: %s\n" % (url, err))
                sys.exit(1)
        else:
            sys.stderr.write(
                "Error retrieving voltstack site resources %s: %s\n" % (url, her))
            sys.exit(1)
    except Exception as er:
        # e.g. JSON decoding or missing keys in the response
        sys.stderr.write(
            "Error retrieving voltstack site resources %s\n" % er)
        sys.exit(1)
def assure_virtual_network(tenant, token, site_name, fleet_label, tenant_id, inside_networks, inside_gateways):
    """Ensure a site-local-inside virtual network with static routes exists.

    Does nothing when *inside_networks* is empty.
    :param tenant: Volterra tenant name (console subdomain)
    :param token: API token placed in the ``Authorization`` header
    :param site_name: used as the created object's name and in its description
    :param fleet_label: used for the existence check and the ``ves.io/fleet`` label
    :param tenant_id: unused here -- kept for signature parity with siblings
    :param inside_networks: IP prefixes routed via the inside gateways
    :param inside_gateways: gateway IPs; one static-route entry per gateway
    Exits the process with status 1 on failure.
    NOTE(review): the GET checks for an object named *fleet_label* but the
    POST creates one named *site_name*; if these differ the object is
    recreated on every run -- confirm intended.
    """
    headers = {
        "Authorization": "APIToken %s" % token
    }
    if inside_networks:
        # Does virtual network exist
        try:
            url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/virtual_networks/%s" % (
                tenant, fleet_label)
            request = urllib.request.Request(
                url, headers=headers, method='GET')
            response = urllib.request.urlopen(request)
        except HTTPError as her:
            if her.code == 404:
                # not found: create the virtual network via POST
                try:
                    v_static_routes = []
                    for gw in inside_gateways:
                        v_static_routes.append({
                            "ip_prefixes": inside_networks,
                            "ip_address": gw,
                            "attrs": ['ROUTE_ATTR_INSTALL_HOST', 'ROUTE_ATTR_INSTALL_FORWARDING']
                        })
                    url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/virtual_networks" % tenant
                    headers['volterra-apigw-tenant'] = tenant
                    headers['content-type'] = 'application/json'
                    data = {
                        "namespace": "system",
                        "metadata": {
                            "name": site_name,
                            "namespace": "system",
                            "labels": {
                                "ves.io/fleet": fleet_label
                            },
                            "annotations": {},
                            "description": "Routes inside %s" % site_name,
                            "disable": False
                        },
                        "spec": {
                            "site_local_inside_network": {},
                            "static_routes": v_static_routes
                        }
                    }
                    data = json.dumps(data)
                    request = urllib.request.Request(
                        url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
                    urllib.request.urlopen(request)
                except HTTPError as her:
                    sys.stderr.write(
                        "Error creating virtual_networks resources %s: %s - %s\n" % (url, data, her))
                    sys.exit(1)
            else:
                sys.stderr.write(
                    "Error retrieving virtual_networks resources %s: %s\n" % (url, her))
                sys.exit(1)
def assure_network_connector(tenant, token, site_name, fleet_label):
    """Ensure a network connector named *site_name* exists, connecting the
    site-local-inside network to the global shared ``public`` network.

    :param tenant: Volterra tenant name (console subdomain)
    :param token: API token placed in the ``Authorization`` header
    :param site_name: connector object name
    :param fleet_label: value for the ``ves.io/fleet`` label
    Returns nothing; exits the process with status 1 on failure.
    """
    headers = {
        "Authorization": "APIToken %s" % token
    }
    # Does Global Network connector exist?
    try:
        url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/network_connectors/%s" % (
            tenant, site_name)
        request = urllib.request.Request(
            url, headers=headers, method='GET')
        urllib.request.urlopen(request)
    except HTTPError as her:
        if her.code == 404:
            # not found: create the connector via POST
            try:
                url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/network_connectors" % tenant
                headers['volterra-apigw-tenant'] = tenant
                headers['content-type'] = 'application/json'
                data = {
                    "namespace": "system",
                    "metadata": {
                        "name": site_name,
                        "namespace": None,
                        "labels": {
                            "ves.io/fleet": fleet_label
                        },
                        "annotations": {},
                        "description": "connecting %s to the global shared network" % site_name,
                        "disable": False
                    },
                    "spec": {
                        # SLI (site-local-inside) to the shared public virtual network
                        "sli_to_global_dr": {
                            "global_vn": {
                                "tenant": "ves-io",
                                "namespace": "shared",
                                "name": "public"
                            }
                        },
                        "disable_forward_proxy": {}
                    }
                }
                data = json.dumps(data)
                request = urllib.request.Request(
                    url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
                urllib.request.urlopen(request)
            except HTTPError as her:
                sys.stderr.write(
                    "Error creating network_connectors resources %s: %s - %s\n" % (url, data, her))
                sys.exit(1)
        else:
            sys.stderr.write(
                "Error retrieving network_connectors resources %s: %s\n" % (url, her))
            sys.exit(1)
def assure_fleet(tenant, token, site_name, fleet_label, tenant_id):
    """Return the ``fleet_label`` of the fleet named *site_name*, creating the
    fleet (wired to the site's network connector and inside virtual network)
    when it does not exist.

    :param tenant: Volterra tenant name (console subdomain)
    :param token: API token placed in the ``Authorization`` header
    :param site_name: fleet object name (also the referenced connector/network name)
    :param fleet_label: label written into the fleet spec
    :param tenant_id: tenant ID used in object references
    Exits the process with status 1 on any unexpected HTTP or parsing error.
    """
    headers = {
        "Authorization": "APIToken %s" % token
    }
    # Does the fleet exist
    try:
        url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/fleets/%s" % (
            tenant, site_name)
        request = urllib.request.Request(
            url, headers=headers, method='GET')
        response = urllib.request.urlopen(request)
        return json.load(response)['spec']['fleet_label']
    except HTTPError as her:
        if her.code == 404:
            # not found: create the fleet via POST
            try:
                url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/fleets" % tenant
                headers['volterra-apigw-tenant'] = tenant
                headers['content-type'] = 'application/json'
                data = {
                    "namespace": "system",
                    "metadata": {
                        "name": site_name,
                        "namespace": None,
                        "labels": {},
                        "annotations": {},
                        "description": "Fleet provisioning object for %s" % site_name,
                        "disable": None
                    },
                    "spec": {
                        "fleet_label": fleet_label,
                        "volterra_software_version": None,
                        # reference the connector created by assure_network_connector
                        # NOTE(review): this entry uses "uuid" while the virtual
                        # network reference below uses "uid" -- confirm API schema
                        "network_connectors": [
                            {
                                "kind": "network_connector",
                                "uuid": None,
                                "tenant": tenant_id,
                                "namespace": "system",
                                "name": site_name
                            }
                        ],
                        "network_firewall": None,
                        "operating_system_version": None,
                        "outside_virtual_network": None,
                        # reference the network created by assure_virtual_network
                        "inside_virtual_network": [
                            {
                                "kind": "virtual_network",
                                "uid": None,
                                "tenant": tenant_id,
                                "namespace": "system",
                                "name": site_name
                            }
                        ],
                        "default_config": {},
                        "no_bond_devices": {},
                        "no_storage_interfaces": {},
                        "no_storage_device": {},
                        "default_storage_class": {},
                        "no_dc_cluster_group": {},
                        "disable_gpu": {},
                        "no_storage_static_routes": {},
                        "enable_default_fleet_config_download": None,
                        "logs_streaming_disabled": {},
                        "deny_all_usb": {}
                    }
                }
                data = json.dumps(data)
                request = urllib.request.Request(
                    url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
                response = urllib.request.urlopen(request)
                return json.load(response)['spec']['fleet_label']
            except HTTPError as her:
                sys.stderr.write(
                    "Error creating fleets resources %s: %s - %s\n" % (url, data, her))
                sys.exit(1)
        else:
            # Fix: error message said "feet resources" instead of "fleet resources"
            sys.stderr.write(
                "Error retrieving fleet resources %s: %s\n" % (url, her))
            sys.exit(1)
    except Exception as er:
        # e.g. JSON decoding or missing keys in the response
        sys.stderr.write(
            "Error retrieving fleet resources %s\n" % er)
        sys.exit(1)
def assure_service_discovery(tenant, token, site_name, tenant_id, consul_servers, ca_cert_encoded):
headers = {
"Authorization": "APIToken %s" % token
}
for indx, consul_server in enumerate(consul_servers):
name = "%s-consul-%d" % (site_name, indx)
# Does service discovery exist
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/discoverys/%s" % (
tenant, name)
request = urllib.request.Request(
url, headers=headers, method='GET')
urllib.request.urlopen(request)
except HTTPError as her:
if her.code == 404:
try:
url = "https://%s.console.ves.volterra.io/api/config/namespaces/system/discoverys" % tenant
data = {
"namespace": "system",
"metadata": {
"name": name,
"namespace": None,
"labels": {},
"annotations": {},
"description": None,
"disable": False
},
"spec": {
"where": {
"site": {
"ref": [{
"kind": "site",
"uid": None,
"tenant": tenant_id,
"namespace": "system",
"name": site_name
}],
"network_type": "VIRTUAL_NETWORK_SITE_LOCAL_INSIDE"
}
},
"discovery_consul": {
"access_info": {
"connection_info": {
"api_server": consul_server,
"tls_info": {
"server_name": None,
"certificate_url": None,
"certificate": None,
"key_url": None,
"ca_certificate_url": None,
"trusted_ca_url": "string:///%s" % ca_cert_encoded
}
},
"scheme": None,
"http_basic_auth_info": None
},
"publish_info": {
"disable": {}
}
}
}
}
data = json.dumps(data)
request = urllib.request.Request(
url=url, headers=headers, data=bytes(data.encode('utf-8')), method='POST')
urllib.request.urlopen(request)
except HTTPError as her:
sys.stderr.write(
"Error creating | |
# CREDO cosmic-ray hit-image track-analysis experiment script
import math
import sys
from scipy.interpolate import interp2d
from scipy.ndimage import rotate, center_of_mass
from scipy.spatial import distance
from skimage.feature import canny
from skimage.filters import rank, gaussian
from skimage.measure import subdivide_polygon
from skimage.morphology import medial_axis, square, erosion, disk
from skimage.segmentation import active_contour
from skimage.transform import probabilistic_hough_line, rescale
from sklearn.linear_model import LinearRegression
from credo_cf import load_json, progress_and_process_image, group_by_id, GRAY, nkg_mark_hit_area, NKG_MASK, nkg_make_track, NKG_PATH, NKG_DIRECTION, \
NKG_DERIVATIVE, ID, NKG_THRESHOLD, NKG_UPSCALE, NKG_SKELETON, point_to_point_distance, center_of_points, NKG_MASKED, NKG_REGRESSION, NKG_PATH_FIT, \
store_png, IMAGE
import matplotlib.pyplot as plt
from numpy import unravel_index, ma
import numpy as np
import itertools
from scipy.sparse import csr_matrix
from scipy.sparse.dok import dok_matrix
from scipy.sparse.csgraph import dijkstra
# prepare dataset: hits - JSON's objects, and grays - numpy grayscale images 60x60
from credo_cf.classification.preprocess.nkg_processings import search_longest_path_dijkstra, bitmap_to_graph, analyse_path
# Load the hit objects (with decoded grayscale images) and index them by hit ID.
objects, count, errors = load_json('../data/manual.json', progress_and_process_image)
by_id = group_by_id(objects)
# Hand-picked hit ID selections used in the experiments below.
used_hits1 = {4711435, 6234182, 9152349, 4913621, 5468291, 7097636, 4976474, 5206452, 4876475, 5951007, 4714801, 4819239, 4660572, 4705446, 8280225, 8459656,
              8471578, 9124308, 9314789, 4813841}
used_hits2 = [7741225, 7238971, 5973441, 4892405, 17432760,
              17432645, 4731298, 6229582, 17571002, 17368987,
              7148947, 4899235, 18349704, 18250739, 6908292,
              9129139, 17771578, 17861029, 17337669, 7470695]
used_hits3 = [7741225, 4580817, 5973441, 4892405, 17432760,
              17432645, 4731298, 6229582, 17571002, 17368987,
              7148947, 4899235, 18349704, 18250739, 6908292,
              9129139, 17771578, 17861029, 17337669, 7470695,
              4711435, 6234182, 9152349, 4913621, 5468291,
              7097636, 4976474, 5206452, 4876475, 5951007,
              4714801, 4819239, 4660572, 4705446, 8280225,
              8459656, 8471578, 9124308, 9314789, 4813841]
# Active selection for this run.
used_hits = used_hits3
hits = []
for u in used_hits:
    hits.append(by_id[u][0])
# One grayscale image per selected hit.
grays = list(map(lambda x: x['gray'], hits))
# utils
def display(img):
    """Show a single 2-D array as a heat map with a colorbar."""
    plt.matshow(img)
    plt.colorbar()
    plt.show()
def display_all(values):
    """Show the first 20 matrices of *values* on a fixed 4x5 grid."""
    fig, axes = plt.subplots(4, 5, constrained_layout=True, figsize=(32, 24))
    for idx, ax in enumerate(axes.flat):
        ax.matshow(values[idx])
    # f.colorbar(im, ax=axs.flat)
    plt.show()
def display_all_from(hits, _from, title_func=None, scale=6):
    """Show the image stored under key *_from* of every hit on a 5-column grid.

    :param hits: list of hit dicts
    :param _from: dict key of the image to display
    :param title_func: optional callable mapping a hit to its subplot title
    :param scale: figure-size multiplier
    """
    cols = 5
    rows = int(math.ceil(len(hits) / cols))
    fig, axes = plt.subplots(rows, cols, constrained_layout=True,
                             figsize=(4 * scale, 3 * scale * rows / 4))
    for idx, ax in enumerate(axes.flat):
        if idx >= len(hits):
            break  # surplus axes on the last row stay empty
        ax.matshow(hits[idx].get(_from))
        if title_func is not None:
            ax.title.set_text(title_func(hits[idx]))
    # f.colorbar(im, ax=axs.flat)
    plt.show()
# 1/8 slice (octant) of the ray-probing path: 5x5 0/1 masks sweeping one
# 45-degree octant; build_ray_way rotates/mirrors these to cover 360 degrees.
ray_way_octet = [
    [
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [1, 1, 1, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
    ],
    [
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 1, 1, 0, 0],
        [1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
    ],
    [
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0],
    ],
    [
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 1, 0, 0, 0],
        [1, 0, 0, 0, 0],
    ],
]
# Same idea with 7x7 masks (radius 3), i.e. a finer angular resolution.
ray_way_octet2 = [
    [
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [1, 1, 1, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
    ],
    [
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 1, 1, 1, 0, 0, 0],
        [1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
    ],
    [
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 1, 0, 0, 0],
        [1, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
    ],
    [
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
    ],
    [
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
        [0, 1, 1, 0, 0, 0, 0],
        [1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
    ],
    [
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
    ],
    [
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 0],
        [1, 0, 0, 0, 0, 0, 0],
    ],
]
# Rounded 5x5 neighbourhood used by path_to_center_of_weight.
fit_mask = [
    [0, 1, 1, 1, 0],
    [1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1],
    [0, 1, 1, 1, 0],
]
def build_ray_way(octet):
    """Expand one octant of ray masks into masks covering all 360 degrees.

    :param octet: list of square 0/1 arrays spanning one 45-degree octant
    :return: list of dicts {'way': ndarray mask, 'angle': heading in degrees}
    """
    ret = []
    for r in range(0, 4):
        # rotate each octant mask into quadrant r
        for i in range(0, len(octet)):
            o = octet[i]
            oct = np.array(o)
            angle = r * 90.0 + 45.0 / (len(octet) - 1) * i - 180
            ret.append({'way': np.rot90(oct, r), 'angle': angle})
        # mirror the interior masks (endpoints excluded to avoid duplicates)
        # to fill the second octant of the same quadrant
        for i in range(1, len(octet) - 1):
            o = octet[-(i + 1)]
            oct = np.array(o)
            fl = np.flip(oct, axis=0)
            angle = r * 90.0 + 45.0 / (len(octet) - 1) * i + 45 - 180
            ret.append({'way': np.rot90(rotate(fl, angle=90), r), 'angle': angle})
    return ret
def way_next_point(way, step=1):
    """Locate the strongest cell on the ring lying *step* cells inside the
    mask border and return its offset from the mask centre.

    :param way: square 2-D array (a ray mask)
    :param step: distance of the ring from the border
    :return: (row_offset, col_offset) relative to the centre, as floats
    """
    rows, cols = way.shape
    ring = np.zeros(way.shape)
    # keep only the band between the border margin and the interior
    ring[step:rows - step, step:cols - step] = way[step:rows - step, step:cols - step]
    ring[step + 1:rows - step - 1, step + 1:cols - step - 1] = 0
    peak = unravel_index(ring.argmax(), ring.shape)
    return peak[0] - (rows - 1) / 2, peak[1] - (cols - 1) / 2
def calc_ways(img, pos, ways):
    """Score every ray mask in *ways* against the image patch centred at *pos*.

    :param img: 2-D intensity array
    :param pos: (row, col) centre of the patch
    :param ways: list of {'way': mask, 'angle': ...} from build_ray_way
    :return: the input dicts extended with 'value' (the accumulated score)
    """
    # half-width of the (square, odd-sized) mask
    w = int((ways[0]['way'].shape[0] - 1) / 2)
    cut = img[pos[0]-w:pos[0]+w+1, pos[1]-w:pos[1]+w+1]
    sums = []
    for way in ways:
        calc = cut * way['way']
        dw = np.ones(calc.shape)
        # NOTE(review): the fixed 1:4 window assumes 5x5 masks; for the 7x7
        # octet2 masks this weights an off-centre region -- confirm intent
        dw[1:4, 1:4] = calc[1:4, 1:4]
        calc = calc + dw  # cells nearest the centre count twice
        # calc = calc * dw  # (alternative: nearest-centre cells squared)
        s = np.sum(calc)
        sums.append({**way, 'value': s})
    return sums
def calc_pos(a, b):
    """Add two 2-D points element-wise and truncate the result to ints."""
    row = int(a[0] + b[0])
    col = int(a[1] + b[1])
    return row, col
def normalize_angle(angle):
    """Map *angle* (degrees) into the half-open interval (-180, 180]."""
    reduced = angle % 360
    if reduced > 180:
        reduced -= 360
    return reduced
def in_angle(a, b, v):
    """Return True when angle *v* lies on the arc from *a* to *b* (degrees).

    All angles are normalized first; the arc may wrap across +-180.
    """
    lo = normalize_angle(a) + 360
    hi = normalize_angle(b) + 360
    val = normalize_angle(v) + 360
    if lo <= hi:
        return lo <= val <= hi
    # arc wraps around: accept everything outside the excluded band
    return not (lo >= val >= hi)
def nkg_pather_step(img, next_pos, angle, threshold, fov, step):
    """Greedily trace a track from *next_pos*, at each step following the
    strongest ray whose heading stays within +-fov/2 of the current one.

    Visited pixels are zeroed in *img* (the caller passes a working copy),
    which also prevents the trace from looping back.
    :param img: working copy of the intensity image (mutated)
    :param next_pos: (row, col) starting position
    :param angle: initial heading in degrees
    :param threshold: minimum intensity for a pixel to be part of the track
    :param fov: field-of-view width in degrees
    :param step: ring distance passed to way_next_point
    :return: list of visited (row, col) positions
    """
    path = []
    while img[next_pos] > threshold:
        path.append(next_pos)
        img[next_pos] = 0  # mark as visited
        try:
            calc = calc_ways(img, next_pos, ray_way)
        except:
            # edge achieved
            break
        # keep directions inside the field of view whose next pixel is bright enough
        filtered = list(filter(lambda x: in_angle(angle - fov / 2, angle + fov / 2, x['angle']) and img[calc_pos(next_pos, way_next_point(x['way'], step))] > threshold, calc))
        if len(filtered) == 0:
            break
        direction = max(filtered, key=lambda x: x['value'])
        next_pos = calc_pos(next_pos, way_next_point(direction['way'], step))
        angle = direction['angle']
    return path
def nkg_pather(img, threshold, fov=90, step=1):
    """Trace a full track through *img*: start at the brightest pixel and
    follow the ridge in the two opposite directions, then join the legs.

    :param img: 2-D intensity array (a private copy is traced and mutated)
    :param threshold: minimum intensity for track pixels
    :param fov: field-of-view width in degrees for each step
    :param step: ring distance passed to way_next_point
    :return: ndarray of (row, col) path points ordered end-to-end
    """
    # mask = np.zeros(img.shape)
    img = img.copy()
    start = unravel_index(img.argmax(), img.shape)
    try:
        # NOTE(review): this reads the global 'h' (h['smooth']) instead of the
        # 'img' argument -- looks like a leftover from the script's per-hit
        # loop and fails with NameError when no such global exists; confirm
        # whether 'img' was intended here.
        calc = calc_ways(h['smooth'], start, ray_way)
    except:
        return np.array([])
    direction = max(calc, key=lambda x: x['value'])
    next_pos = calc_pos(start, way_next_point(direction['way'], step))
    angle = direction['angle']
    # the second leg restarts at the peak, heading the opposite way
    next_angle = direction['angle'] - 180
    path = nkg_pather_step(img, next_pos, angle, threshold, fov, step)
    angle = next_angle
    next_pos = start
    path2 = nkg_pather_step(img, next_pos, angle, threshold, fov, step)
    # reverse the second leg so the combined path runs end-to-end
    return np.array([*reversed(path2), *path])
def line_to_mask(img, path, scale=1, value=1, create_new_mask=False):
    """Rasterise *path* into a mask and return *img* wrapped as a masked array.

    :param img: 2-D array to mask
    :param path: iterable of (row, col) points
    :param scale: upscale factor applied to the path coordinates
    :param value: value written at each rasterised point
    :param create_new_mask: if True draw into a fresh zero array; otherwise
        draw directly into *img* (mutating it) and use it as its own mask
    NOTE(review): with create_new_mask=False every non-zero pixel of *img*
    becomes masked, not only the path -- confirm this is intended.
    """
    if create_new_mask:
        mask = np.zeros(img.shape)
    else:
        mask = img
    for a in path:
        if scale > 1:
            # offset by scale/2 so the point lands in the centre of the upscaled cell
            mask[round(a[0] * scale + scale/2.0), round(a[1] * scale + scale/2.0)] = value
        else:
            mask[round(a[0]), round(a[1])] = value
    return ma.masked_array(img, mask)
def path_to_center_of_weight(img, fm, path):
path2 = []
fit_mask = np.array(fm)
w = fit_mask.shape[0]
h = fit_mask.shape[1]
fit = img.copy()
for i in path:
x1 = int(i[0] - (w - 1) / 2)
x2 = int(i[0] + (w + 1) / 2)
y1 = int(i[1] - (h - 1) / 2)
y2 = int(i[1] + (h + 1) / 2)
# cut = fit[i[0]-2:i[0]+3, i[1]-2:i[1]+3]
cut = fit[x1:x2, y1:y2]
if cut.shape[0] != 5 or cut.shape[1] | |
already exists: ", topdir)
return False
def _search_print_v1(self, repo_list):
    """Print search results from v1 API"""
    for repo in repo_list["results"]:
        official_tag = "[OK]" if repo.get("is_official") else "----"
        text = ""
        if repo.get("description") is not None:
            # keep only printable characters of the description
            text = "".join(char for char in repo["description"]
                           if char in string.printable)
        Msg().out("%-40.40s %8.8s %s"
                  % (repo["name"], official_tag, text))
def _search_print_v2(self, repo_list):
    """Print catalog results from v2 API"""
    for repo_name in repo_list["repositories"]:
        # v2 catalog has no "official" flag; pad the column with a blank
        Msg().out("%-40.40s %8.8s" % (repo_name, " "))
def do_search(self, cmdp):
    """
    search: search dockerhub for container images
    search [options] <expression>
    -a :do not pause
    --index=https://index.docker.io/v1 :docker index
    --registry=https://registry-1.docker.io :docker registry
    """
    pause = not cmdp.get("-a")
    index_url = cmdp.get("--index=")
    registry_url = cmdp.get("--registry=")
    expression = cmdp.get("P1")
    if index_url:
        self.dockerioapi.set_index(index_url)
    if registry_url:
        self.dockerioapi.set_registry(registry_url)
    if cmdp.missing_options():  # syntax error
        return False
    Msg().out("%-40.40s %8.8s %s" %
              ("NAME", "OFFICIAL", "DESCRIPTION"), l=Msg.INF)
    self.dockerioapi.search_init(pause)
    # reuse a stored v2 login token, if any, for the selected registry
    v2_auth_token = self.keystore.get(self.dockerioapi.registry_url)
    self.dockerioapi.set_v2_login_token(v2_auth_token)
    # page through results until the API returns an empty page
    while True:
        repo_list = self.dockerioapi.search_get_page(expression)
        if not repo_list:
            return True
        elif "results" in repo_list:
            # v1-style payload
            self._search_print_v1(repo_list)
        elif "repositories" in repo_list:
            # v2 catalog payload
            self._search_print_v2(repo_list)
        if pause and not self.dockerioapi.search_ended:
            # NOTE(review): raw_input is Python 2; under Python 3 this name
            # must be provided elsewhere (e.g. aliased to input) -- confirm
            key_press = raw_input("[press return or q to quit]")
            if key_press in ("q", "Q", "e", "E"):
                return True
def do_load(self, cmdp):
    """
    load: load a container image saved by docker with 'docker save'
    load --input=<docker-saved-container-file>
    load -i <docker-saved-container-file>
    load < <docker-saved-container-file>
    """
    imagefile = cmdp.get("--input=")
    if not imagefile:
        imagefile = cmdp.get("-i=")
    if imagefile is False:
        # bare -i flag: read the image from stdin
        imagefile = "-"
    if cmdp.missing_options():  # syntax error
        return False
    if not imagefile:
        Msg().err("Error: must specify filename of docker exported image")
        return False
    loaded_repos = self.dockerlocalfileapi.load(imagefile)
    if not loaded_repos:
        Msg().err("Error: loading failed")
        return False
    for loaded_repo in loaded_repos:
        Msg().out(loaded_repo)
    return True
def do_import(self, cmdp):
    """
    import : import image (directory tree) from tar file or stdin
    import <tar-file> <repo/image:tag>
    import - <repo/image:tag>
    --mv :if possible move tar-file instead of copy
    --tocontainer :import to container, no image is created
    --clone :import udocker container format with metadata
    --name=<container-name> :with --tocontainer or --clone to add an alias
    """
    move_tarball = cmdp.get("--mv")
    to_container = cmdp.get("--tocontainer")
    name = cmdp.get("--name=")
    clone = cmdp.get("--clone")
    from_stdin = cmdp.get("-")
    if from_stdin:
        # reading from stdin: only one positional arg, and moving is impossible
        tarfile = "-"
        imagespec = cmdp.get("P1")
        move_tarball = False
    else:
        tarfile = cmdp.get("P1")
        imagespec = cmdp.get("P2")
        if not tarfile:
            Msg().err("Error: must specify tar filename")
            return False
    if cmdp.missing_options():  # syntax error
        return False
    if to_container or clone:
        if clone:
            # clone format carries its own metadata; no image spec needed
            container_id = self.dockerlocalfileapi.import_clone(
                tarfile, name)
        else:
            # fall back to a placeholder repo name when none was given
            (imagerepo, tag) = self._check_imagespec(imagespec,
                                                     "IMPORTED:unknown")
            container_id = self.dockerlocalfileapi.import_tocontainer(
                tarfile, imagerepo, tag, name)
        if container_id:
            Msg().out(container_id)
            return True
    else:
        (imagerepo, tag) = self._check_imagespec(imagespec)
        if not imagerepo:
            return False
        if self.dockerlocalfileapi.import_toimage(tarfile, imagerepo, tag,
                                                  move_tarball):
            return True
    Msg().err("Error: importing")
    return False
def do_export(self, cmdp):
    """
    export : export container (directory tree) to a tar file or stdin
    export -o <tar-file> <container-id>
    export - <container-id>
    -o :export to file, instead of stdout
    --clone :export in clone (udocker) format
    """
    to_file = cmdp.get("-o")
    clone = cmdp.get("--clone")
    if to_file:
        tarfile = cmdp.get("P1")
        container_id = cmdp.get("P2")
    else:
        # no -o: write the archive to stdout
        tarfile = "-"
        container_id = cmdp.get("P1")
    container_id = self.localrepo.get_container_id(container_id)
    if not container_id:
        Msg().err("Error: invalid container id", container_id)
        return False
    if not tarfile:
        Msg().err("Error: invalid output file name", tarfile)
        return False
    cstruct = ContainerStructure(self.localrepo, container_id)
    exported = cstruct.clone_tofile(tarfile) if clone else cstruct.export_tofile(tarfile)
    if exported:
        return True
    Msg().err("Error: exporting")
    return False
def do_clone(self, cmdp):
    """
    clone : create a duplicate copy of an existing container
    clone <source-container-id>
    --name=<container-name> :add an alias to the cloned container
    """
    alias = cmdp.get("--name=")
    container_id = self.localrepo.get_container_id(cmdp.get("P1"))
    if not container_id:
        Msg().err("Error: invalid container id", container_id)
        return False
    if self.dockerlocalfileapi.clone_container(container_id, alias):
        # print the id of the source container, mirroring the original behaviour
        Msg().out(container_id)
        return True
    Msg().err("Error: cloning")
    return False
def do_login(self, cmdp):
"""
login: authenticate into docker repository e.g. dockerhub
--username=username
--password=password
--registry=https://registry-1.docker.io
"""
username = cmdp.get("--username=")
password = cmdp.get("--password=")
registry_url = cmdp.get("--registry=")
if registry_url:
self.dockerioapi.set_registry(registry_url)
if not username:
username = raw_input("username: ")
if not password:
password = getpass("password: ")
if password and password == password.upper():
Msg().err("Warning: password in uppercase",
"Caps Lock ?", l=Msg.WAR)
v2_auth_token = \
self.dockerioapi.get_v2_login_token(username, password)
if self.keystore.put(self.dockerioapi.registry_url, v2_auth_token, ""):
return True
Msg().err("Error: invalid credentials")
return False
def do_logout(self, cmdp):
"""
logout: authenticate into docker repository e.g. dockerhub
-a remove all authentication credentials
--registry=https://registry-1.docker.io
"""
all_credentials = cmdp.get("-a")
registry_url = cmdp.get("--registry=")
if registry_url:
self.dockerioapi.set_registry(registry_url)
if all_credentials:
status = self.keystore.erase()
else:
status = self.keystore.delete(self.dockerioapi.registry_url)
if not status:
Msg().err("Error: deleting credentials")
return status
def do_pull(self, cmdp):
"""
pull: download images from docker hub
pull [options] <repo/image:tag>
--httpproxy=socks4://user:pass@host:port :use http proxy
--httpproxy=socks5://user:pass@host:port :use http proxy
--httpproxy=socks4://host:port :use http proxy
--httpproxy=socks5://host:port :use http proxy
--index=https://index.docker.io/v1 :docker index
--registry=https://registry-1.docker.io :docker registry
"""
index_url = cmdp.get("--index=")
registry_url = cmdp.get("--registry=")
http_proxy = cmdp.get("--httpproxy=")
(imagerepo, tag) = self._check_imagespec(cmdp.get("P1"))
if not registry_url and self.keystore.get(imagerepo.split("/")[0]):
registry_url = imagerepo.split("/")[0]
if (not imagerepo) or cmdp.missing_options(): # syntax error
return False
else:
if http_proxy:
self.dockerioapi.set_proxy(http_proxy)
if index_url:
self.dockerioapi.set_index(index_url)
if registry_url:
self.dockerioapi.set_registry(registry_url)
v2_auth_token = self.keystore.get(self.dockerioapi.registry_url)
self.dockerioapi.set_v2_login_token(v2_auth_token)
if self.dockerioapi.get(imagerepo, tag):
return True
else:
Msg().err("Error: no files downloaded")
return False
def do_create(self, cmdp):
"""
create: extract image layers and create a container
create [options] <repo/image:tag>
--name=xxxx :set or change the name of the container
"""
imagespec = cmdp.get("P1")
name = cmdp.get("--name=")
if cmdp.missing_options(): # syntax error
return False
container_id = self._create(imagespec)
if container_id:
Msg().out(container_id)
if name and not self.localrepo.set_container_name(container_id,
name):
Msg().err("Error: invalid container name may already exist "
"or wrong format")
return False
return True
return False
def _create(self, imagespec):
"""Auxiliary to create(), performs the creation"""
if not self.dockerioapi.is_repo_name(imagespec):
Msg().err("Error: must specify image:tag or repository/image:tag")
return False
(imagerepo, tag) = self._check_imagespec(imagespec)
if imagerepo:
return ContainerStructure(self.localrepo).create_fromimage(
imagerepo, tag)
return False
    def _get_run_options(self, cmdp, exec_engine=None):
        """Read command line options into variables.

        Each entry of cmd_options maps an internal option name to:
          "fl":  the command-line flags (or positional selector "P+")
                 that can provide the value,
          "act": how a value is stored -- "R" replaces the stored value,
                 "E" extends a stored list with it,
          "p2"/"p3": extra arguments forwarded verbatim to cmdp.get()
                 (presumably parsing context and a multiplicity flag --
                 confirm against the CmdParser implementation).

        When exec_engine is given, parsed values are written into
        exec_engine.opt; with exec_engine=None the flags are only consumed
        from cmdp (e.g. to validate the command line).
        """
        cmdp.declare_options("-v= -e= -w= -u= -p= -i -t -a -P")
        cmd_options = {
            "netcoop": {
                "fl": ("-P", "--publish-all", "--netcoop",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "portsmap": {
                "fl": ("-p=", "--publish=",), "act": "E",
                "p2": "CMD_OPT", "p3": True
            },
            "novol": {
                "fl": ("--novol=",), "act": "R",
                "p2": "CMD_OPT", "p3": True
            },
            "vol": {
                "fl": ("-v=", "--volume=",), "act": "E",
                "p2": "CMD_OPT", "p3": True
            },
            "env": {
                "fl": ("-e=", "--env=",), "act": "E",
                "p2": "CMD_OPT", "p3": True
            },
            "user": {
                "fl": ("-u=", "--user=",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "cwd": {
                "fl": ("-w=", "--workdir=",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "entryp": {
                "fl": ("--entrypoint=",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "cpuset": {
                "fl": ("--cpuset-cpus=",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "hostauth": {
                "fl": ("--hostauth",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "nosysdirs": {
                "fl": ("--nosysdirs",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "hostenv": {
                "fl": ("--hostenv",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "bindhome": {
                "fl": ("--bindhome",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "nometa": {
                "fl": ("--nometa",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "dri": {
                "fl": ("--dri",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "cmd": {
                "fl": ("P+",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "volfrom": {
                "fl": ("--volumes-from=",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "dns": {
                "fl": ("--dns=",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "dnssearch": {
                "fl": ("--dns-search=",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "kernel": {
                "fl": ("--kernel=",), "act": "R",
                "p2": "CMD_OPT", "p3": False
            },
            "devices": {
                "fl": ("--device=",), "act": "E",
                "p2": "CMD_OPT", "p3": True
            }
        }
        for option, cmdp_args in cmd_options.iteritems():
            last_value = None
            # Every flag alias is consumed from cmdp even when no
            # exec_engine was given (so missing_options() stays accurate).
            for cmdp_fl in cmdp_args["fl"]:
                option_value = cmdp.get(cmdp_fl,
                                        cmdp_args["p2"], cmdp_args["p3"])
                if not exec_engine:
                    continue
                if cmdp_args["act"] == "R":  # action is replace
                    # Store when this alias yielded a value, or on the very
                    # first alias (last_value is None) to initialize the slot.
                    if option_value or last_value is None:
                        exec_engine.opt[option] = option_value
                elif cmdp_args["act"] == "E":  # action is extend
                    exec_engine.opt[option].extend(option_value)
                last_value = option_value
def do_run(self, cmdp):
"""
run: execute a container
run [options] <container-id-or-name>
run [options] <repo/image:tag>
--rm :delete container upon exit
--workdir=/home/userXX :working directory set to /home/userXX
--user=userXX :run as userXX
--user=root :run as root
--volume=/data:/mnt :mount host directory /data in /mnt
--novol=/proc :remove /proc from list of volumes to mount
--env="MYTAG=xxx" :set environment variable
--hostauth :bind the host /etc/passwd /etc/group ...
--nosysdirs :do not bind the host /proc /sys /run /dev
--nometa :ignore container metadata
--dri :bind directories relevant for dri graphics
--hostenv :pass the host environment to the container
--cpuset-cpus=<1,2,3-4> | |
a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_contact_info(email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str email: Email (urlencoded) of the contact OR its SMS attribute value (required)
:return: GetExtendedContactDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_contact_info_with_http_info(email, **kwargs) # noqa: E501
else:
(data) = self.get_contact_info_with_http_info(email, **kwargs) # noqa: E501
return data
def get_contact_info_with_http_info(self, email, **kwargs): # noqa: E501
"""Get a contact's details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_contact_info_with_http_info(email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str email: Email (urlencoded) of the contact OR its SMS attribute value (required)
:return: GetExtendedContactDetails
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['email'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contact_info" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'email' is set
if ('email' not in params or
params['email'] is None):
raise ValueError("Missing the required parameter `email` when calling `get_contact_info`") # noqa: E501
collection_formats = {}
path_params = {}
if 'email' in params:
path_params['email'] = params['email'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api-key', 'partner-key'] # noqa: E501
return self.api_client.call_api(
'/contacts/{email}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetExtendedContactDetails', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_contact_stats(self, email, **kwargs): # noqa: E501
"""Get email campaigns' statistics for a contact # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_contact_stats(email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str email: Email address (urlencoded) of the contact (required)
:param date start_date: Mandatory if endDate is used. Starting date (YYYY-MM-DD) of the statistic events specific to campaigns. Must be lower than equal to endDate
:param date end_date: Mandatory if startDate is used. Ending date (YYYY-MM-DD) of the statistic events specific to campaigns. Must be greater than equal to startDate
:return: GetContactCampaignStats
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_contact_stats_with_http_info(email, **kwargs) # noqa: E501
else:
(data) = self.get_contact_stats_with_http_info(email, **kwargs) # noqa: E501
return data
def get_contact_stats_with_http_info(self, email, **kwargs): # noqa: E501
"""Get email campaigns' statistics for a contact # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_contact_stats_with_http_info(email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str email: Email address (urlencoded) of the contact (required)
:param date start_date: Mandatory if endDate is used. Starting date (YYYY-MM-DD) of the statistic events specific to campaigns. Must be lower than equal to endDate
:param date end_date: Mandatory if startDate is used. Ending date (YYYY-MM-DD) of the statistic events specific to campaigns. Must be greater than equal to startDate
:return: GetContactCampaignStats
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['email', 'start_date', 'end_date'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contact_stats" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'email' is set
if ('email' not in params or
params['email'] is None):
raise ValueError("Missing the required parameter `email` when calling `get_contact_stats`") # noqa: E501
collection_formats = {}
path_params = {}
if 'email' in params:
path_params['email'] = params['email'] # noqa: E501
query_params = []
if 'start_date' in params:
query_params.append(('startDate', params['start_date'])) # noqa: E501
if 'end_date' in params:
query_params.append(('endDate', params['end_date'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api-key', 'partner-key'] # noqa: E501
return self.api_client.call_api(
'/contacts/{email}/campaignStats', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetContactCampaignStats', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_contacts(self, **kwargs): # noqa: E501
"""Get all the contacts # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_contacts(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Number of documents per page
:param int offset: Index of the first document of the page
:param datetime modified_since: Filter (urlencoded) the contacts modified after a given UTC date-time (YYYY-MM-DDTHH:mm:ss.SSSZ). Prefer to pass your timezone in date-time format for accurate result.
:return: GetContacts
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_contacts_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_contacts_with_http_info(**kwargs) # noqa: E501
return data
def get_contacts_with_http_info(self, **kwargs): # noqa: E501
"""Get all the contacts # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_contacts_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Number of documents per page
:param int offset: Index of the first document of the page
:param datetime modified_since: Filter (urlencoded) the contacts modified after a given UTC date-time (YYYY-MM-DDTHH:mm:ss.SSSZ). Prefer to pass your timezone in date-time format for accurate result.
:return: GetContacts
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['limit', 'offset', 'modified_since'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contacts" % key
)
params[key] = val
del params['kwargs']
if 'limit' in params and params['limit'] > 1000: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_contacts`, must be a value less than or equal to `1000`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'modified_since' in params:
query_params.append(('modifiedSince', params['modified_since'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api-key', 'partner-key'] # noqa: E501
return self.api_client.call_api(
'/contacts', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetContacts', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_contacts_from_list(self, list_id, **kwargs): # noqa: E501
"""Get contacts in a list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_contacts_from_list(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int list_id: Id of the list (required)
:param datetime modified_since: Filter (urlencoded) the contacts modified after a given UTC date-time (YYYY-MM-DDTHH:mm:ss.SSSZ). Prefer to pass your timezone in date-time format for accurate result.
:param int limit: Number of documents per page
:param int offset: Index of the first document of the page
:return: GetContacts
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_contacts_from_list_with_http_info(list_id, **kwargs) # noqa: E501
else:
(data) = self.get_contacts_from_list_with_http_info(list_id, **kwargs) # noqa: E501
return data
def get_contacts_from_list_with_http_info(self, list_id, **kwargs): # | |
Write register
self._set_bank(0)
return self._i2c.write_byte_data(self.address, self._AGB0_REG_PWR_MGMT_1, register)
def low_power(self, on):
"""
Sets the ICM20948 module in or out of low power mode
:return: Returns true if the power mode setting write was successful, otherwise False.
:rtype: bool
"""
# Read the Power Management Register, store in local variable "register"
self._set_bank(0)
register = self._i2c.read_byte_data(self.address, self._AGB0_REG_PWR_MGMT_1)
# Set/clear the low power mode bit [5] as needed
if on:
register |= (1<<5) # set bit
else:
register &= ~(1<<5) # clear bit
# Write register
self._set_bank(0)
return self._i2c.write_byte_data(self.address, self._AGB0_REG_PWR_MGMT_1, register)
def set_sample_mode(self, sensors, mode):
"""
Sets the sample mode of the ICM90248 module
:param sensors: byte representing the selected sensors (accelerometer, gyroscope, magnetometer)
:param mode: the mode in which the sensors are to be sampled. Two modes are available: continuos or cycled
:return: Returns true if the sample mode setting write was successful, otherwise False.
:rtype: bool
"""
# check for valid sensor ID from user of this function
if ((sensors & (self._ICM_20948_Internal_Acc | self._ICM_20948_Internal_Gyr | self._ICM_20948_Internal_Mst)) == False):
print("Invalid Sensor ID")
return False
# Read the LP CONFIG Register, store in local variable "register"
self._set_bank(0)
register = self._i2c.read_byte_data(self.address, self._AGB0_REG_LP_CONFIG)
if (sensors & self._ICM_20948_Internal_Acc):
# Set/clear the sensor specific sample mode bit as needed
if mode == self._ICM_20948_Sample_Mode_Cycled:
register |= (1<<5) # set bit
elif mode == self._ICM_20948_Sample_Mode_Continuous:
register &= ~(1<<5) # clear bit
if (sensors & self._ICM_20948_Internal_Gyr):
# Set/clear the sensor specific sample mode bit as needed
if mode == self._ICM_20948_Sample_Mode_Cycled:
register |= (1<<4) # set bit
elif mode == self._ICM_20948_Sample_Mode_Continuous:
register &= ~(1<<4) # clear bit
if (sensors & self._ICM_20948_Internal_Mst):
# Set/clear the sensor specific sample mode bit as needed
if mode == self._ICM_20948_Sample_Mode_Cycled:
register |= (1<<6) # set bit
elif mode == self._ICM_20948_Sample_Mode_Continuous:
register &= ~(1<<6) # clear bit
# Write register
self._set_bank(0)
return self._i2c.write_byte_data(self.address, self._AGB0_REG_LP_CONFIG, register)
def set_full_scale_range_accel(self):
"""
Sets the full scale range for the accel in the ICM20948 module
:return: Returns true if the full scale range setting write was successful, otherwise False.
:rtype: bool
"""
# Read the Accel Config Register, store in local variable "register"
self._set_bank(2)
register = self._i2c.read_byte_data(self.address, self._AGB2_REG_ACCEL_CONFIG_1)
register &= ~(0b00000110) # clear bits 2:1 (0b0000.0XX0)
register |= (self._selected_accelerometer_sensitvity << 1) # place mode select into bits 2:1 of _AGB2_REG_ACCEL_CONFIG
# Write register
self._set_bank(2)
return self._i2c.write_byte_data(self.address, self._AGB2_REG_ACCEL_CONFIG_1, register)
def set_full_scale_range_gyro(self):
"""
Sets the full scale range for the gyro in the ICM20948 module
:return: Returns true if the full scale range setting write was successful, otherwise False.
:rtype: bool
"""
# Read the Gyro Config Register, store in local variable "register"
self._set_bank(2)
register = self._i2c.read_byte_data(self.address, self._AGB2_REG_GYRO_CONFIG_1)
register &= ~(0b00000110) # clear bits 2:1 (0b0000.0XX0)
register |= (self._selected_gyroscope_sensitvity << 1) # place mode select into bits 2:1 of _AGB2_REG_GYRO_CONFIG_1
# Write register
self._set_bank(2)
return self._i2c.write_byte_data(self.address, self._AGB2_REG_GYRO_CONFIG_1, register)
    def get_ODR_gyro(self):
        """
        Read the gyroscope sample-rate divider register (GYRO_SMPLRT_DIV,
        bank 2) of the ICM20948.

        :return: current raw divider register value
        :rtype: int
        """
        self._set_bank(2)
        register = self._i2c.read_byte_data(self.address, self._AGB2_REG_GYRO_SMPLRT_DIV)
        return register
def set_ODR_gyro(self, rate=4):
self._set_bank(2)
register = self._i2c.read_byte_data(self.address, self._AGB2_REG_GYRO_SMPLRT_DIV)
# clear register
register &= ~(0b11111111)
register |= (rate << 0)
ret = self._i2c.write_byte_data(self.address, self._AGB2_REG_GYRO_SMPLRT_DIV, register)
logging.debug(f'write function returned {ret}')
logging.debug(f'current output divisor {self.get_ODR_gyro()}')
def set_DLPF_cfg_accel(self, dlpcfg):
"""
Sets the digital low pass filter for the accel in the ICM20948 module
:return: Returns true if the dlp setting write was successful, otherwise False.
:rtype: bool
"""
# Read the Accel Config Register, store in local variable "register"
self._set_bank(2)
register = self._i2c.read_byte_data(self.address, self._AGB2_REG_ACCEL_CONFIG_1)
logging.debug(f'current low pass filer for acceleration is: {register}')
register &= ~(0b00111000) # clear bits 5:3 (0b00XX.X000)
register |= (dlpcfg << 3) # place dlpcfg select into bits 5:3 of _AGB2_REG_ACCEL_CONFIG_1
# Write register
self._set_bank(2)
return self._i2c.write_byte_data(self.address, self._AGB2_REG_ACCEL_CONFIG_1, register)
def set_DLPF_cfg_gyro(self, dlpcfg):
"""
Sets the digital low pass filter for the gyro in the ICM20948 module
:return: Returns true if the dlp setting write was successful, otherwise False.
:rtype: bool
"""
# Read the gyro Config Register, store in local variable "register"
self._set_bank(2)
register = self._i2c.read_byte_data(self.address, self._AGB2_REG_GYRO_CONFIG_1)
logging.debug(f'current low pass filer for gyroscope is: {register}')
register &= ~(0b00111000) # clear bits 5:3 (0b00XX.X000)
register |= (dlpcfg << 3) # place dlpcfg select into bits 5:3 of _AGB2_REG_GYRO_CONFIG_1
# Write register
self._set_bank(2)
return self._i2c.write_byte_data(self.address, self._AGB2_REG_GYRO_CONFIG_1, register)
def enable_DLPF_accel(self, on):
"""
Enables or disables the accelerometer DLPF of the ICM90248 module
:return: Returns true if the DLPF mode setting write was successful, otherwise False.
:rtype: bool
"""
# Read the _AGB2_REG_ACCEL_CONFIG_1, store in local variable "register"
self._set_bank(2)
register = self._i2c.read_byte_data(self.address, self._AGB2_REG_ACCEL_CONFIG_1)
# Set/clear the ACCEL_FCHOICE bit [0] as needed
if on:
register |= (1<<0) # set bit
else:
register &= ~(1<<0) # clear bit
# Write register
self._set_bank(2)
return self._i2c.write_byte_data(self.address, self._AGB2_REG_ACCEL_CONFIG_1, register)
def enable_DLPF_gyro(self, on):
"""
Enables or disables the Gyro DLPF of the ICM90248 module
:return: Returns true if the DLPF mode setting write was successful, otherwise False.
:rtype: bool
"""
# Read the _AGB2_REG_GYRO_CONFIG_1, store in local variable "register"
self._set_bank(2)
register = self._i2c.read_byte_data(self.address, self._AGB2_REG_GYRO_CONFIG_1)
# Set/clear the GYRO_FCHOICE bit [0] as needed
if on:
register |= (1<<0) # set bit
else:
register &= ~(1<<0) # clear bit
# Write register
self._set_bank(2)
return self._i2c.write_byte_data(self.address, self._AGB2_REG_GYRO_CONFIG_1, register)
def data_ready(self):
"""
Returns status of RAW_DATA_0_RDY_INT the ICM90248 module
:return: Returns true if raw data is ready, otherwise False.
:rtype: bool
"""
# Read the _AGB0_REG_INT_STATUS_1, store in local variable "register"
self._set_bank(0)
register = self._i2c.read_byte_data(self.address, self._AGB0_REG_INT_STATUS_1)
# check bit [0]
if (register & (1<<0)):
return True
else:
return False
def to_signed_int(self, input):
"""
Takes an input data of 16 bits, and returns the signed 32 bit int version of this data
this is necessary, because python does not overflow
:return: Signed 32 bit integer
:rtype: int
"""
if input > 32767:
input -= 65536
return input
def i2c_master_passthrough(self, passthrough):
"""
Enables or disables I2C Master Passthrough
:return: Returns true if the setting write was successful, otherwise False.
:rtype: bool
"""
# Read the _AGB0_REG_INT_PIN_CONFIG, store in local variable "register"
self._set_bank(0)
register = self._i2c.read_byte_data(self.address, self._AGB0_REG_INT_PIN_CONFIG)
# Set/clear the BYPASS_EN bit [1] as needed
if passthrough:
register |= (1<<1) # set bit
else:
register &= ~(1<<1) # clear bit
# Write register
self._set_bank(0)
return self._i2c.write_byte_data(self.address, self._AGB0_REG_INT_PIN_CONFIG, register)
def i2c_master_enable(self, enable):
"""
Enables or disables I2C Master
:return: Returns true if the setting write was successful, otherwise False.
:rtype: bool
"""
self.i2c_master_passthrough(False) # Disable BYPASS_EN
# Setup Master Clock speed as 345.6 kHz, and NSP (aka next slave read) to "stop between reads"
# Read the _AGB3_REG_I2C_MST_CTRL, store in local variable "register"
self._set_bank(3)
register = self._i2c.read_byte_data(self.address, self._AGB3_REG_I2C_MST_CTRL)
register &= ~(0x0F) # clear bits for master clock [3:0]
register |= (0x07) # set bits for master clock [3:0], 0x07 corresponds to 345.6 kHz, good for up to 400 kHz
register |= (1<<4) # set bit [4] for NSR (next slave read). 0 = restart between reads. 1 = stop between reads.
# Write register
self._set_bank(3)
self._i2c.write_byte_data(self.address, self._AGB3_REG_I2C_MST_CTRL, register)
# enable/disable Master I2C
# Read the _AGB0_REG_USER_CTRL, store in local variable "register"
self._set_bank(0)
register = self._i2c.read_byte_data(self.address, self._AGB0_REG_USER_CTRL)
# Set/clear the I2C_MST_EN bit [5] as needed
if enable:
register |= (1<<5) # set bit
else:
register &= ~(1<<5) # clear bit
# Write register
self._set_bank(0)
return self._i2c.write_byte_data(self.address, self._AGB0_REG_USER_CTRL, register)
def i2c_master_slv4_txn(self, addr, reg, data, rw, send_reg_addr):
# Used to configure a device before it is setup into a normal 0-3 slave slot
# Transact directly with an I2C device, one byte at a time
# Thanks MikeFair! // https://github.com/kriswiner/MPU9250/issues/86
if rw:
addr |= 0x80
self._set_bank(3)
self._i2c.write_byte_data(self.address, self._AGB3_REG_I2C_SLV4_ADDR, addr)
self._set_bank(3)
self._i2c.write_byte_data(self.address, self._AGB3_REG_I2C_SLV4_REG, reg)
ctrl_register_slv4 = 0x00
ctrl_register_slv4 |= (1<<7) # EN bit [7] (set)
ctrl_register_slv4 &= ~(1<<6) # INT_EN bit [6] (cleared)
ctrl_register_slv4 &= ~(0x0F) # DLY bits [4:0] (cleared = 0)
if(send_reg_addr):
ctrl_register_slv4 &= ~(1<<5) # REG_DIS bit [5] (cleared)
else:
ctrl_register_slv4 |= (1<<5) # REG_DIS bit [5] (set)
txn_failed = False
if (rw == False):
self._set_bank(3)
self._i2c.write_byte_data(self.address, self._AGB3_REG_I2C_SLV4_DO, data)
# Kick | |
# <gh_stars>1-10  (repository-scrape artifact; kept as a comment so the module parses)
# -*- coding: utf-8 -*-
import json
import math
import os
import sys
import time
from dynet import *
from optparse import OptionParser
from evaluation import *
from discrete_argid_feats import ArgPosition, OutHeads, SpanWidth
from raw_data import make_data_instance
from semafor_evaluation import convert_conll_to_frame_elements
# Command-line interface for the frame-semantic argument-identification (argid) model.
optpr = OptionParser()
optpr.add_option("--testf", dest="test_conll", help="Annotated CoNLL test file", metavar="FILE", default=TEST_CONLL)
# Parser lifecycle mode: train from scratch, evaluate, resume, ensemble, or raw-text predict.
optpr.add_option("--mode", dest="mode", type="choice", choices=["train", "test", "refresh", "ensemble", "predict"],
                 default="train")
optpr.add_option("--saveensemble", action="store_true", default=False)
optpr.add_option("-n", "--model_name", help="Name of model directory to save model to.")
optpr.add_option("--exemplar", action="store_true", default=False)
# How to treat spans longer than the allowed maximum: clip them or filter the example.
optpr.add_option("--spanlen", type="choice", choices=["clip", "filter"], default="clip")
optpr.add_option("--loss", type="choice", choices=["log", "softmaxm", "hinge"], default="softmaxm")
optpr.add_option("--cost", type="choice", choices=["hamming", "recall"], default="recall")
# Weight of the recall-oriented cost (see RECALL_ORIENTED_COST below).
optpr.add_option("--roc", type="int", default=2)
optpr.add_option("--hier", action="store_true", default=False)
# Syntactic features: dependency parses, constituency parses, or none.
optpr.add_option("--syn", type="choice", choices=["dep", "constit", "none"], default="none")
optpr.add_option("--ptb", action="store_true", default=False)
optpr.add_option("--raw_input", type="str", metavar="FILE")
optpr.add_option("--config", type="str", metavar="FILE")
(options, args) = optpr.parse_args()
# Resolve the model directory and the (versioned) best-model file path.
model_dir = "logs/{}/".format(options.model_name)
model_file_name = "{}best-argid-{}-model".format(model_dir, VERSION)
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
# Choose training data: lexicographic exemplars vs. full-text annotation.
if options.exemplar:
    train_conll = TRAIN_EXEMPLAR
    # TODO(Swabha): Still don"t have exemplar constituent parses.
else:
    train_conll = TRAIN_FTE
    train_constits = TRAIN_FTE_CONSTITS
# Global switches derived from the CLI options.
USE_SPAN_CLIP = (options.spanlen == "clip")  # clip overlong spans instead of dropping examples
USE_DROPOUT = True
if options.mode in ["test", "predict"]:
    USE_DROPOUT = False  # dropout only while training/refreshing
USE_WV = True  # use pretrained word vectors
USE_HIER = options.hier
USE_DEPS = USE_CONSTITS = False
if options.syn == "dep":
    USE_DEPS = True
elif options.syn == "constit":
    USE_CONSTITS = True
USE_PTB_CONSTITS = options.ptb
# Ensemble outputs are only saved when evaluating with --saveensemble.
SAVE_FOR_ENSEMBLE = (options.mode == "test") and options.saveensemble
RECALL_ORIENTED_COST = options.roc
# Log the run-configuration banner to stderr.
sys.stderr.write("_____________________\n")
sys.stderr.write("COMMAND: {}\n".format(" ".join(sys.argv)))
if options.mode in ["train", "refresh"]:
    sys.stderr.write("VALIDATED MODEL SAVED TO:\t{}\n".format(model_file_name))
else:
    sys.stderr.write("MODEL FOR TEST / PREDICTION:\t{}\n".format(model_file_name))
sys.stderr.write("SAVING ENSEMBLE?\t{}\n".format(SAVE_FOR_ENSEMBLE))
sys.stderr.write("PARSING MODE:\t{}\n".format(options.mode))
sys.stderr.write("_____________________\n\n")
# Read training data (and optional PTB scaffold data), then freeze the vocabularies.
if USE_PTB_CONSTITS:
    ptbexamples = read_ptb()
trainexamples, _, _ = read_conll(train_conll, options.syn)
post_train_lock_dicts()
# Frame id -> allowed frame-element ids, plus the core-FE map.
frmfemap, corefrmfemap, _ = read_frame_maps()
# Hack to handle FE in version 1.5 annotation!
frmfemap[FRAMEDICT.getid("Measurable_attributes")].append(FEDICT.getid("Dimension"))
frmfemap[FRAMEDICT.getid("Removing")].append(FEDICT.getid("Frequency"))
if USE_WV:
    wvs = get_wvec_map()
    # NOTE(review): indexing dict.values() requires Python 2; under Python 3 this
    # would need list(wvs.values())[0] — confirm the intended interpreter.
    PRETDIM = len(wvs.values()[0])
if USE_HIER:
    frmrelmap, feparents = read_frame_relations()
lock_dicts()
# Default labels - in CoNLL format these correspond to _
UNKTOKEN = VOCDICT.getid(UNK)
NOTANLU = LUDICT.getid(EMPTY_LABEL)
NOTANFEID = FEDICT.getid(EMPTY_FE)  # O in CoNLL format.
# Mode-specific evaluation data and output file names.
if options.mode in ["train", "refresh"]:
    devexamples, _, _ = read_conll(DEV_CONLL, options.syn)
    out_conll_file = "{}predicted-{}-argid-dev.conll".format(model_dir, VERSION)
elif options.mode in ["test", "ensemble"]:
    devexamples, _, _ = read_conll(options.test_conll, options.syn)
    out_conll_file = "{}predicted-{}-argid-test.conll".format(model_dir, VERSION)
    fe_file = "{}predicted-{}-argid-test.fes".format(model_dir, VERSION)
    if SAVE_FOR_ENSEMBLE:
        # [:-11] strips the trailing "-test.conll" from the basename.
        out_ens_file = "{}ensemble.{}".format(model_dir, out_conll_file.split("/")[-1][:-11])
    if options.mode == "ensemble":
        in_ens_file = "{}full-ensemble-{}".format(model_dir, out_conll_file.split("/")[-1][:-11])
elif options.mode == "predict":
    assert options.raw_input is not None
    instances, _, _ = read_conll(options.raw_input)
    out_conll_file = "{}predicted-args.conll".format(model_dir)
else:
    raise Exception("Invalid parser mode", options.mode)
# Default configurations.
# Hyperparameters and data settings; may be overridden by --config during
# training, or replaced wholesale by the saved configuration.json otherwise.
configuration = {"train": train_conll,
                 "use_exemplar": options.exemplar,
                 "use_hierarchy": USE_HIER,
                 "use_span_clip": USE_SPAN_CLIP,
                 "allowed_max_span_length": 20,
                 "using_dependency_parses": USE_DEPS,
                 "using_constituency_parses": USE_CONSTITS,
                 "using_scaffold_loss": USE_PTB_CONSTITS,
                 "loss_type": options.loss,
                 "cost_type": options.cost,
                 "recall_oriented_cost": RECALL_ORIENTED_COST,
                 "unk_prob": 0.1,
                 "dropout_rate": 0.01,
                 "token_dim": 60,
                 "pos_dim": 4,
                 "lu_dim": 64,
                 "lu_pos_dim": 2,
                 "frame_dim": 100,
                 "fe_dim": 50,
                 "phrase_dim": 16,
                 "path_lstm_dim": 64,
                 "path_dim": 64,
                 "dependency_relation_dim": 8,
                 "lstm_input_dim": 64,
                 "lstm_dim": 64,
                 "lstm_depth": 1,
                 "hidden_dim": 64,
                 "use_dropout": USE_DROPOUT,
                 "pretrained_embedding_dim": PRETDIM,
                 "num_epochs": 10 if not options.exemplar else 25,
                 "patience": 3,
                 "eval_after_every_epochs": 100,
                 "dev_eval_epoch_frequency": 5}
configuration_file = os.path.join(model_dir, "configuration.json")
# Persist the configuration when training; otherwise reuse the configuration
# the model was trained with. All file handles are closed via `with` (the
# original leaked the two read handles and redundantly closed inside `with`).
if options.mode == "train":
    if options.config:
        # A user-supplied configuration overrides the defaults above.
        with open(options.config, "r") as config_json:
            configuration = json.load(config_json)
    with open(configuration_file, "w") as fout:
        fout.write(json.dumps(configuration))
else:
    # Non-training modes must run with the saved training configuration.
    with open(configuration_file, "r") as json_file:
        configuration = json.load(json_file)
# Unpack the configuration into module-level hyperparameter constants.
UNK_PROB = configuration["unk_prob"]
DROPOUT_RATE = configuration["dropout_rate"]
ALLOWED_SPANLEN = configuration["allowed_max_span_length"]
TOKDIM = configuration["token_dim"]
POSDIM = configuration["pos_dim"]
LUDIM = configuration["lu_dim"]
LUPOSDIM = configuration["lu_pos_dim"]
FRMDIM = configuration["frame_dim"]
FEDIM = configuration["fe_dim"]
INPDIM = TOKDIM + POSDIM + 1  # +1 for the scalar distance-from-target input
PATHLSTMDIM = configuration["path_lstm_dim"]
PATHDIM = configuration["path_dim"]
if USE_CONSTITS:
    PHRASEDIM = configuration["phrase_dim"]
LSTMINPDIM = configuration["lstm_input_dim"]
LSTMDIM = configuration["lstm_dim"]
LSTMDEPTH = configuration["lstm_depth"]
HIDDENDIM = configuration["hidden_dim"]
ARGPOSDIM = ArgPosition.size()
SPANDIM = SpanWidth.size()
# Total width of the feature vector fed to the scoring MLP (extended below
# depending on the syntactic features in use).
ALL_FEATS_DIM = 2 * LSTMDIM \
                + LUDIM \
                + LUPOSDIM \
                + FRMDIM \
                + LSTMINPDIM \
                + LSTMDIM \
                + FEDIM \
                + ARGPOSDIM \
                + SPANDIM \
                + 2  # spanlen and log spanlen features and is a constitspan
if USE_DEPS:
    DEPHEADDIM = LSTMINPDIM + POSDIM
    DEPRELDIM = configuration["dependency_relation_dim"]
    OUTHEADDIM = OutHeads.size()
    PATHLSTMINPDIM = DEPHEADDIM + DEPRELDIM
    ALL_FEATS_DIM += OUTHEADDIM + PATHDIM
if USE_CONSTITS:
    ALL_FEATS_DIM += 1 + PHRASEDIM  # is a constit and what is it
    ALL_FEATS_DIM += PATHDIM
NUMEPOCHS = configuration["num_epochs"]
PATIENCE = configuration["patience"]
LOSS_EVAL_EPOCH = configuration["eval_after_every_epochs"]
DEV_EVAL_EPOCHS = configuration["dev_eval_epoch_frequency"] * LOSS_EVAL_EPOCH
# Clip or drop training examples whose spans exceed ALLOWED_SPANLEN.
trainexamples = filter_long_ex(trainexamples, USE_SPAN_CLIP, ALLOWED_SPANLEN, NOTANFEID)
sys.stderr.write("\nPARSER SETTINGS (see {})\n_____________________\n".format(configuration_file))
for key in sorted(configuration):
    sys.stderr.write("{}:\t{}\n".format(key.upper(), configuration[key]))
sys.stderr.write("\n")
def print_data_status(fsp_dict, vocab_str):
    """Write one dictionary's size and its unseen/unlearnt dev/test counts to stderr."""
    status_line = "# {} = {}\n\tUnseen in dev/test = {}\n\tUnlearnt in dev/test = {}\n".format(
        vocab_str, fsp_dict.size(), fsp_dict.num_unks()[0], fsp_dict.num_unks()[1])
    sys.stderr.write(status_line)
# Report vocabulary-coverage statistics for every dictionary.
print_data_status(VOCDICT, "Tokens")
print_data_status(POSDICT, "POS tags")
print_data_status(LUDICT, "LUs")
print_data_status(LUPOSDICT, "LU POS tags")
print_data_status(FRAMEDICT, "Frames")
print_data_status(FEDICT, "FEs")
print_data_status(CLABELDICT, "Constit Labels")
print_data_status(DEPRELDICT, "Dependency Relations")
sys.stderr.write("\n_____________________\n\n")
# Model definition: embedding tables, LSTM encoders, and scoring parameters.
model = Model()
# DyNet AdamTrainer positional args are (alpha, beta_1, beta_2, eps);
# beta_1=0.01 is unusually low — presumably deliberate, verify against paper.
adam = AdamTrainer(model, 0.0005, 0.01, 0.9999, 1e-8)
v_x = model.add_lookup_parameters((VOCDICT.size(), TOKDIM))  # token embeddings
p_x = model.add_lookup_parameters((POSDICT.size(), POSDIM))  # POS-tag embeddings
lu_x = model.add_lookup_parameters((LUDICT.size(), LUDIM))  # lexical-unit embeddings
lp_x = model.add_lookup_parameters((LUPOSDICT.size(), LUPOSDIM))  # LU POS embeddings
frm_x = model.add_lookup_parameters((FRAMEDICT.size(), FRMDIM))  # frame embeddings
ap_x = model.add_lookup_parameters((ArgPosition.size(), ARGPOSDIM))  # argument-position features
sp_x = model.add_lookup_parameters((SpanWidth.size(), SPANDIM))  # span-width features
if USE_DEPS:
    dr_x = model.add_lookup_parameters((DEPRELDICT.size(), DEPRELDIM))  # dependency relations
    oh_s = model.add_lookup_parameters((OutHeads.size(), OUTHEADDIM))  # out-head features
if USE_CONSTITS:
    ct_x = model.add_lookup_parameters((CLABELDICT.size(), PHRASEDIM))  # constituent labels
fe_x = model.add_lookup_parameters((FEDICT.size(), FEDIM))  # frame-element embeddings
if USE_WV:
    # Pretrained word vectors, initialized row-by-row and projected to LSTM input space.
    e_x = model.add_lookup_parameters((VOCDICT.size(), PRETDIM))
    for wordid in wvs:
        e_x.init_row(wordid, wvs[wordid])
    w_e = model.add_parameters((LSTMINPDIM, PRETDIM))
    b_e = model.add_parameters((LSTMINPDIM, 1))
# Projection of [token; POS; distance] into the LSTM input space.
w_i = model.add_parameters((LSTMINPDIM, INPDIM))
b_i = model.add_parameters((LSTMINPDIM, 1))
# Span encoders: forward and backward LSTMs used by get_span_embeddings.
builders = [
    LSTMBuilder(LSTMDEPTH, LSTMINPDIM, LSTMDIM, model),
    LSTMBuilder(LSTMDEPTH, LSTMINPDIM, LSTMDIM, model),
]
# Sentence-level BiLSTM over base embeddings, plus its combiner projection.
basefwdlstm = LSTMBuilder(LSTMDEPTH, LSTMINPDIM, LSTMINPDIM, model)
baserevlstm = LSTMBuilder(LSTMDEPTH, LSTMINPDIM, LSTMINPDIM, model)
w_bi = model.add_parameters((LSTMINPDIM, 2 * LSTMINPDIM))
b_bi = model.add_parameters((LSTMINPDIM, 1))
# Target-span and context encoders for the predicate.
tgtlstm = LSTMBuilder(LSTMDEPTH, LSTMINPDIM, LSTMDIM, model)
ctxtlstm = LSTMBuilder(LSTMDEPTH, LSTMINPDIM, LSTMDIM, model)
if USE_DEPS:
    # Dependency-path BiLSTM encoder and projections.
    w_di = model.add_parameters((LSTMINPDIM, LSTMINPDIM + DEPHEADDIM + DEPRELDIM))
    b_di = model.add_parameters((LSTMINPDIM, 1))
    pathfwdlstm = LSTMBuilder(LSTMDEPTH, LSTMINPDIM, PATHLSTMDIM, model)
    pathrevlstm = LSTMBuilder(LSTMDEPTH, LSTMINPDIM, PATHLSTMDIM, model)
    w_p = model.add_parameters((PATHDIM, 2 * PATHLSTMDIM))
    b_p = model.add_parameters((PATHDIM, 1))
elif USE_CONSTITS:
    # Constituency-path BiLSTM encoder and projections.
    cpathfwdlstm = LSTMBuilder(LSTMDEPTH, PHRASEDIM, PATHLSTMDIM, model)
    cpathrevlstm = LSTMBuilder(LSTMDEPTH, PHRASEDIM, PATHLSTMDIM, model)
    w_cp = model.add_parameters((PATHDIM, 2 * PATHLSTMDIM))
    b_cp = model.add_parameters((PATHDIM, 1))
# Scoring MLP: one hidden layer, then a single span-FE score.
w_z = model.add_parameters((HIDDENDIM, ALL_FEATS_DIM))
b_z = model.add_parameters((HIDDENDIM, 1))
w_f = model.add_parameters((1, HIDDENDIM))
b_f = model.add_parameters((1, 1))
if USE_PTB_CONSTITS:
    # PTB constituency scaffold classifier; DELTA downweights the scaffold loss
    # by the ratio of FrameNet to PTB examples.
    w_c = model.add_parameters((2, LSTMDIM))
    b_c = model.add_parameters((2, 1))
    w_fb = model.add_parameters((LSTMDIM, 2 * LSTMDIM))
    b_fb = model.add_parameters((LSTMDIM, 1))
    DELTA = len(trainexamples) * 1.0 / len(ptbexamples)
    sys.stderr.write("weighing PTB down by %f\n" % DELTA)
def get_base_embeddings(trainmode, unkdtokens, tg_start, sentence):
    """Build per-token input vectors (token emb + POS emb + distance-to-target)
    and run them through the sentence BiLSTM.

    Reads module-level parameters (v_x, p_x, w_i, ...) and flags (USE_WV,
    USE_DEPS, USE_DROPOUT). Returns one vector per token.
    """
    sentlen = len(unkdtokens)
    if trainmode:
        # Gaussian noise on token embeddings acts as a regularizer during training.
        emb_x = [noise(v_x[tok], 0.1) for tok in unkdtokens]
    else:
        emb_x = [v_x[tok] for tok in unkdtokens]
    pos_x = [p_x[pos] for pos in sentence.postags]
    # Scalar signed distance of each token from the target start (1-based).
    dist_x = [scalarInput(i - tg_start + 1) for i in xrange(sentlen)]
    baseinp_x = [(w_i * concatenate([emb_x[j], pos_x[j], dist_x[j]]) + b_i) for j in xrange(sentlen)]
    if USE_WV:
        for j in xrange(sentlen):
            if unkdtokens[j] in wvs:
                # Pretrained vectors stay frozen: no backprop into e_x rows.
                nonupdatedwv = nobackprop(e_x[unkdtokens[j]])
                baseinp_x[j] = baseinp_x[j] + w_e * nonupdatedwv + b_e
    embposdist_x = [rectify(baseinp_x[j]) for j in xrange(sentlen)]
    if USE_DROPOUT:
        basefwdlstm.set_dropout(DROPOUT_RATE)
        baserevlstm.set_dropout(DROPOUT_RATE)
    # Sentence BiLSTM; backward outputs are indexed back-to-front when combined.
    bfinit = basefwdlstm.initial_state()
    basefwd = bfinit.transduce(embposdist_x)
    brinit = baserevlstm.initial_state()
    baserev = brinit.transduce(reversed(embposdist_x))
    basebi_x = [rectify(w_bi * concatenate([basefwd[eidx], baserev[sentlen - eidx - 1]]) +
                        b_bi) for eidx in xrange(sentlen)]
    if USE_DEPS:
        # Augment each token with its dependency head's vector, head POS and relation.
        dhead_x = [embposdist_x[dephead] for dephead in sentence.depheads]
        dheadp_x = [pos_x[dephead] for dephead in sentence.depheads]
        drel_x = [dr_x[deprel] for deprel in sentence.deprels]
        baseinp_x = [rectify(w_di * concatenate([dhead_x[j], dheadp_x[j], drel_x[j], basebi_x[j]]) +
                             b_di) for j in xrange(sentlen)]
        basebi_x = baseinp_x
    return basebi_x
def get_target_frame_embeddings(embposdist_x, tfdict):
    """Encode the target (predicate) span and its immediate context, and combine
    them with the LU, LU-POS and frame embeddings.

    :param embposdist_x: per-token vectors from get_base_embeddings.
    :param tfdict: token index -> (lu, frame); keys cover the target span.
    :return: (tfemb, frame) where tfemb = [lu; lu_pos; frame; target; context].
    """
    tfkeys = sorted(tfdict)
    tg_start = tfkeys[0]
    sentlen = len(embposdist_x)
    # Adding target word feature
    lu, frame = tfdict[tg_start]
    tginit = tgtlstm.initial_state()
    target_x = tginit.transduce(embposdist_x[tg_start: tg_start + len(tfkeys) + 1])[-1]
    # Adding context features: one token on either side of the target span.
    ctxt = range(tg_start - 1, tfkeys[-1] + 2)
    if ctxt[0] < 0: ctxt = ctxt[1:]
    # NOTE(review): the check is `> sentlen` rather than `>= sentlen`; the slice
    # below tolerates a stop index of sentlen, so this does not crash either way.
    if ctxt[-1] > sentlen: ctxt = ctxt[:-1]
    c_init = ctxtlstm.initial_state()
    ctxt_x = c_init.transduce(embposdist_x[ctxt[0]:ctxt[-1]])[-1]
    # Adding features specific to LU and frame
    lu_v = lu_x[lu.id]
    lp_v = lp_x[lu.posid]
    if USE_HIER and frame.id in frmrelmap:
        # Frame hierarchy: sum the frame embedding with its related frames' embeddings.
        frame_v = esum([frm_x[frame.id]] + [frm_x[par] for par in frmrelmap[frame.id]])
    else:
        frame_v = frm_x[frame.id]
    tfemb = concatenate([lu_v, lp_v, frame_v, target_x, ctxt_x])
    return tfemb, frame
def get_span_embeddings(embpos_x):
    """Compute forward and backward LSTM encodings for every candidate span.

    Returns (fws, bws) where fws[i][j] encodes tokens i..j left-to-right and
    bws[i][j] right-to-left. With span clipping on, entries for spans longer
    than ALLOWED_SPANLEN remain None.
    """
    sentlen = len(embpos_x)
    fws = [[None for _ in xrange(sentlen)] for _ in xrange(sentlen)]
    bws = [[None for _ in xrange(sentlen)] for _ in xrange(sentlen)]
    if USE_DROPOUT:
        builders[0].set_dropout(DROPOUT_RATE)
        builders[1].set_dropout(DROPOUT_RATE)
    for i in xrange(sentlen):
        # Forward pass from i: tmpfws[k] encodes span i..i+k.
        fw_init = builders[0].initial_state()
        tmpfws = fw_init.transduce(embpos_x[i:])
        if len(tmpfws) != sentlen - i:
            raise Exception("incorrect number of forwards", len(tmpfws), i, sentlen)
        spanend = sentlen
        if USE_SPAN_CLIP: spanend = min(sentlen, i + ALLOWED_SPANLEN + 1)
        for j in xrange(i, spanend):
            # for j in xrange(i, sentlen):
            fws[i][j] = tmpfws[j - i]
        # Backward pass ending at i: tmpbws[k] encodes span i-k..i.
        bw_init = builders[1].initial_state()
        tmpbws = bw_init.transduce(reversed(embpos_x[:i + 1]))
        if len(tmpbws) != i + 1:
            raise Exception("incorrect number of backwards", i, len(tmpbws))
        spansize = i + 1
        if USE_SPAN_CLIP and spansize - 1 > ALLOWED_SPANLEN:
            spansize = ALLOWED_SPANLEN + 1
        for k in xrange(spansize):
            bws[i - k][i] = tmpbws[k]
    return fws, bws
def get_deppath_embeddings(sentence, embpos_x):
    """Encode each distinct shortest dependency path with a path BiLSTM.

    Paths are deduplicated via set(); returns {path: PATHDIM vector}.
    """
    spaths = {}
    for spath in set(sentence.shortest_paths.values()):
        shp = [embpos_x[node] for node in spath]
        if USE_DROPOUT:
            pathfwdlstm.set_dropout(DROPOUT_RATE)
            pathrevlstm.set_dropout(DROPOUT_RATE)
        pfinit = pathfwdlstm.initial_state()
        pathfwd = pfinit.transduce(shp)
        prinit = pathrevlstm.initial_state()
        pathrev = prinit.transduce(reversed(shp))
        # Combine final states of both directions and project to PATHDIM.
        pathlstm = rectify(w_p * concatenate([pathfwd[-1], pathrev[-1]]) + b_p)
        spaths[spath] = pathlstm
    return spaths
def get_cpath_embeddings(sentence):
    """Encode each distinct constituency path (sequence of phrase labels) with
    a path BiLSTM; returns {path: PATHDIM vector}.
    """
    phrpaths = {}
    for phrpath in set(sentence.cpaths.values()):
        shp = [ct_x[node] for node in phrpath]
        if USE_DROPOUT:
            cpathfwdlstm.set_dropout(DROPOUT_RATE)
            cpathrevlstm.set_dropout(DROPOUT_RATE)
        cpfinit = cpathfwdlstm.initial_state()
        cpathfwd = cpfinit.transduce(shp)
        cprinit = cpathrevlstm.initial_state()
        cpathrev = cprinit.transduce(reversed(shp))
        # Combine final states of both directions and project to PATHDIM.
        cpathlstm = rectify(w_cp * concatenate([cpathfwd[-1], cpathrev[-1]]) + b_cp)
        phrpaths[phrpath] = cpathlstm
    return phrpaths
def | |
element in enumerate(sequence):
# default input, argument name is 'inputs', value specified below
if index == 0:
inputs = 'inputs.0'
else:
inputs = 'steps.{}.produce'.format(index - 1)
if isinstance(element, pipeline_module.Pipeline):
step = pipeline_module.SubpipelineStep(element.to_json_structure(nest_subpipelines=True))
step.add_input(inputs)
elif isinstance(element, dict):
primitive_description = element['primitive_class'].metadata.query()
step = pipeline_module.PrimitiveStep(primitive_description)
if 'INPUTS' in element:
for arg_name, value in element['INPUTS']:
value_str = 'steps.{}.produce'.format(value)
step.add_argument(arg_name, metadata_base.ArgumentType.CONTAINER, value_str)
else:
# if not specified, use default
step.add_argument('inputs', metadata_base.ArgumentType.CONTAINER, inputs)
if 'HYPERPARAMS' in element:
for hyperparam_name in element['HYPERPARAMS']:
hyperparam = element['HYPERPARAMS'][hyperparam_name]
step.add_hyperparameter(hyperparam_name, hyperparam['TYPE'], hyperparam['DATA'])
else:
raise exceptions.InvalidArgumentTypeError(
'Unknown type {} in parameter \'sequence\''.format(type(element)))
step.add_output('produce')
pipe.add_step(step)
pipe.add_output('steps.{}.produce'.format(len(sequence) - 1))
return pipe
def _get_inputs(self):
# TODO: Make tests use a real Dataset instead of a list. Pipeline runs are defined on standard pipelines.
input_data = container.List([1, 3, 4, 2, 5, 3], generate_metadata=True)
# First have to add dummy metadata to the list, which otherwise exist in the dataset.
input_data.metadata = input_data.metadata.update((), {
'id': '0000000000000000000000000000000000000000000000000000000000000000',
'digest': '0000000000000000000000000000000000000000000000000000000000000000'
})
inputs = [input_data]
return inputs
    def _fit_pipeline(
        self, pipeline, inputs, problem_description=None, context=metadata_base.Context.TESTING, return_values=None
    ):
        """Fit *pipeline* on *inputs* and return the resulting pipeline-run.

        Only asserts that a run document was produced; the fit itself may fail
        (callers check success/failure separately).
        """
        r = runtime.Runtime(
            pipeline, problem_description=problem_description, context=context,
            environment=self.runtime_enviroment,
        )
        fit_result = r.fit(inputs, return_values=return_values)
        self.assertTrue(fit_result.pipeline_run)
        # We fake that inputs were added even if this is not a standard pipeline.
        # TODO: Make tests not require this.
        for input_dataset in inputs:
            fit_result.pipeline_run.add_input_dataset(input_dataset)
        return fit_result.pipeline_run
    def _fit_and_produce_pipeline(
        self, pipeline, inputs, problem_description = None, context = metadata_base.Context.TESTING
    ):
        """Fit then produce *pipeline*, asserting both runs validate and succeed.

        :return: (fit_pipeline_run, produce_pipeline_run)
        """
        r = runtime.Runtime(
            pipeline, problem_description=problem_description, context=context,
            environment=self.runtime_enviroment,
        )
        fit_result = r.fit(inputs)
        self.assertTrue(fit_result.pipeline_run)
        # _fake_inputs records the inputs on the run even for non-standard pipelines.
        self._fake_inputs(r, fit_result.pipeline_run, inputs)
        self._check_pipelines_valid_and_succeeded([fit_result.pipeline_run])
        produce_result = r.produce(inputs)
        self.assertTrue(produce_result.pipeline_run)
        self._fake_inputs(r, produce_result.pipeline_run, inputs)
        self._check_pipelines_valid_and_succeeded([produce_result.pipeline_run])
        return (fit_result.pipeline_run, produce_result.pipeline_run)
def _is_pipeline_run_successful(self, pipeline_run_json):
if pipeline_run_json['status']['state'] == metadata_base.PipelineRunStatusState.SUCCESS:
return True
elif pipeline_run_json['status']['state'] == metadata_base.PipelineRunStatusState.FAILURE:
return False
else:
self.fail('Pipeline-run document status state set to invalid value')
    def _validate_pipeline_run_structure(self, json_structure):
        """Validate a pipeline-run document against the schema plus consistency checks.

        On schema failure, prints the full recursive error tree and fails the test.
        """
        try:
            PIPELINE_RUN_SCHEMA_VALIDATOR.validate(json_structure)
            # Module-level helpers check internal consistency beyond the JSON schema.
            _validate_pipeline_run_status_consistency(json_structure)
            _validate_pipeline_run_timestamps(json_structure)
            _validate_pipeline_run_random_seeds(json_structure)
        except jsonschema.exceptions.ValidationError as error:
            print('\n', error, '\n')
            print("##### PRINTING RECURSIVE SUBERRORS #####\n")
            self.print_recursive_suberrors(error, indent='\n')
            self.fail("Pipeline_run document failed to validate against the schema")
def _invalidate_pipeline_run_structure(self, json_structure):
is_valid = False
try:
PIPELINE_RUN_SCHEMA_VALIDATOR.validate(json_structure)
is_valid = True
except jsonschema.exceptions.ValidationError as error:
pass
if is_valid:
self.fail("Pipeline_run document should not have validated against the schema")
def _check_pipelines_valid_and_succeeded(self, pipeline_runs):
for pipeline_run in pipeline_runs:
pipeline_run_json = pipeline_run.to_json_structure()
self._validate_pipeline_run_structure(pipeline_run_json)
self.assertTrue(self._is_pipeline_run_successful(pipeline_run_json), json.dumps(pipeline_run_json, indent=4))
def _check_pipelines_valid_and_failed(self, pipeline_runs):
for pipeline_run in pipeline_runs:
pipeline_run_json = pipeline_run.to_json_structure()
self._validate_pipeline_run_structure(pipeline_run_json)
self.assertFalse(self._is_pipeline_run_successful(pipeline_run_json))
def _check_pipelines_invalid(self, pipeline_runs):
for pipeline_run in pipeline_runs:
pipeline_run_json = pipeline_run.to_json_structure()
self._invalidate_pipeline_run_structure(pipeline_run_json)
def test_basic_pipeline_run(self):
inputs = self._get_inputs()
pipe = self._build_pipeline('1490432b-b48a-4a62-8977-5a56e52a3e85')
pipeline_runs = self._fit_and_produce_pipeline(pipe, inputs)
self._check_pipelines_valid_and_succeeded(pipeline_runs)
def test_pipeline_fit_with_return_values(self):
inputs = self._get_inputs()
pipe = self._build_pipeline('cf2e4f93-4b9a-4a49-9ab5-92927b3125df')
pipeline_runs = self._fit_pipeline(pipe, inputs, return_values=['steps.0.produce'])
self._check_pipelines_valid_and_succeeded([pipeline_runs])
    def test_pipeline_run_failure(self):
        """Each FailPrimitive failure point must yield a valid run document marked as failed."""
        inputs = self._get_inputs()
        # Make the primitive fail in each lifecycle method in turn.
        for hyperparam in ('__init__', 'set_training_data', 'fit', 'produce'):
            failure_pipeline = self._build_pipeline('18e96ab3-e3c5-4b29-a446-3e81982eba9c', sequence=[{'primitive_class': RandomPrimitive},
                                                   {'primitive_class': FailPrimitive, 'HYPERPARAMS': {'method_to_fail': {'TYPE': metadata_base.ArgumentType.VALUE, 'DATA': hyperparam}}}])
            fit_pipeline_run = self._fit_pipeline(failure_pipeline, inputs)
            self._check_pipelines_valid_and_failed([fit_pipeline_run])
    def test_pipeline_run_failure_return_error(self):
        """A failing step surfaces as fit_result.error (StepFailedError) and via check_success()."""
        inputs = self._get_inputs()
        pipeline = self._build_pipeline('80dee50d-9ca4-4ad5-9a52-7ea30f3eb3e5', sequence=[{'primitive_class': RandomPrimitive},
                                        {'primitive_class': FailPrimitive, 'HYPERPARAMS': {'method_to_fail': {'TYPE': metadata_base.ArgumentType.VALUE, 'DATA': 'fit'}}}])
        r = runtime.Runtime(
            pipeline, context=metadata_base.Context.TESTING,
            environment=self.runtime_enviroment,
        )
        fit_result = r.fit(inputs)
        # The error is exposed on the result object...
        self.assertTrue(fit_result.error)
        self.assertEqual(str(fit_result.error), 'Step 1 for pipeline 80dee50d-9ca4-4ad5-9a52-7ea30f3eb3e5 failed.')
        self.assertIsInstance(fit_result.error, exceptions.StepFailedError)
        # ...and re-raised by check_success().
        with self.assertRaises(exceptions.StepFailedError) as cm:
            fit_result.check_success()
        self.assertEqual(str(cm.exception), 'Step 1 for pipeline 80dee50d-9ca4-4ad5-9a52-7ea30f3eb3e5 failed.')
    def test_pipeline_run_failure_with_subpipeline(self):
        """Failures inside a subpipeline step still produce valid, failed run documents."""
        inputs = self._get_inputs()
        # Make the subpipeline's primitive fail in each lifecycle method in turn.
        for hyperparam in ('__init__', 'set_training_data', 'fit', 'produce'):
            failure_subpipeline = self._build_pipeline('bcd96144-34ae-4a67-a1b5-b911a07d03ed', sequence=[{'primitive_class': FailPrimitive, 'HYPERPARAMS': {'method_to_fail': {'TYPE': metadata_base.ArgumentType.VALUE, 'DATA': hyperparam}}}])
            failure_pipeline = self._build_pipeline('cbec1cb2-64df-4d4a-81ea-a829eeac0612', sequence=[{'primitive_class': RandomPrimitive}, failure_subpipeline, {'primitive_class': IncrementPrimitive}])
            fit_pipeline_run = self._fit_pipeline(failure_pipeline, inputs)
            self._check_pipelines_valid_and_failed([fit_pipeline_run])
    # tests previous_pipeline_run when it should be None, and when it should be full
    def test_all_previous_pipeline_run_types(self):
        """FIT runs carry no previous_pipeline_run; PRODUCE runs reference the FIT run's id."""
        inputs = self._get_inputs()
        pipe = self._build_pipeline('2617ca0c-552a-4014-a999-2904184ed648')
        fit_pipeline_run, produce_pipeline_run = self._fit_and_produce_pipeline(pipe, inputs)
        self._check_pipelines_valid_and_succeeded([fit_pipeline_run, produce_pipeline_run])
        fit_pipeline_run_json = fit_pipeline_run.to_json_structure()
        self.assertTrue(
            'previous_pipeline_run' not in fit_pipeline_run_json,
            'pipeline_run should not contain previous_pipeline_run'
        )
        # The produce run must chain back to the fit run by id.
        produce_pipeline_run_json = produce_pipeline_run.to_json_structure()
        self.assertNotEqual(produce_pipeline_run_json['previous_pipeline_run'], None)
        self.assertEqual(fit_pipeline_run_json['id'], produce_pipeline_run_json['previous_pipeline_run']['id'])
    # tests pipeline_run given each type of context
    def test_all_pipeline_run_context_types(self):
        """Every Context enum value is accepted; a non-enum context fails schema validation."""
        inputs = self._get_inputs()
        pipe = self._build_pipeline('4fb64b4b-baa6-404a-afe3-1ad68a1993c1')
        for context in metadata_base.Context:
            pipeline_runs = self._fit_and_produce_pipeline(
                pipe, inputs, context=context
            )
            self._check_pipelines_valid_and_succeeded(pipeline_runs)
        # A duck-typed object with only a .name is not a valid Context.
        class InvalidContext:
            def __init__(self, name):
                self.name = name
        invalid_context = InvalidContext('INVALID_CONTEXT')
        pipe = self._build_pipeline('1c05ae77-1f74-48bd-9341-c31338a9c9f0')
        with self.assertRaises(jsonschema.exceptions.ValidationError):
            pipeline_runs = self._fit_and_produce_pipeline(pipe, inputs, context=invalid_context)
    # tests pipeline_run given primitive steps and given subpipeline steps
    def test_all_pipeline_run_step_types(self):
        """Runs are valid and successful for primitive-only and subpipeline-containing pipelines."""
        inputs = self._get_inputs()
        pipeline_without_subpipeline = self._build_pipeline('dca8efbe-4daa-47a6-a811-9ca633ffc90b', [{'primitive_class': RandomPrimitive}, {'primitive_class': IncrementPrimitive}, {'primitive_class': IncrementPrimitive}, {'primitive_class': IncrementPrimitive}])
        pipeline_runs = self._fit_and_produce_pipeline(pipeline_without_subpipeline, inputs)
        self._check_pipelines_valid_and_succeeded(pipeline_runs)
        # Same pipeline shape, but the middle increments live in a subpipeline.
        subpipeline = self._build_pipeline('06dfb07a-f151-467c-9f1c-51a6bf6378a3', [{'primitive_class': IncrementPrimitive}, {'primitive_class': IncrementPrimitive}])
        pipeline_with_subpipeline = self._build_pipeline('293c1883-f81a-459d-a1a8-ba19467d5ad6', [{'primitive_class': RandomPrimitive}, subpipeline, {'primitive_class': IncrementPrimitive}])
        pipeline_runs = self._fit_and_produce_pipeline(pipeline_with_subpipeline, inputs)
        self._check_pipelines_valid_and_succeeded(pipeline_runs)
    # tests when there is a subpipeline within a subpipeline
    def test_recursive_subpipeline(self):
        """A subpipeline nested inside another subpipeline still produces valid, successful runs."""
        inputs = self._get_inputs()
        subpipeline = self._build_pipeline('1eba8278-45da-448e-92a8-a6daf780563f', [{'primitive_class': IncrementPrimitive}, {'primitive_class': IncrementPrimitive}])
        # Wrap the first subpipeline inside a second one.
        subpipeline = self._build_pipeline('b350beb3-4421-4627-906c-92cbbe900834', [{'primitive_class': IncrementPrimitive}, subpipeline, {'primitive_class': IncrementPrimitive}])
        pipeline_with_recursive_subpipeline = self._build_pipeline('17e3ae59-e132-4c56-8573-20be6f84ea05', [{'primitive_class': RandomPrimitive}, subpipeline, {'primitive_class': IncrementPrimitive}])
        pipeline_runs = self._fit_and_produce_pipeline(pipeline_with_recursive_subpipeline, inputs)
        self._check_pipelines_valid_and_succeeded(pipeline_runs)
    def test_all_pipeline_run_hyperparam_types(self):
        """Exercise every hyperparameter ArgumentType (VALUE, CONTAINER, DATA, multi-DATA, PRIMITIVE)."""
        inputs = self._get_inputs()
        # test value_argument hyperparams (runtime sets defaults)
        pipeline = self._build_pipeline('301702a9-cf1e-4332-9116-696c9908586a')
        pipeline_runs = self._fit_and_produce_pipeline(pipeline, inputs)
        self._check_pipelines_valid_and_succeeded(pipeline_runs)
        # test container_argument
        pipeline = self._build_pipeline('8390ab6f-d619-4cc5-b343-22b91f81eecd', sequence=[{'primitive_class': RandomPrimitive},
                                        {'primitive_class': ContainerHyperparamPrimitive, 'HYPERPARAMS': {'dataframe': {'TYPE': metadata_base.ArgumentType.CONTAINER, 'DATA': 'steps.0.produce'}}}])
        pipeline_runs = self._fit_and_produce_pipeline(pipeline, inputs)
        self._check_pipelines_valid_and_succeeded(pipeline_runs)
        # test data_argument
        pipeline = self._build_pipeline('f0e0e370-97db-4e67-9eff-5e9b79f253e6', sequence=[{'primitive_class': RandomPrimitive}, {'primitive_class': AbsSumPrimitive},
                                        {'primitive_class': DataHyperparamPrimitive, 'INPUTS': [('inputs', 0)], 'HYPERPARAMS': {'value': {'TYPE': metadata_base.ArgumentType.DATA, 'DATA': 'steps.1.produce'}}}])
        pipeline_runs = self._fit_and_produce_pipeline(pipeline, inputs)
        self._check_pipelines_valid_and_succeeded(pipeline_runs)
        # test data_arguments
        pipeline = self._build_pipeline('ab71ff74-5cd1-4e36-8c63-c2cd79085173', sequence=[{'primitive_class': RandomPrimitive}, {'primitive_class': AbsSumPrimitive}, {'primitive_class': AbsSumPrimitive, 'INPUTS': [('inputs', 0)]},
                                        {'primitive_class': MultiDataHyperparamPrimitive, 'INPUTS': [('inputs', 0)], 'HYPERPARAMS': {'values': {'TYPE': metadata_base.ArgumentType.DATA, 'DATA': ['steps.1.produce', 'steps.2.produce']}}}])
        pipeline_runs = self._fit_and_produce_pipeline(pipeline, inputs)
        self._check_pipelines_valid_and_succeeded(pipeline_runs)
        # test primitive argument
        pipeline = self._build_pipeline('c8b291f1-ff67-49e0-b8a3-a0e6a2d6f013', sequence=[{'primitive_class': RandomPrimitive}, {'primitive_class': AbsSumPrimitive},
                                        {'primitive_class': PrimitiveHyperparamPrimitive, 'INPUTS': [('inputs', 0)], 'HYPERPARAMS': {'primitive': {'TYPE': metadata_base.ArgumentType.PRIMITIVE, 'DATA': 1}}}])
        pipeline_runs = self._fit_and_produce_pipeline(pipeline, inputs)
        self._check_pipelines_valid_and_succeeded(pipeline_runs)
    def test_all_pipeline_run_method_call_base_metadata_types(self):
        """Manually drive PipelineRun's step/method-call bookkeeping and validate the document."""
        pipeline = pipeline_module.Pipeline.from_json(TEST_PIPELINE_1, resolver=Resolver())
        pipeline_run = PipelineRun(
            pipeline, phase=metadata_base.PipelineRunPhase.FIT, context=metadata_base.Context.TESTING,
            environment=self.runtime_enviroment, random_seed=0
        )
        inputs = self._get_inputs()[0]
        pipeline_run.add_input_dataset(inputs)
        # Simulate a full, successful run of step 0 with one 'fit' method call.
        pipeline_run.run_started()
        pipeline_run.step_started(0)
        primitive_step_id = pipeline_run.add_primitive_step(pipeline.steps[0])
        method_call_id = pipeline_run.add_method_call_to_primitive_step(primitive_step_id, 'fit')
        pipeline_run.method_call_started(method_call_id)
        result = base.CallResult(inputs)
        pipeline_run.method_call_successful(method_call_id)
        pipeline_run.set_method_call_result_metadata(method_call_id, result)
        pipeline_run.step_successful(primitive_step_id)
        pipeline_run.run_successful()
        self._validate_pipeline_run_structure(pipeline_run.to_json_structure())
    # test that the phase is set correctly for fit and produce
    def test_all_pipeline_run_phase_types(self):
        """The fit run's phase is 'FIT' and the produce run's phase is 'PRODUCE'."""
        inputs = self._get_inputs()
        pipeline = self._build_pipeline('d95a9816-8ede-4fe2-89c5-f5c9d9f1d9fd')
        pipeline_runs = self._fit_and_produce_pipeline(pipeline, inputs)
        self._check_pipelines_valid_and_succeeded(pipeline_runs)
        fit_pipeline_run = pipeline_runs[0]
        fit_pipeline_run_json = fit_pipeline_run.to_json_structure()
        self.assertEqual(fit_pipeline_run_json['run']['phase'], 'FIT')
        produce_pipeline_run = pipeline_runs[1]
        produce_pipeline_run_json = produce_pipeline_run.to_json_structure()
        self.assertEqual(produce_pipeline_run_json['run']['phase'], 'PRODUCE')
    # tests that the first method_call of each step is __init__()
    def test_pipeline_run_init_method_calls(self):
        """In FIT runs, the first recorded method call of every step is __init__."""
        inputs = self._get_inputs()
        pipeline = self._build_pipeline('5a9321df-7e40-443b-9e12-f1d840a677cd')
        pipeline_runs = self._fit_and_produce_pipeline(pipeline, inputs)
        for pipeline_run in pipeline_runs:
            pipeline_run_json = pipeline_run.to_json_structure()
            # Only FIT runs construct primitives, so only they record __init__.
            if pipeline_run_json['run']['phase'] == 'FIT':
                for step in pipeline_run_json['steps']:
                    first_method_call = step['method_calls'][0]
                    self.assertEqual(first_method_call['name'], '__init__')
def print_recursive_suberrors(self, error, indent):
for suberror in sorted(error.context, key=lambda e: e.schema_path):
print(f'{indent}', list(suberror.schema_path), ", ", suberror.message)
self.print_recursive_suberrors(suberror, indent + '\t')
    def get_data(self, dataset_name='iris_dataset_1', problem_name='iris_problem_1'):
        """Load a test dataset (and optionally its problem description) from the data directory.

        :return: (problem_description or None, dataset)
        """
        if problem_name:
            problem_doc_path = os.path.join(
                os.path.dirname(__file__), 'data', 'problems', problem_name, 'problemDoc.json'
            )
            problem_description = problem.Problem.load('file://' + problem_doc_path)
        else:
            problem_description = None
        datasetDoc_path = 'file://' + os.path.join(os.path.dirname(__file__), 'data', 'datasets', dataset_name, 'datasetDoc.json')
        iris_dataset = container.Dataset.load(datasetDoc_path)
        return problem_description, iris_dataset
def test_recording_hyperparams(self):
    """Check which hyperparameters get recorded in pipeline runs.

    Default and runtime-supplied hyperparams must appear in the fit run;
    hyperparams fixed in the pipeline description must not; produce runs
    must record none at all.

    Fixes copy-pasted/misspelled assertion messages ("defualt", and the
    runtime-hyperparams value check wrongly labelled "default").
    """
    pipeline = self._build_pipeline(
        '84d5dbb8-6e82-4187-801e-83a46069608f',
        sequence=[
            {
                'primitive_class': IncrementPrimitive
            },
            {
                # This step pins 'amount' in the pipeline itself.
                'primitive_class': IncrementPrimitive,
                'HYPERPARAMS': {
                    'amount': {
                        'TYPE': metadata_base.ArgumentType.VALUE,
                        'DATA': 3.14
                    }
                }
            },
            {
                'primitive_class': IncrementPrimitive
            }
        ],
    )
    # Step 2 gets its 'amount' at runtime instead of from the pipeline.
    runtime_hyperparams = [{}, {}, {'amount': 2.72}]
    inputs = [container.DataFrame({'a': [1, 2, 3], 'b': [3, 5, 8]}, generate_metadata=True)]
    # TODO: Make tests use a real Dataset instead of a dataframe. Pipeline runs are defined on standard pipelines.
    # First have to add dummy metadata to the dataframe, which otherwise exist in the dataset.
    inputs[0].metadata = inputs[0].metadata.update((), {
        'id': '0000000000000000000000000000000000000000000000000000000000000000',
        'digest': '0000000000000000000000000000000000000000000000000000000000000000'
    })
    r = runtime.Runtime(pipeline, runtime_hyperparams, context=metadata_base.Context.TESTING, environment=self.runtime_enviroment)
    fit_result = r.fit(inputs=inputs)
    self._fake_inputs(r, fit_result.pipeline_run, inputs)
    fit_pipeline_run_json = fit_result.pipeline_run.to_json_structure()
    # Default hyperparams must be recorded in the fit pipeline run.
    self.assertTrue(
        'amount' in fit_pipeline_run_json['steps'][0]['hyperparams'],
        'default hyperparams not recorded in pipeline_run'
    )
    self.assertEqual(
        IncrementHyperparams.defaults().values_to_json_structure()['amount'],
        fit_pipeline_run_json['steps'][0]['hyperparams']['amount']['data'],
        'default hyperparams incorrectly recorded in pipeline_run'
    )
    # Hyperparams fixed in the pipeline must not be duplicated into the run.
    self.assertFalse(
        'hyperparams' in fit_pipeline_run_json['steps'][1],
        'hyperparams specified in the pipeline should not be recorded in the pipeline_run'
    )
    # Hyperparams set at runtime must be recorded in the fit pipeline run.
    self.assertTrue(
        'amount' in fit_pipeline_run_json['steps'][2]['hyperparams'],
        'runtime hyperparams not recorded in pipeline_run'
    )
    self.assertEqual(
        runtime_hyperparams[2]['amount'],
        fit_pipeline_run_json['steps'][2]['hyperparams']['amount']['data'],
        'runtime hyperparams incorrectly recorded in pipeline_run'
    )
    produce_result = r.produce(inputs=inputs)
    self._fake_inputs(r, produce_result.pipeline_run, inputs)
    for step in produce_result.pipeline_run.to_json_structure()['steps']:
        self.assertFalse(
            'hyperparams' in step,
            'hyperparams should not be set in produce pipeline_runs'
        )
def test_recording_arguments(self):
    """Check that arguments named in the pipeline description are not duplicated
    into the pipeline run's recorded step arguments."""
    pipeline = self._build_pipeline('46bb32a5-f9a0-4c33-97c8-f426ed147e0a')
    inputs = self._get_inputs()
    r = runtime.Runtime(pipeline, context=metadata_base.Context.TESTING, environment=self.runtime_enviroment)
    fit_result = r.fit(inputs=inputs)
    self._fake_inputs(r, fit_result.pipeline_run, inputs)
    fit_pipeline_run_json = fit_result.pipeline_run.to_json_structure()
    pipeline_json_structure = pipeline.to_json_structure()
    # Walk pipeline steps and run steps in lockstep: any argument declared in
    # the pipeline step must be absent from the run step's arguments.
    for pipeline_step, pipeline_run_step in zip(pipeline_json_structure['steps'], fit_pipeline_run_json['steps']):
        if 'arguments' in pipeline_run_step:
            for argument_name in pipeline_step['arguments']:
                self.assertFalse(
                    argument_name in pipeline_run_step['arguments'],
                    'pipeline step arguments should not be recorded in pipeline_run method_call arguments'
                )
    produce_result = r.produce(inputs=inputs)
    self._fake_inputs(r, produce_result.pipeline_run, inputs)
| |
from Voicelab.VoicelabWizard.VoicelabDataModel import VoicelabDataModel
from Voicelab.pipeline.Pipeline import Pipeline
import Voicelab.toolkits.Voicelab as Voicelab
import copy
from Voicelab.VoicelabWizard.InputTab import InputTab
import parselmouth
from parselmouth.praat import call
from Voicelab.default_settings import visualize_list, function_requirements, display_whitelist
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
from datetime import datetime
from PyQt5.QtWidgets import QMessageBox
"""
# Voicelab Controller: coordinates the interaction between the presentation of the data and its storage
# The data controller does not need to know how the data gets in from the user, nor how the data
# is stored. This could in the future let us change the front end and backend more flexibly.
"""
class VoicelabController:
"""
# init: setup the base state of a controller, including a data model
"""
def __init__(self):
    # Backing model that stores voices, functions, settings, and results.
    self.data_model = VoicelabDataModel()
    # Caches used by save_state()/load_state() to toggle between setting sets.
    self.active_settings_cache = {}
    self.active_functions_cache = {}
    # Settings of the most recent processing run (exported alongside results).
    self.last_used_settings = {}
    self.progress = 0
    # Default progress callback prints to stdout; the GUI replaces this.
    self.progress_callback = lambda node, start, current, end: print(
        node.node_id, start, current, end
    )
    # Matplotlib figures and spectra created during processing, tracked so
    # they can be explicitly closed (matplotlib keeps figures alive globally).
    self.figures = []
    self.spectra = []
    self.displayable_results = {}
"""
# load_figure: load a single figure into the list of figures. This lets us keep track of and
# close the figures when they are not needed. This is important especially given matplotlib's statefulness
"""
def load_figure(self, figure):
    # Track the figure so reset_figures() can clear and close it later.
    self.figures.append(figure)
"""
# load_spectrum: load a single spectrum into the list of spectrums. This lets us keep track of and
# close the spectrums when they are not needed. This is important especially given matplotlib's statefulness
"""
def load_spectrum(self, spectrum):
    # Track the spectrum so it can be managed/released alongside the figures.
    self.spectra.append(spectrum)
"""
# reset_figures: clear all of the figures that we have saved and empty the figures variable
"""
def reset_figures(self):
    """Clear and close every tracked matplotlib figure, then forget them all."""
    for tracked_figure in self.figures:
        tracked_figure.clear()
        plt.close(tracked_figure)
    self.figures = []
"""
# load voices: from a list of file paths, create voice objects and save them in the model
"""
def load_voices(self, file_paths):
    # Read each file with praat-parselmouth and register it in the model,
    # keyed by its file path. Returns the model's full mapping of loaded voices.
    for file_path in file_paths:
        self.data_model.load_voice(parselmouth.Sound(file_path), file_path)
    return self.data_model.loaded_voices
"""
# unload voices: from a list of file paths, remove the associated voice file from the model
"""
def unload_voices(self, file_paths):
    """Remove each given file from the model; return the remaining loaded voices."""
    for path in file_paths:
        self.data_model.unload_voice(path)
    return self.data_model.loaded_voices
"""
# activate_voices: from a list of file paths, set the associated voice files for processing
"""
def activate_voices(self, file_paths):
    """Mark the given files as active for processing.

    Returns the model's current list of active voices.
    """
    # Removed a leftover debug print of the active-voice list that polluted
    # stdout on every activation.
    self.data_model.activate_voices(file_paths)
    return self.data_model.active_voices
"""
# deactivate voices: from a list of file paths, remove the associated files from processing
"""
def deactivate_voices(self, file_paths):
    """Remove each given file from processing; return the remaining active voices."""
    for path in file_paths:
        self.data_model.deactivate_voice(path)
    return self.data_model.active_voices
"""
# load function: load a single function into the data model
"""
def load_function(self, fn_name, fn_node, default=False) -> object:
    # Register a WARIO node under fn_name; `default` marks it enabled by default.
    # Returns the model's full mapping of loaded functions.
    self.data_model.load_function(fn_name, fn_node, default)
    return self.data_model.loaded_functions
"""
# activate function: set a single function to run during processing
# TODO: this behaviour could probably be handled only by the controller rather than by the model
"""
def activate_function(self, fn_name):
    # Mark the named function to run during processing; returns the updated
    # mapping of active functions.
    self.data_model.activate_function(fn_name)
    return self.data_model.active_functions
"""
# deactivate function: set a single function to not run during processing
# todo: this behaviour could probably be handled only by the controller rather than by the model
"""
def deactivate_function(self, fn_name):
    # Exclude the named function from processing; returns the updated
    # mapping of active functions.
    self.data_model.deactivate_function(fn_name)
    return self.data_model.active_functions
"""
# set setting: set a value for a given setting
"""
def set_settings(self, fn, settings, values):
    """Set the value of each named setting for function `fn`.

    `settings` and `values` are parallel sequences; an IndexError propagates
    if `values` is shorter than `settings` (same as the original behavior).
    Returns the model's active settings.
    """
    # Fixed enumerate misuse: the loop variable was ignored and the list
    # re-indexed (`settings[i]`); use the yielded item directly.
    for index, setting in enumerate(settings):
        self.data_model.set_setting(fn, setting, values[index])
    return self.data_model.active_settings
"""
# activate setting: indicate that a setting is non-default
# TODO: this behaviour could probably be handled only by the controller rather than by the model
"""
def activate_settings(self, settings):
    """Mark each given setting as active (non-default).

    Returns the model's active settings.
    """
    # Fixed idiom: the enumerate index was never used; iterate directly.
    for setting in settings:
        self.data_model.activate_setting(setting)
    return self.data_model.active_settings
"""
# reset setting: take a setting id and reset it to it's default value
"""
def reset_setting(self, fn_name, setting_name):
    # Restore one setting of function `fn_name` to its default value.
    self.data_model.reset_setting(fn_name, setting_name)
"""
# save_state: caches the setting values so that they can be retrieved later. Used currently for
# toggling between default and non-default settings
"""
def save_state(self):
    # Shallow-copy the current active settings/functions so load_state() can
    # restore them later (used to toggle default vs. custom settings).
    # NOTE(review): relies on self.active_settings / self.active_functions
    # being provided elsewhere (not visible in this section) — confirm.
    self.active_settings_cache = copy.copy(self.active_settings)
    self.active_functions_cache = copy.copy(self.active_functions)
"""
# load state : swap whatever is currently loaded with what is cached.
"""
def load_state(self):
    # Swap the cached settings/functions (captured by save_state()) back
    # into the model.
    self.data_model.swap_active_settings(self.active_settings_cache)
    self.data_model.swap_active_functions(self.active_functions_cache)
"""
# reset active settings : swap all active settings with the default values. convenience function
# so that we don't have to loop through all of the settings each time we want to do this action
"""
def reset_active_settings(self):
    # Replace all active settings with defaults in one swap.
    # NOTE(review): assumes self.default_settings is defined elsewhere
    # (not visible in this section) — confirm.
    self.data_model.swap_active_settings(self.default_settings)
"""
# reset active functions : swap all active functions with the default functions. convenience
# so that we don't have to loop through all of the settings each time we want to do this action
"""
def reset_active_functions(self):
    # Replace all active functions with the defaults in one swap.
    # NOTE(review): assumes self.default_functions is defined elsewhere
    # (not visible in this section) — confirm.
    self.data_model.swap_active_functions(self.default_functions)
"""
# reset results : Empty the results in preperation for another run
"""
def reset_results(self):
    # Empty stored results in preparation for another processing run.
    self.data_model.reset_results()
"""
# start processing: Start processing the loaded voice files using the set of active functions
# and active settings.
"""
def start_processing(self, active_voices, active_functions, active_settings):
# we want to deep copy the active settings otherwise they may be unintentionally
# modifed with values during processing
# self.last_used_settings = copy.deepcopy(active_settings)
# DRF - I guess we don't really need to do that after all since it's commented out
# save the settings so we can put them in the excel file later
self.last_used_settings = active_settings
# reset the results in case this isn't our first run since the program opened
self.reset_results()
# Create an empty WARIO pipeline
pipeline = Pipeline()
# Create a node that will load all of the voices
# todo figure out why we need to do this, since we already loaded the voices
load_voices = Voicelab.LoadVoicesNode("Load Voice")
# Set up the load node with the appropriate file locations
load_voices.args["file_locations"] = active_voices
# Add the node to the pipeline
pipeline.add(load_voices)
# We want to specially configure the visualize voice node because later on we will be attaching things to it later
if "Create Spectrograms" in active_functions:
# Create a node that will draw the default spectrogram for the loaded voices, we always want to plot the spectrogram
visualize_voices = Voicelab.VisualizeVoiceNode("Create Spectrograms")
# if there are settings the user has configured, we want to attach them to the node
visualize_voices.args = active_settings["Create Spectrograms"]
# visualize_voices.args[value] = self.model['settings']['Visualize Voice']['value'][value]
# Connect the loaded voice to the visualize node so it has access to it
pipeline.connect((load_voices, "voice"), (visualize_voices, "voice"))
# Add the node to the pipeline
pipeline.add(visualize_voices)
# todo Fix this
if "Visualize Spectrum" in active_functions:
# Create a node that will draw the default spectrogram for the loaded voices, we always want to plot the spectrogram
visualize_spectrum = Voicelab.VisualizeSpectrumNode("Visualize Spectrum")
# if there are settings the user has configured, we want to attach them to the node
visualize_spectrum.args = active_settings["Visualize Spectrum"]
# Connect the loaded voice to the visualize node so it has access to it
pipeline.connect((load_voices, "voice"), (visualize_spectrum, "voice"))
# Add the node to the pipeline
pipeline.add(visualize_spectrum)
# For each checked operation we create the appropriate node, assign its associated
# parameters, and add it to the pipeline connecting it to the load voice node and
# visualize node. those two functions are always performed
for fn in active_functions:
# Visualize is handled outside of this
# if fn != "Create Spectrograms":
active_functions[fn].args = active_settings[fn]
pipeline.add(active_functions[fn])
pipeline.connect(
(load_voices, "voice"), (active_functions[fn], "voice")
)
pipeline.connect(
(load_voices, "file_path"), (active_functions[fn], "file_path")
)
# if "Create Spectrograms" in active_functions and fn in visualize_list:
# pipeline.connect(
# (active_functions[fn], visualize_list[fn]),
# (visualize_voices, visualize_list[fn]),
# )
# Some nodes may require specific values from upstream nodes (as specified in the default settings file)
# Resolve these dependancies and create the relevant connections
for fn_name in function_requirements:
if fn_name in active_functions:
child_node = active_functions[fn_name]
# function requirements are a defined as a tuple of parent_name followed by the name of the shared argument
for parent_name, argument in function_requirements[fn_name]:
parent_node = active_functions[parent_name]
pipeline.connect((parent_node, argument), (child_node, argument))
pipeline.listen(self.progress_callback)
pipeline_results = pipeline.start()
finished_window = QMessageBox()
finished_window.setWindowTitle("Finished")
finished_window.setText("Finished processing.\nCheck your data, then save.")
finished_window.setIcon(QMessageBox.Information)
finished_window.exec_()
# Collect the results of the pipeline running
for i, result_file in enumerate(pipeline_results):
for result_fn in pipeline_results[i]:
if result_fn.node_id == "Create Spectrograms":
# "figure" is the maptlotlib figure returned from VisualizeVoiceNode.py
# it is a dictionary key, the dictionary value is the actual figure `fig` from | |
# (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
lookup: schedule_rruleset
author: <NAME> (@john-westcott-iv)
short_description: Generate an rruleset string
requirements:
- pytz
- python-dateutil >= 2.7.0
description:
- Returns a string based on criteria which represents an rrule
options:
_terms:
description:
- The start date of the ruleset
- Used for all frequencies
- Format should be YYYY-MM-DD [HH:MM:SS]
required: True
type: str
timezone:
description:
- The timezone to use for this rule
- Used for all frequencies
- Format should be as US/Eastern
- Defaults to America/New_York
type: str
rules:
description:
- Array of rules in the rruleset
type: array
required: True
suboptions:
frequency:
description:
- The frequency of the schedule
- none - Run this schedule once
- minute - Run this schedule every x minutes
- hour - Run this schedule every x hours
- day - Run this schedule every x days
- week - Run this schedule weekly
- month - Run this schedule monthly
required: True
choices: ['none', 'minute', 'hour', 'day', 'week', 'month']
interval:
description:
- The repetition in months, weeks, days hours or minutes
- Used for all types except none
type: int
end_on:
description:
- How to end this schedule
- If this is not defined, this schedule will never end
- If this is a positive integer, this schedule will end after this number of occurrences
- If this is a date in the format YYYY-MM-DD [HH:MM:SS], this schedule ends after this date
- Used for all types except none
type: str
bysetpos:
description:
- Specify an occurrence number, corresponding to the nth occurrence of the rule inside the frequency period.
- A comma-separated list of positions (first, second, third, fourth or last)
type: string
bymonth:
description:
- The months this schedule will run on
- A comma-separated list which can contain values 1-12
type: string
bymonthday:
description:
- The day of the month this schedule will run on
- A comma-separated list which can contain values 1-31
type: string
byyearday:
description:
- The year day numbers to run this schedule on
- A comma-separated list which can contain values 1-366
type: string
byweekno:
description:
- The week numbers to run this schedule on
- A comma-separated list which can contain values as described in ISO8601
type: string
byweekday:
description:
- The days to run this schedule on
- A comma-separated list which can contain values sunday, monday, tuesday, wednesday, thursday, friday, saturday
type: string
byhour:
description:
- The hours to run this schedule on
- A comma-separated list which can contain values 0-23
type: string
byminute:
description:
- The minutes to run this schedule on
- A comma-separated list which can contain values 0-59
type: string
include:
description:
- If this rule should be included (RRULE) or excluded (EXRULE)
type: bool
default: True
"""
EXAMPLES = """
- name: Create a ruleset for everyday except Sundays
set_fact:
complex_rule: "{{ query(awx.awx.schedule_rruleset, '2022-04-30 10:30:45', rules=rrules, timezone='UTC' ) }}"
vars:
rrules:
- frequency: 'day'
interval: 1
- frequency: 'day'
interval: 1
byweekday: 'sunday'
include: False
"""
RETURN = """
_raw:
description:
- String in the rrule format
type: string
"""
import re
from ansible.module_utils.six import raise_from
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleError
from datetime import datetime
try:
import pytz
from dateutil import rrule
except ImportError as imp_exc:
raise_from(AnsibleError('{0}'.format(imp_exc)), imp_exc)
class LookupModule(LookupBase):
frequencies = {
'none': rrule.DAILY,
'minute': rrule.MINUTELY,
'hour': rrule.HOURLY,
'day': rrule.DAILY,
'week': rrule.WEEKLY,
'month': rrule.MONTHLY,
}
weekdays = {
'monday': rrule.MO,
'tuesday': rrule.TU,
'wednesday': rrule.WE,
'thursday': rrule.TH,
'friday': rrule.FR,
'saturday': rrule.SA,
'sunday': rrule.SU,
}
set_positions = {
'first': 1,
'second': 2,
'third': 3,
'fourth': 4,
'last': -1,
}
# plugin constructor
def __init__(self, *args, **kwargs):
    # No extra state to initialize; defer entirely to LookupBase.
    super().__init__(*args, **kwargs)
@staticmethod
def parse_date_time(date_string):
    """Parse 'YYYY-MM-DD HH:MM:SS', falling back to date-only 'YYYY-MM-DD'.

    Raises ValueError if the string matches neither format.
    """
    try:
        return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S')
    except ValueError:
        # No time component; let a second failure propagate to the caller.
        return datetime.strptime(date_string, '%Y-%m-%d')
def process_integer(self, field_name, rule, min_value, max_value, rule_number):
    """Validate and normalize rule[field_name] into a list of ints within [min_value, max_value].

    Tolerates several input shapes:
      something: 1            - a single int
      something: "1"          - a single str
      something: "1,2,3"      - a comma-separated string of ints
      something: "1, 2,3"     - a comma-separated string of ints (with spaces)
      something: ["1", "2"]   - a list of strings
      something: [1, 2, 3]    - a list of ints

    Raises AnsibleError (naming the 1-based rule number) if any entry is not
    an integer in the allowed range.
    """
    raw_value = rule[field_name]
    # Normalize every accepted input shape into a list of scalar entries.
    # (isinstance replaces the `type(x) == int` anti-pattern; as a side
    # effect a bool now fails range validation cleanly instead of raising
    # AttributeError on .split().)
    if isinstance(raw_value, int):
        raw_value = [raw_value]
    elif not isinstance(raw_value, list):
        raw_value = raw_value.split(',')
    rule[field_name] = raw_value
    return_values = []
    for value in raw_value:
        # Strip whitespace from string entries (space-delimited lists).
        if isinstance(value, str):
            value = value.strip()
        # str() coerces ints (from a list-of-ints input) for the regex check.
        if not re.match(r"^\d+$", str(value)) or not min_value <= int(value) <= max_value:
            raise AnsibleError('In rule {0} {1} must be between {2} and {3}'.format(rule_number, field_name, min_value, max_value))
        return_values.append(int(value))
    return return_values
def process_list(self, field_name, rule, valid_list, rule_number):
    """Map each (comma-separated or listed) entry of rule[field_name] through valid_list.

    Raises AnsibleError naming the rule number when an entry is not a key of
    valid_list.
    """
    entries = rule[field_name]
    # Accept a comma-separated string by splitting it into a list in place.
    if type(entries) != list:
        entries = entries.split(',')
        rule[field_name] = entries
    resolved = []
    for entry in entries:
        entry = entry.strip()
        if entry not in valid_list:
            raise AnsibleError('In rule {0} {1} must only contain values in {2}'.format(rule_number, field_name, ', '.join(valid_list.keys())))
        resolved.append(valid_list[entry])
    return resolved
def run(self, terms, variables=None, **kwargs):
if len(terms) != 1:
raise AnsibleError('You may only pass one schedule type in at a time')
# Validate the start date
try:
start_date = LookupModule.parse_date_time(terms[0])
except Exception as e:
raise_from(AnsibleError('The start date must be in the format YYYY-MM-DD [HH:MM:SS]'), e)
if not kwargs.get('rules', None):
raise AnsibleError('You must include rules to be in the ruleset via the rules parameter')
# All frequencies can use a timezone but rrule can't support the format that AWX uses.
# So we will do a string manip here if we need to
timezone = 'America/New_York'
if 'timezone' in kwargs:
if kwargs['timezone'] not in pytz.all_timezones:
raise AnsibleError('Timezone parameter is not valid')
timezone = kwargs['timezone']
rules = []
got_at_least_one_rule = False
for rule_index in range(0, len(kwargs['rules'])):
rule = kwargs['rules'][rule_index]
rule_number = rule_index + 1
valid_options = [
"frequency",
"interval",
"end_on",
"bysetpos",
"bymonth",
"bymonthday",
"byyearday",
"byweekno",
"byweekday",
"byhour",
"byminute",
"include",
]
invalid_options = list(set(rule.keys()) - set(valid_options))
if invalid_options:
raise AnsibleError('Rule {0} has invalid options: {1}'.format(rule_number, ', '.join(invalid_options)))
frequency = rule.get('frequency', None)
if not frequency:
raise AnsibleError("Rule {0} is missing a frequency".format(rule_number))
if frequency not in LookupModule.frequencies:
raise AnsibleError('Frequency of rule {0} is invalid {1}'.format(rule_number, frequency))
rrule_kwargs = {
'freq': LookupModule.frequencies[frequency],
'interval': rule.get('interval', 1),
'dtstart': start_date,
}
# If we are a none frequency we don't need anything else
if frequency == 'none':
rrule_kwargs['count'] = 1
else:
# All non-none frequencies can have an end_on option
if 'end_on' in rule:
end_on = rule['end_on']
if re.match(r'^\d+$', end_on):
rrule_kwargs['count'] = end_on
else:
try:
rrule_kwargs['until'] = LookupModule.parse_date_time(end_on)
except Exception as e:
raise_from(
AnsibleError('In rule {0} end_on must either be an integer or in the format YYYY-MM-DD [HH:MM:SS]'.format(rule_number)), e
)
if 'bysetpos' in rule:
rrule_kwargs['bysetpos'] = self.process_list('bysetpos', rule, LookupModule.set_positions, rule_number)
if 'bymonth' in rule:
rrule_kwargs['bymonth'] = self.process_integer('bymonth', rule, 1, 12, rule_number)
if 'bymonthday' in rule:
rrule_kwargs['bymonthday'] = self.process_integer('bymonthday', rule, 1, 31, rule_number)
if 'byyearday' in rule:
rrule_kwargs['byyearday'] = self.process_integer('byyearday', rule, 1, 366, rule_number) # 366 for leap years
if 'byweekno' in rule:
rrule_kwargs['byweekno'] = self.process_integer('byweekno', rule, 1, 52, rule_number)
if 'byweekday' in rule:
rrule_kwargs['byweekday'] = self.process_list('byweekday', rule, LookupModule.weekdays, rule_number)
if 'byhour' in rule:
rrule_kwargs['byhour'] = self.process_integer('byhour', rule, 0, 23, rule_number)
if 'byminute' in rule:
rrule_kwargs['byminute'] = self.process_integer('byminute', rule, 0, 59, rule_number)
try:
generated_rule = str(rrule.rrule(**rrule_kwargs))
except Exception as e:
raise_from(AnsibleError('Failed to parse rrule for rule {0} {1}: {2}'.format(rule_number, str(rrule_kwargs), e)), e)
# AWX requires an interval. rrule will not add interval if it's set to 1
if rule.get('interval', 1) == 1:
generated_rule = "{0};INTERVAL=1".format(generated_rule)
if rule_index == 0:
# rrule puts a \n | |
<reponame>capriele/Crazyflie-Indoor-Position-Logger-Controller
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Fake link driver used to debug the UI without using the Crazyflie.
The operation of this driver can be controlled in two ways, either by
connecting to different URIs or by sending messages to the DebugDriver port
though CRTP once connected.
For normal connections a console thread is also started that will send
generated console output via CRTP.
"""
import errno
import logging
import random
import re
import string
import struct
import sys
import time
from datetime import datetime
from threading import Thread
from .crtpdriver import CRTPDriver
from .crtpstack import CRTPPacket
from .crtpstack import CRTPPort
from .exceptions import WrongUriType
from cflib.crazyflie.log import LogTocElement
from cflib.crazyflie.param import ParamTocElement
if sys.version_info < (3,):
import Queue as queue
else:
import queue
__author__ = '<EMAIL> <NAME>'
__all__ = ['DebugDriver']
logger = logging.getLogger(__name__)
# This setup is used to debug raw memory logging
# Maps channel id -> fake signal spec: value range ('min'/'max'), per-update
# step ('mod'), and the reported log variable type id ('vartype').
memlogging = {0x01: {'min': 0, 'max': 255, 'mod': 1, 'vartype': 1},
              0x02: {'min': 0, 'max': 65000, 'mod': 100, 'vartype': 2},
              0x03: {'min': 0, 'max': 100000, 'mod': 1000, 'vartype': 3},
              0x04: {'min': -100, 'max': 100, 'mod': 1, 'vartype': 4},
              0x05: {'min': -10000, 'max': 10000, 'mod': 2000, 'vartype': 5},
              0x06: {'min': -50000, 'max': 50000, 'mod': 1000, 'vartype': 6},
              0x07: {'min': 0, 'max': 255, 'mod': 1, 'vartype': 1}}
class FakeMemory:
    """In-memory stand-in for a Crazyflie memory (I2C EEPROM or 1-wire)."""

    TYPE_I2C = 0
    TYPE_1W = 1

    def __init__(self, type, size, addr, data=None):
        # `type` shadows the builtin but must keep its name for keyword callers.
        self.type = type
        self.size = size
        self.addr = addr
        # Zero-filled buffer of `size` entries, optionally pre-seeded with `data`.
        self.data = [0] * size
        if data:
            for index, byte in enumerate(data):
                self.data[index] = byte

    def erase(self):
        """Reset the whole buffer to zeros."""
        self.data = [0] * self.size
class DebugDriver(CRTPDriver):
""" Debug driver used for debugging UI/communication without using a
Crazyflie"""
def __init__(self):
    """Build the fake log/param TOCs and start the packet-handling thread.

    The TOCs were previously ~150 lines of repeated list.append() calls with
    hand-maintained 'varid' values; they are now data-driven tables where the
    varid is derived from the entry's position, so ids cannot drift.
    """
    self.fakeLoggingThreads = []
    self._fake_mems = []
    self.needs_resending = False
    # Fake log TOC entries: (vartype, vargroup, varname, min, max, mod).
    # The varid is the index of the entry in this list.
    log_entries = [
        (5, 'imu', 'gyro_x', -10000, 10000, 1000),
        (5, 'imu', 'gyro_y', -10000, 10000, 150),
        (5, 'imu', 'gyro_z', -10000, 10000, 200),
        (5, 'imu', 'acc_x', -1000, 1000, 15),
        (5, 'imu', 'acc_y', -1000, 1000, 10),
        (5, 'imu', 'acc_z', -1000, 1000, 20),
        (7, 'stabilizer', 'roll', -90, 90, 2),
        (7, 'stabilizer', 'pitch', -90, 90, 1.5),
        (7, 'stabilizer', 'yaw', -90, 90, 2.5),
        (7, 'pm', 'vbat', 3.0, 4.2, 0.1),
        (6, 'motor', 'm1', 0, 65000, 1000),
        (6, 'motor', 'm2', 0, 65000, 1000),
        (6, 'motor', 'm3', 0, 65000, 1000),
        (6, 'motor', 'm4', 0, 65000, 1000),
        (2, 'stabilizer', 'thrust', 0, 65000, 1000),
        (7, 'baro', 'asl', 540, 545, 0.5),
        (7, 'baro', 'aslRaw', 540, 545, 1.0),
        (7, 'posEstimatorAlt', 'estimatedZ', 540, 545, 0.5),
        (7, 'baro', 'temp', 26, 38, 1.0),
        (7, 'posCtlAlt', 'targetZ', 542, 543, 0.1),
        (6, 'gps', 'lat', 556112190, 556112790, 10),
        (6, 'gps', 'lon', 129945110, 129945710, 10),
        (6, 'gps', 'hMSL', 0, 100000, 1000),
        (6, 'gps', 'heading', -10000000, 10000000, 100000),
        (6, 'gps', 'gSpeed', 0, 1000, 100),
        (3, 'gps', 'hAcc', 0, 5000, 100),
        (1, 'gps', 'fixType', 0, 5, 1),
    ]
    self.fakeLogToc = [
        {'varid': varid, 'vartype': vartype, 'vargroup': vargroup,
         'varname': varname, 'min': vmin, 'max': vmax, 'mod': mod}
        for varid, (vartype, vargroup, varname, vmin, vmax, mod)
        in enumerate(log_entries)
    ]
    # Fake param TOC entries: (vartype, vargroup, varname, writable, value).
    # The varid is the index of the entry in this list.
    param_entries = [
        (0x08, 'blah', 'p', True, 100),
        (0x0A, 'info', 'cid', False, 1234),
        (0x06, 'rpid', 'prp', True, 1.5),
        (0x06, 'rpid', 'pyaw', True, 2.5),
        (0x06, 'rpid', 'irp', True, 3.5),
        (0x06, 'rpid', 'iyaw', True, 4.5),
        (0x06, 'pid_attitude', 'pitch_kd', True, 5.5),
        (0x06, 'rpid', 'dyaw', True, 6.5),
        (0x06, 'apid', 'prp', True, 7.5),
        (0x06, 'apid', 'pyaw', True, 8.5),
        (0x06, 'apid', 'irp', True, 9.5),
        (0x06, 'apid', 'iyaw', True, 10.5),
        (0x06, 'apid', 'drp', True, 11.5),
        (0x06, 'apid', 'dyaw', True, 12.5),
        (0x08, 'flightctrl', 'xmode', True, 1),
        (0x08, 'flightctrl', 'ratepid', True, 1),
        (0x08, 'imu_sensors', 'HMC5883L', False, 1),
        (0x08, 'imu_sensors', 'MS5611', False, 1),
        (0x0A, 'firmware', 'revision0', False, 0xdeb),
        (0x09, 'firmware', 'revision1', False, 0x99),
        (0x08, 'firmware', 'modified', False, 1),
        (0x08, 'imu_tests', 'MPU6050', False, 1),
        (0x08, 'imu_tests', 'HMC5883L', False, 1),
        (0x08, 'imu_tests', 'MS5611', False, 1),
    ]
    self.fakeParamToc = [
        {'varid': varid, 'vartype': vartype, 'vargroup': vargroup,
         'varname': varname, 'writable': writable, 'value': value}
        for varid, (vartype, vargroup, varname, writable, value)
        in enumerate(param_entries)
    ]
    self.fakeflash = {}
    self._random_answer_delay = True
    self.queue = queue.Queue()
    # Background thread that answers CRTP packets using the fake TOCs.
    self._packet_handler = _PacketHandlingThread(self.queue,
                                                 self.fakeLogToc,
                                                 self.fakeParamToc,
                                                 self._fake_mems)
    self._packet_handler.start()
def scan_interface(self, address):
    """Return the fixed list of fake debug URIs with their descriptions.

    The *address* argument is ignored; the same set of debug connection
    options is always reported.
    """
    options = (
        ('debug://0/0', 'Normal connection'),
        ('debug://0/1', 'Fail to connect'),
        ('debug://0/2', 'Incomplete log TOC download'),
        ('debug://0/3', 'Insert random delays on replies'),
        ('debug://0/4',
         'Insert random delays on replies and random TOC CRCs'),
        ('debug://0/5', 'Normal but random TOC CRCs'),
        ('debug://0/6', 'Normal but empty I2C and OW mems'),
    )
    return [[uri, description] for uri, description in options]
def get_status(self):
    """Return the driver status string; the debug link is always 'Ok'."""
    status = 'Ok'
    return status
def get_name(self):
    """Return the short identifier used to select this link driver."""
    driver_name = 'debug'
    return driver_name
def connect(self, uri, linkQualityCallback, linkErrorCallback):
if not re.search('^debug://', uri):
raise WrongUriType('Not a debug URI')
self._packet_handler.linkErrorCallback = linkErrorCallback
self._packet_handler.linkQualityCallback = linkQualityCallback
# Debug-options for this driver that
# is set by using different connection URIs
self._packet_handler.inhibitAnswers = False
self._packet_handler.doIncompleteLogTOC = False
self._packet_handler.bootloader = False
self._packet_handler._random_answer_delay = False
self._packet_handler._random_toc_crcs = False
if (re.search('^debug://.*/1\Z', uri)):
self._packet_handler.inhibitAnswers = True
if (re.search('^debug://.*/110\Z', uri)):
self._packet_handler.bootloader = True
if (re.search('^debug://.*/2\Z', uri)):
self._packet_handler.doIncompleteLogTOC = True
if (re.search('^debug://.*/3\Z', uri)):
self._packet_handler._random_answer_delay = True
if (re.search('^debug://.*/4\Z', uri)):
self._packet_handler._random_answer_delay = True
self._packet_handler._random_toc_crcs = True
if (re.search('^debug://.*/5\Z', uri)):
self._packet_handler._random_toc_crcs = True
if len(self._fake_mems) == 0:
# Add empty EEPROM
self._fake_mems.append(FakeMemory(type=0, size=100, addr=0))
# Add EEPROM with | |
limited at the protocol level to a logically
adjacent topology.
ATTACK_VECTOR_LOCAL: The vulnerable component is not bound to the
network stack and the attacker's path is via read/write/execute
capabilities.
ATTACK_VECTOR_PHYSICAL: The attack requires the attacker to physically
touch or manipulate the vulnerable component.
"""
ATTACK_VECTOR_UNSPECIFIED = 0
ATTACK_VECTOR_NETWORK = 1
ATTACK_VECTOR_ADJACENT = 2
ATTACK_VECTOR_LOCAL = 3
ATTACK_VECTOR_PHYSICAL = 4
class AvailabilityImpactValueValuesEnum(_messages.Enum):
    r"""This metric measures the impact to the availability of the impacted
    component resulting from a successfully exploited vulnerability.

    Values:
      IMPACT_UNSPECIFIED: Invalid value.
      IMPACT_HIGH: High impact.
      IMPACT_LOW: Low impact.
      IMPACT_NONE: No impact.
    """
    # NOTE(review): appears to be auto-generated (apitools message style);
    # the numeric values are part of the wire format — do not renumber.
    IMPACT_UNSPECIFIED = 0
    IMPACT_HIGH = 1
    IMPACT_LOW = 2
    IMPACT_NONE = 3
class ConfidentialityImpactValueValuesEnum(_messages.Enum):
    r"""This metric measures the impact to the confidentiality of the
    information resources managed by a software component due to a
    successfully exploited vulnerability.

    Values:
      IMPACT_UNSPECIFIED: Invalid value.
      IMPACT_HIGH: High impact.
      IMPACT_LOW: Low impact.
      IMPACT_NONE: No impact.
    """
    # NOTE(review): appears to be auto-generated (apitools message style);
    # the numeric values are part of the wire format — do not renumber.
    IMPACT_UNSPECIFIED = 0
    IMPACT_HIGH = 1
    IMPACT_LOW = 2
    IMPACT_NONE = 3
class IntegrityImpactValueValuesEnum(_messages.Enum):
    r"""This metric measures the impact to integrity of a successfully
    exploited vulnerability.

    Values:
      IMPACT_UNSPECIFIED: Invalid value.
      IMPACT_HIGH: High impact.
      IMPACT_LOW: Low impact.
      IMPACT_NONE: No impact.
    """
    # NOTE(review): appears to be auto-generated (apitools message style);
    # the numeric values are part of the wire format — do not renumber.
    IMPACT_UNSPECIFIED = 0
    IMPACT_HIGH = 1
    IMPACT_LOW = 2
    IMPACT_NONE = 3
class PrivilegesRequiredValueValuesEnum(_messages.Enum):
    r"""This metric describes the level of privileges an attacker must possess
    before successfully exploiting the vulnerability.

    Values:
      PRIVILEGES_REQUIRED_UNSPECIFIED: Invalid value.
      PRIVILEGES_REQUIRED_NONE: The attacker is unauthorized prior to attack,
        and therefore does not require any access to settings or files of the
        vulnerable system to carry out an attack.
      PRIVILEGES_REQUIRED_LOW: The attacker requires privileges that provide
        basic user capabilities that could normally affect only settings and
        files owned by a user. Alternatively, an attacker with Low privileges
        has the ability to access only non-sensitive resources.
      PRIVILEGES_REQUIRED_HIGH: The attacker requires privileges that provide
        significant (e.g., administrative) control over the vulnerable
        component allowing access to component-wide settings and files.
    """
    # NOTE(review): appears to be auto-generated (apitools message style);
    # the numeric values are part of the wire format — do not renumber.
    PRIVILEGES_REQUIRED_UNSPECIFIED = 0
    PRIVILEGES_REQUIRED_NONE = 1
    PRIVILEGES_REQUIRED_LOW = 2
    PRIVILEGES_REQUIRED_HIGH = 3
class ScopeValueValuesEnum(_messages.Enum):
    r"""The Scope metric captures whether a vulnerability in one vulnerable
    component impacts resources in components beyond its security scope.

    Values:
      SCOPE_UNSPECIFIED: Invalid value.
      SCOPE_UNCHANGED: An exploited vulnerability can only affect resources
        managed by the same security authority.
      SCOPE_CHANGED: An exploited vulnerability can affect resources beyond
        the security scope managed by the security authority of the vulnerable
        component.
    """
    # NOTE(review): appears to be auto-generated (apitools message style);
    # the numeric values are part of the wire format — do not renumber.
    SCOPE_UNSPECIFIED = 0
    SCOPE_UNCHANGED = 1
    SCOPE_CHANGED = 2
class UserInteractionValueValuesEnum(_messages.Enum):
    r"""This metric captures the requirement for a human user, other than the
    attacker, to participate in the successful compromise of the vulnerable
    component.

    Values:
      USER_INTERACTION_UNSPECIFIED: Invalid value.
      USER_INTERACTION_NONE: The vulnerable system can be exploited without
        interaction from any user.
      USER_INTERACTION_REQUIRED: Successful exploitation of this vulnerability
        requires a user to take some action before the vulnerability can be
        exploited.
    """
    # NOTE(review): appears to be auto-generated (apitools message style);
    # the numeric values are part of the wire format — do not renumber.
    USER_INTERACTION_UNSPECIFIED = 0
    USER_INTERACTION_NONE = 1
    USER_INTERACTION_REQUIRED = 2
# CVSS-style metric fields of the enclosing message. NOTE(review): the field
# numbers (1..9) are part of the generated wire format — do not renumber.
attackComplexity = _messages.EnumField('AttackComplexityValueValuesEnum', 1)
attackVector = _messages.EnumField('AttackVectorValueValuesEnum', 2)
availabilityImpact = _messages.EnumField('AvailabilityImpactValueValuesEnum', 3)
baseScore = _messages.FloatField(4)
confidentialityImpact = _messages.EnumField('ConfidentialityImpactValueValuesEnum', 5)
integrityImpact = _messages.EnumField('IntegrityImpactValueValuesEnum', 6)
privilegesRequired = _messages.EnumField('PrivilegesRequiredValueValuesEnum', 7)
scope = _messages.EnumField('ScopeValueValuesEnum', 8)
userInteraction = _messages.EnumField('UserInteractionValueValuesEnum', 9)
class Details(_messages.Message):
    r"""Details of a subscription.

    Enums:
      TypeValueValuesEnum: The type of subscription

    Fields:
      endTime: The time the subscription has or will end.
      startTime: The time the subscription has or will start.
      type: The type of subscription
    """
    # NOTE(review): appears to be auto-generated (apitools message style);
    # field numbers and enum values are part of the wire format — do not edit.

    class TypeValueValuesEnum(_messages.Enum):
        r"""The type of subscription

        Values:
          TYPE_UNSPECIFIED: Default value. This value is unused.
          STANDARD: The standard subscription.
          TRIAL: The trial subscription.
          ALPHA: The alpha subscription.
          DEMO: The demo subscription for channel partners.
        """
        TYPE_UNSPECIFIED = 0
        STANDARD = 1
        TRIAL = 2
        ALPHA = 3
        DEMO = 4

    endTime = _messages.StringField(1)
    startTime = _messages.StringField(2)
    type = _messages.EnumField('TypeValueValuesEnum', 3)
class EventThreatDetectionSettings(_messages.Message):
    r"""Resource capturing the settings for the Event Threat Detection service.

    Enums:
      ServiceEnablementStateValueValuesEnum: The state of enablement for the
        service at its level of the resource hierarchy. A DISABLED state will
        override all module enablement_states to DISABLED.

    Messages:
      ModulesValue: The configurations including the state of enablement for the
        service's different modules. The absence of a module in the map implies
        its configuration is inherited from its parent's.

    Fields:
      modules: The configurations including the state of enablement for the
        service's different modules. The absence of a module in the map implies
        its configuration is inherited from its parent's.
      name: The resource name of the EventThreatDetectionSettings. Formats: *
        organizations/{organization}/eventThreatDetectionSettings *
        folders/{folder}/eventThreatDetectionSettings *
        projects/{project}/eventThreatDetectionSettings
      serviceEnablementState: The state of enablement for the service at its
        level of the resource hierarchy. A DISABLED state will override all
        module enablement_states to DISABLED.
      updateTime: Output only. The time the settings were last updated.
    """
    # NOTE(review): appears to be auto-generated (apitools message style);
    # field numbers and enum values are part of the wire format — do not edit.

    class ServiceEnablementStateValueValuesEnum(_messages.Enum):
        r"""The state of enablement for the service at its level of the resource
        hierarchy. A DISABLED state will override all module enablement_states to
        DISABLED.

        Values:
          ENABLEMENT_STATE_UNSPECIFIED: Default value. This value is unused.
          INHERITED: State is inherited from the parent resource.
          ENABLED: State is enabled.
          DISABLED: State is disabled.
        """
        ENABLEMENT_STATE_UNSPECIFIED = 0
        INHERITED = 1
        ENABLED = 2
        DISABLED = 3

    @encoding.MapUnrecognizedFields('additionalProperties')
    class ModulesValue(_messages.Message):
        r"""The configurations including the state of enablement for the service's
        different modules. The absence of a module in the map implies its
        configuration is inherited from its parent's.

        Messages:
          AdditionalProperty: An additional property for a ModulesValue object.

        Fields:
          additionalProperties: Additional properties of type ModulesValue
        """

        class AdditionalProperty(_messages.Message):
            r"""An additional property for a ModulesValue object.

            Fields:
              key: Name of the additional property.
              value: A Config attribute.
            """
            key = _messages.StringField(1)
            value = _messages.MessageField('Config', 2)

        additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

    modules = _messages.MessageField('ModulesValue', 1)
    name = _messages.StringField(2)
    serviceEnablementState = _messages.EnumField('ServiceEnablementStateValueValuesEnum', 3)
    updateTime = _messages.StringField(4)
class Finding(_messages.Message):
r"""Security Command Center finding. A finding is a record of assessment
data like security, risk, health, or privacy, that is ingested into Security
Command Center for presentation, notification, analysis, policy testing, and
enforcement. For example, a cross-site scripting (XSS) vulnerability in an
App Engine application is a finding.
Enums:
FindingClassValueValuesEnum: The class of the finding.
MuteValueValuesEnum: Indicates the mute state of a finding (either
unspecified, muted, unmuted or undefined).
SeverityValueValuesEnum: The severity of the finding. This field is
managed by the source that writes the finding.
StateValueValuesEnum: The state of the finding.
Messages:
ExternalSystemsValue: Output only. Third party SIEM/SOAR fields within
SCC, contains external system information and external system finding
fields.
SourcePropertiesValue: Source specific properties. These properties are
managed by the source that writes the finding. The key names in the
source_properties map must be between 1 and 255 characters, and must
start with a letter and contain alphanumeric characters or underscores
only.
Fields:
access: Access details associated to the Finding, such as more information
on the caller, which method was accessed, from where, etc.
canonicalName: The canonical name of the finding. It's either "organizatio
ns/{organization_id}/sources/{source_id}/findings/{finding_id}",
"folders/{folder_id}/sources/{source_id}/findings/{finding_id}" or
"projects/{project_number}/sources/{source_id}/findings/{finding_id}",
depending on the closest CRM ancestor of the resource associated with
the finding.
category: The additional taxonomy group within findings from a given
source. This field is immutable after creation time. Example:
"XSS_FLASH_INJECTION"
createTime: The time at which the finding was created in Security Command
Center.
eventTime: The time at which the event took place, or when an update to
the finding occurred. For example, if the finding represents an open
firewall it would capture the time the detector believes the firewall
became open. The accuracy is determined by the detector. If the finding
were to be resolved afterward, this time would reflect when the finding
was resolved. Must not be set to a value greater than the current
timestamp.
externalSystems: Output only. Third party SIEM/SOAR fields within SCC,
contains external system information and external system finding fields.
externalUri: The URI that, if available, points to a web page outside of
Security Command Center where additional information about the finding
can be found. This field is guaranteed to be either empty or a well
formed URL.
findingClass: The class of the finding.
indicator: Represents what's commonly known as an Indicator of compromise
(IoC) in computer forensics. This is an | |
10
try:
tempstep = int(values['-nxstepr-'])
if tempstep >= 0:
nxstep = tempstep
except:
pass
if (float(ntstep) * float(nxstep)) > 50000:
cancelRun = True
confirmLayout = [[sg.Text('The selected calculation is large and may take some time.')],[sg.Button('Continue'), sg.Button('Cancel')]]
confirmWindow = sg.Window('Large calculation confirmation', confirmLayout, location = [400,0], finalize=True, keep_on_top = True)
while True:
event, values = confirmWindow.read(timeout=timeout)
if event == sg.WIN_CLOSED or event == 'Cancel':
break
elif event == 'Continue':
cancelRun = False
break
confirmWindow.close()
xlo = 0
try:
templo = float(values['-xlor-'])
if 0 <= templo <= 1:
xlo = templo
except:
pass
xhi = 1
try:
temphi = float(values['-xhir-'])
if 0 <= temphi <= 1:
xhi = temphi
except:
pass
tlo = 300
try:
templo = float(values['-temperaturer-'])
if 295 <= templo <= 6000:
tlo = templo
except:
pass
thi = 1000
try:
temphi = float(values['-endtemperaturer-'])
if 295 <= temphi <= 6000:
thi = temphi
except:
pass
if not cancelRun:
self.parent.calculation.makeBackup()
self.parent.sgw.Element('Undo').Update(disabled = False)
self.parent.calculation.writeInputFile(xlo,xhi,nxstep,tlo,thi,ntstep)
self.parent.calculation.runCalc()
self.parent.calculation.makePlot()
self.parent.macro.append('macroPD.makeBackup()')
self.parent.macro.append(f'macroPD.writeInputFile({xlo},{xhi},{nxstep},{tlo},{thi},{ntstep})')
self.parent.macro.append('macroPD.runCalc()')
class LabelWindow:
    """Dialog for adding a phase label to the parent's phase diagram.

    Accepts a concentration (plain float or a fraction such as ``1/3``) and a
    temperature, validates them, and forwards the label to the parent
    calculation while recording the action in the parent's macro log.
    """

    def __init__(self, parent):
        self.parent = parent
        windowList.append(self)
        xLabLayout = [[sg.Text('Element 2 Concentration')], [sg.Input(key='-xlab-', size=(inputSize, 1))]]
        tLabLayout = [[sg.Text('Temperature')], [sg.Input(key='-tlab-', size=(inputSize, 1))]]
        labelLayout = [xLabLayout, tLabLayout, [sg.Button('Add Label'), sg.Button('Cancel')]]
        self.sgw = sg.Window('Add phase label', labelLayout, location=[400, 0], finalize=True)
        self.children = []

    def close(self):
        """Close any child windows, this window, and deregister from windowList."""
        for child in self.children:
            child.close()
        self.sgw.close()
        if self in windowList:
            windowList.remove(self)

    def read(self):
        """Poll the window once and handle the resulting event."""
        event, values = self.sgw.read(timeout=timeout)
        if event == sg.WIN_CLOSED or event == 'Cancel':
            self.close()
        elif event == 'Add Label':
            try:
                try:
                    xlab = float(values['-xlab-'])
                except ValueError:
                    # Allow fractional input such as "1/3".
                    num, den = values['-xlab-'].split('/')
                    xlab = float(num) / float(den)
                tlab = float(values['-tlab-'])
                # Only accept physically sensible coordinates.
                if (0 <= xlab <= 1) and (295 <= tlab <= 6000):
                    self.parent.calculation.makeBackup()
                    self.parent.sgw.Element('Undo').Update(disabled=False)
                    self.parent.calculation.addLabel(xlab, tlab)
                    self.parent.calculation.makePlot()
                    self.parent.sgw.Element('Remove Label').Update(disabled=False)
                    self.parent.macro.append('macroPD.makeBackup()')
                    self.parent.macro.append(f'macroPD.addLabel({xlab},{tlab})')
            except (ValueError, ZeroDivisionError):
                # Ignore unparseable coordinate input (was a bare `except`,
                # which could also hide genuine programming errors).
                pass
class RemoveWindow:
    """Dialog listing the current phase labels with checkboxes for removal."""

    def __init__(self, parent):
        self.parent = parent
        windowList.append(self)
        headingsLayout = [[sg.Text('Label Text', size=[55, 1], justification='left'),
                           sg.Text('Concentration', size=[15, 1], justification='center'),
                           sg.Text('Temperature', size=[15, 1], justification='center'),
                           sg.Text('Remove Label?', size=[15, 1])]]
        labelListLayout = []
        for i in range(len(self.parent.calculation.labels)):
            labelListLayout.append([[sg.Text(self.parent.calculation.labels[i][1], size=[55, 1], justification='left'),
                                     sg.Text("{:.3f}".format(float(self.parent.calculation.labels[i][0][0])), size=[15, 1], justification='center'),
                                     sg.Text("{:.0f}".format(float(self.parent.calculation.labels[i][0][1])), size=[15, 1], justification='center'),
                                     sg.Checkbox('', key='-removeLabel' + str(i) + '-', pad=[[40, 0], [0, 0]])]])
        removeLayout = [headingsLayout, labelListLayout, [sg.Button('Remove Label(s)'), sg.Button('Cancel')]]
        self.sgw = sg.Window('Remove phase label', removeLayout, location=[400, 0], finalize=True)
        self.children = []

    def close(self):
        """Close any child windows, this window, and deregister from windowList."""
        for child in self.children:
            child.close()
        self.sgw.close()
        if self in windowList:
            windowList.remove(self)

    def read(self):
        """Poll the window once and handle the resulting event."""
        event, values = self.sgw.read(timeout=timeout)
        if event == sg.WIN_CLOSED or event == 'Cancel':
            self.close()
        if event == 'Remove Label(s)':
            self.parent.calculation.makeBackup()
            self.parent.macro.append('macroPD.makeBackup()')
            self.parent.sgw.Element('Undo').Update(disabled=False)
            tempLength = len(self.parent.calculation.labels)
            # Iterate from the highest index down so deletions do not shift
            # the positions of labels still to be processed.
            for i in reversed(range(tempLength)):
                try:
                    if values['-removeLabel' + str(i) + '-']:
                        del self.parent.calculation.labels[i]
                        self.parent.macro.append(f'del macroPD.labels[{i}]')
                except (KeyError, IndexError):
                    # Skip entries whose checkbox key or label index is gone
                    # (was a bare `except`, which could hide real errors).
                    continue
            if len(self.parent.calculation.labels) == 0:
                self.parent.sgw.Element('Remove Label').Update(disabled=True)
            self.parent.calculation.makePlot()
            self.close()
class SettingsWindow:
    """Dialog for plot appearance (markers, colors, labels) and export settings."""

    def __init__(self, parent):
        self.parent = parent
        windowList.append(self)
        # Map the current marker string onto the radio-button defaults.
        if self.parent.calculation.plotMarker == '-':
            line = True
            point = False
            both = False
        elif self.parent.calculation.plotMarker == '.':
            line = False
            point = True
            both = False
        else:
            line = False
            point = False
            both = True
        if self.parent.calculation.plotColor == 'colorful':
            colorful = True
            bland = False
        else:
            colorful = False
            bland = True
        if self.parent.calculation.experimentColor == 'colorful':
            expcolorful = True
            expbland = False
        else:
            expcolorful = False
            expbland = True
        settingsLayout = [[sg.Text('Marker Style:')],
                          [sg.Radio('Lines', 'mstyle', default=line, enable_events=True, key='-mline-')],
                          [sg.Radio('Points', 'mstyle', default=point, enable_events=True, key='-mpoint-')],
                          [sg.Radio('Both', 'mstyle', default=both, enable_events=True, key='-mboth-')],
                          [sg.Text('Plot Colors:')],
                          [sg.Radio('Colorful', 'mcolor', default=colorful, enable_events=True, key='-mcolorful-')],
                          [sg.Radio('Black', 'mcolor', default=bland, enable_events=True, key='-mbland-')],
                          [sg.Text('Experimental Data Colors:')],
                          [sg.Radio('Colorful', 'mexpcolor', default=expcolorful, enable_events=True, key='-mexpcolorful-')],
                          [sg.Radio('Black', 'mexpcolor', default=expbland, enable_events=True, key='-mexpbland-')],
                          [sg.Text('Show:')],
                          [sg.Checkbox('Experimental Data', default=self.parent.calculation.showExperiment, key='-showExperiment-'),
                           sg.Checkbox('Loaded Diagram', default=self.parent.calculation.showLoaded, key='-showLoaded-')],
                          [sg.Text('Auto-Label Settings:')],
                          [sg.Checkbox('1-Phase Regions', default=self.parent.calculation.label1phase, key='-label1phase-'),
                           sg.Checkbox('2-Phase Regions', default=self.parent.calculation.label2phase, key='-label2phase-')],
                          [sg.Text('Export Filename'), sg.Input(key='-filename-', size=(inputSize, 1))],
                          [sg.Text('Export Format'), sg.Combo(['png', 'pdf', 'ps', 'eps', 'svg'], default_value='png', key='-format-')],
                          [sg.Text('Export DPI'), sg.Input(key='-dpi-', size=(inputSize, 1))],
                          [sg.Button('Accept')]]
        self.sgw = sg.Window('Plot Settings', settingsLayout, location=[400, 0], finalize=True)
        self.children = []

    def close(self):
        """Close any child windows, this window, and deregister from windowList."""
        for child in self.children:
            child.close()
        self.sgw.close()
        if self in windowList:
            windowList.remove(self)

    def read(self):
        """Poll the window once and apply any settings change selected."""
        event, values = self.sgw.read(timeout=timeout)
        if event == sg.WIN_CLOSED:
            self.close()
        elif event == '-mline-':
            self.parent.calculation.plotMarker = '-'
        elif event == '-mpoint-':
            self.parent.calculation.plotMarker = '.'
        elif event == '-mboth-':
            self.parent.calculation.plotMarker = '.-'
        elif event == '-mcolorful-':
            self.parent.calculation.plotColor = 'colorful'
        elif event == '-mbland-':
            self.parent.calculation.plotColor = 'bland'
        elif event == '-mexpcolorful-':
            self.parent.calculation.experimentColor = 'colorful'
        elif event == '-mexpbland-':
            self.parent.calculation.experimentColor = 'bland'
        elif event == 'Accept':
            self.parent.calculation.showExperiment = values['-showExperiment-']
            self.parent.calculation.showLoaded = values['-showLoaded-']
            self.parent.calculation.label1phase = values['-label1phase-']
            self.parent.calculation.label2phase = values['-label2phase-']
            try:
                if str(values['-filename-']) != '':
                    self.parent.calculation.exportFileName = str(values['-filename-'])
            except KeyError:
                pass
            self.parent.calculation.exportFormat = values['-format-']
            try:
                tempDPI = int(values['-dpi-'])
                # BUG FIX: the original test was `tempDPI > 0 > 10000`, which
                # is always False (0 > 10000 never holds), so the DPI entry
                # was silently ignored. The intended range check is below.
                if 0 < tempDPI < 10000:
                    self.parent.calculation.exportDPI = tempDPI
            except (KeyError, ValueError, TypeError):
                # Non-numeric or missing DPI input: keep the previous value.
                pass
            self.parent.calculation.makePlot()
            self.close()
class AddDataWindow:
    """File browser for loading experimental data (.csv) into the parent plot.

    Shows a folder picker plus a listbox of CSV files; selecting a file parses
    it (header skipped, all cells as floats) and appends it to the parent's
    experimental data sets.
    """

    def __init__(self, parent):
        self.parent = parent
        windowList.append(self)
        file_list_column = [
            [
                sg.Text("Experimental Data Folder"),
                sg.In(size=(25, 1), enable_events=True, key="-FOLDER-"),
                sg.FolderBrowse(),
            ],
            [
                sg.Listbox(
                    values=[], enable_events=True, size=(40, 20), key="-FILE LIST-"
                )
            ],
        ]
        self.folder = os.getcwd()
        self.sgw = sg.Window('Experimental data selection', file_list_column, location=[0, 0], finalize=True)
        self.sgw["-FILE LIST-"].update(self._csv_files(self.folder))
        self.children = []

    def _csv_files(self, folder):
        """Return the case-insensitively sorted .csv filenames in *folder*.

        Returns an empty list if the folder cannot be listed. Extracted to
        remove the duplicated listing logic in __init__ and read().
        """
        try:
            file_list = os.listdir(folder)
        except OSError:
            # Folder missing/unreadable: show an empty list instead of crashing.
            file_list = []
        fnames = [
            f
            for f in file_list
            if os.path.isfile(os.path.join(folder, f))
            and f.lower().endswith(".csv")
        ]
        return sorted(fnames, key=str.lower)

    def close(self):
        """Close any child windows, this window, and deregister from windowList."""
        for child in self.children:
            child.close()
        self.sgw.close()
        if self in windowList:
            windowList.remove(self)

    def read(self):
        """Poll the window once and handle folder/file selection events."""
        event, values = self.sgw.read(timeout=timeout)
        if event == sg.WIN_CLOSED or event == 'Exit':
            self.close()
        elif event == "-FOLDER-":
            self.folder = values["-FOLDER-"]
            self.sgw["-FILE LIST-"].update(self._csv_files(self.folder))
        elif event == "-FILE LIST-":  # A file was chosen from the listbox
            newData = []
            filename = values["-FILE LIST-"][0]
            datafile = os.path.join(self.folder, filename)
            with open(datafile) as f:
                data = csv.reader(f)
                next(data, None)  # skip the header row
                for row in data:
                    newData.append([float(number) for number in row])
            self.parent.experimentalData.append(np.array(newData))
            # Use the filename stem (text before the first '.') as the label.
            self.parent.experimentNames.append(filename.split('.', 1)[0])
            self.parent.makePlot()
            self.close()
class InspectWindow:
    """Window for inspecting, filtering, and suppressing calculated data points."""

    def __init__(self, parent):
        self.parent = parent
        windowList.append(self)
        dataColumn = [
            [sg.Text('Data Points')],
            [sg.Listbox(values=[], enable_events=True, size=(30, 50), key='-dataList-')]
        ]
        outputColumn = [
            [sg.Text('Calculation Details')],
            [sg.Multiline(key='-details-', size=(50, 10), no_scrollbar=True)],
            [sg.Text(key='-status-')],
            [sg.Button('Toggle Active/Suppressed Status', disabled=True)],
            [sg.Text('Filter points', font='underline')],
            [sg.Text('Temperature Range:')],
            [sg.Input(key='-tfilterlow-', size=(inputSize, 1)), sg.Input(key='-tfilterhi-', size=(inputSize, 1))],
            [sg.Text(f'{self.parent.calculation.el2} Concentration Range:')],
            [sg.Input(key='-xfilterlow-', size=(inputSize, 1)), sg.Input(key='-xfilterhi-', size=(inputSize, 1))],
            [sg.Text('Contains Phases:')],
            [sg.Combo([''] + self.parent.calculation.phases, key='-pfilter1-'), sg.Combo([''] + self.parent.calculation.phases, key='-pfilter2-')],
            [sg.Button('Apply Filter')]
        ]
        self.data = [[i, f'{self.parent.calculation.ts[i]:6.2f} K {self.parent.calculation.x1[i]:4.3f} {self.parent.calculation.x2[i]:4.3f}'] for i in range(len(self.parent.calculation.ts))]
        self.sgw = sg.Window('Data inspection',
                             [[sg.Pane([
                                 sg.Column(dataColumn, element_justification='l', expand_x=True, expand_y=True),
                                 sg.Column(outputColumn, element_justification='c', expand_x=True, expand_y=True)
                             ], orientation='h', k='-PANE-')]],
                             location=[0, 0], finalize=True)
        self.sgw['-dataList-'].update(self.data)
        self.children = []
        self.index = -1  # -1 means no point has been selected yet

    @staticmethod
    def _float_or(values, key, default):
        """Parse values[key] as float, returning *default* on missing/bad input."""
        try:
            return float(values[key])
        except (KeyError, ValueError, TypeError):
            return default

    def close(self):
        """Close any child windows, this window, and deregister from windowList."""
        for child in self.children:
            child.close()
        self.sgw.close()
        if self in windowList:
            windowList.remove(self)

    def read(self):
        """Poll the window once and handle selection, toggle, and filter events."""
        event, values = self.sgw.read(timeout=timeout)
        if event == sg.WIN_CLOSED or event == 'Exit':
            self.close()
        elif event == '-dataList-':
            self.index = self.parent.calculation.pointIndex[values['-dataList-'][0][0]]
            self.sgw['-details-'].update(self.parent.calculation.pointDetails[self.index])
            self.sgw['Toggle Active/Suppressed Status'].update(disabled=False)
            self.sgw['-status-'].update(f'{"Suppressed" if self.parent.calculation.suppressed[self.index] else "Active"}')
        elif event == 'Toggle Active/Suppressed Status':
            if self.index >= 0:
                self.parent.calculation.suppressed[self.index] = not (self.parent.calculation.suppressed[self.index])
                self.parent.macro.append(f'macroPD.suppressed[{self.index}] = not(macroPD.suppressed[{self.index}])')
                self.sgw['-status-'].update(f'{"Suppressed" if self.parent.calculation.suppressed[self.index] else "Active"}')
        elif event == 'Apply Filter':
            # COMPAT FIX: np.Inf was removed in NumPy 2.0; np.inf is canonical.
            tlo = self._float_or(values, '-tfilterlow-', -np.inf)
            thi = self._float_or(values, '-tfilterhi-', np.inf)
            xlo = self._float_or(values, '-xfilterlow-', -np.inf)
            xhi = self._float_or(values, '-xfilterhi-', np.inf)
            self.data = []
            for i in range(len(self.parent.calculation.ts)):
                # Keep points within the temperature window whose x1 OR x2
                # falls in the concentration window.
                if tlo <= self.parent.calculation.ts[i] and thi >= self.parent.calculation.ts[i] and ((xlo <= self.parent.calculation.x1[i] and xhi >= self.parent.calculation.x1[i]) or (xlo <= self.parent.calculation.x2[i] and xhi >= self.parent.calculation.x2[i])):
                    if (values['-pfilter1-'] == '' or values['-pfilter1-'] == self.parent.calculation.p1[i] or values['-pfilter1-'] == self.parent.calculation.p2[i]):
                        if (values['-pfilter2-'] == '' or values['-pfilter2-'] == self.parent.calculation.p1[i] or values['-pfilter2-'] == self.parent.calculation.p2[i]):
                            self.data.append([i, f'{self.parent.calculation.ts[i]:6.2f} K {self.parent.calculation.x1[i]:4.3f} {self.parent.calculation.x2[i]:4.3f}'])
            self.sgw['-dataList-'].update(self.data)
class SaveData(object):
    """Plain container bundling the state of a computed phase diagram.

    Holds the temperatures, compositions, boundary and phase data, plus the
    endpoint data and temperature range, so the whole diagram can be pickled
    and reloaded later.
    """

    _FIELDS = ('ts', 'x1', 'x2', 'boundaries', 'phases', 'b',
               'x0data', 'x1data', 'mint', 'maxt')

    def __init__(self, ts, x1, x2, boundaries, phases, b, x0data, x1data, mint, maxt):
        given = (ts, x1, x2, boundaries, phases, b, x0data, x1data, mint, maxt)
        for field_name, field_value in zip(self._FIELDS, given):
            setattr(self, field_name, field_value)
class SaveDataWindow:
    """Dialog for pickling the current phase diagram data to a ``.pkl`` file."""

    def __init__(self, parent):
        self.parent = parent
        windowList.append(self)
        self.children = []
        layout = [[sg.Input(key='-saveName-', size=(inputSize, 1)), sg.Text('.pkl')],
                  [sg.Button('Save'), sg.Button('Cancel')]]
        self.sgw = sg.Window('Save Diagram Data', layout, location=[400, 0], finalize=True)

    def close(self):
        """Close any child windows, this window, and deregister from windowList."""
        for child in self.children:
            child.close()
        self.sgw.close()
        if self in windowList:
            windowList.remove(self)

    def read(self):
        """Poll the window once; on Save, pickle the diagram state to disk."""
        event, values = self.sgw.read(timeout=timeout)
        if event == sg.WIN_CLOSED or event == 'Cancel':
            self.close()
        elif event == 'Save':
            try:
                tempName = str(values['-saveName-'])
                if not tempName == '':
                    # BUG FIX: the entered name used to be stored on
                    # self.parent.saveDataName, but the output path below is
                    # built from self.parent.calculation.saveDataName, so the
                    # user's filename was ignored. Store it where it is read.
                    self.parent.calculation.saveDataName = tempName
            except KeyError:
                pass
            saveData = SaveData(self.parent.calculation.ts,
                                self.parent.calculation.x1,
                                self.parent.calculation.x2,
                                self.parent.calculation.boundaries,
                                self.parent.calculation.phases,
                                self.parent.calculation.b,
                                self.parent.calculation.x0data,
                                self.parent.calculation.x1data,
                                self.parent.calculation.mint,
                                self.parent.calculation.maxt)
            with open(self.parent.calculation.saveDataName + '.pkl', 'wb') as outp:
                pickle.dump(saveData, outp, pickle.HIGHEST_PROTOCOL)
            self.close()
class LoadDataWindow:
def __init__(self,parent):
self.parent = parent
windowList.append(self)
file_list_column = [
[
sg.Text("Phase Diagram Data Folder"),
sg.In(size=(25, 1), enable_events=True, key="-FOLDER-"),
sg.FolderBrowse(),
],
[
sg.Listbox(
values=[], enable_events=True, size=(40, 20), key="-FILE LIST-"
)
],
]
| |
data, "back": back, "err": back_err}
fig = self.draw.getfigure(**kargs)
fig.subplots_adjust(0.02, 0.02, 0.97, 0.97, wspace=0.1, hspace=0.1)
# Work out values for the histograms
erad = 0.69813170079773 # each segment gets 0.69813170079773 (or thereabouts) rads
eradh = erad / 2.0
eradq = eradh / 2.0
theta = numpy.arange(0.0, 2*numpy.pi, 2*numpy.pi/len(data))
width = (numpy.pi/4)*len(data) # in rads?
width = 0.5
# colour for each segment
colors = ["#FFF800", # (255, 248, 0)
"#000E7C", # (0, 14, 177)
"#001EFF", # (0, 30, 255)
"#6275FF", # (98, 117, 255)
"#B1BAFF", # (177, 186, 255)
"#FFB7B1", # (255, 183, 177)
"#FF6E62", # (255, 110, 98)
"#FF1300", # (255, 19, 0)
"#7C0900"] # (124, 9, 0)
for i, k in enumerate(res): # ugh. random order...
ax = fig.add_subplot(layout[0], layout[1], i+1, polar=True)
if res[k]["back"]:
axes[k].bar(theta-0.10, res[k]["back"], width=erad, bottom=0.0, alpha=0.8, ec="none", color="grey")
ax.bar(theta, res[k]["data"], width=erad-0.20, bottom=0.0, alpha=0.9, ec="none", color=colors)
ax.set_title(k, size=7)
ax.set_xticks(theta-0.10)
ax.set_xticklabels("")
l = ax.get_ylim()
#print k, ["%s%%" % i for i in range(l[0], l[1]+5, l[1]//len(axes[k].get_yticklabels()))]
#print [str(t) for t in axes[k].get_yticklabels()]
[t.set_fontsize(10) for t in ax.get_yticklabels()]
#print ["%s%%" % (i*10, ) for i, t in enumerate(axes[k].get_yticklabels())]
#print [t.get_text() for t in axes[k].get_yticklabels()]
ax.set_yticklabels(["%s%%" % i for i in range(int(l[0]), int(l[1]+5), int(l[1]//len(ax.get_yticklabels())))][1:])
actual_filename = self.draw.savefigure(fig, filename)
config.log.info("genome_dist_radial: Saved '%s'" % actual_filename)
def GO_heatmap(self, filename, p_value_limit=0.01, num_top=5, pvalue_key='pvalue',
size=[8, 6], bracket=[1.3,4], row_cluster=True, col_cluster=False, # heatmap args
heat_wid=0.15, cmap=cm.Reds, border=True, row_font_size=7,
heat_hei='proportional', grid=True, ontology=None, draw_numbers_fmt='%.1f',
draw_numbers=True, draw_numbers_threshold=2.0, draw_numbers_font_size=5, do_negative_log10=True,
**kargs):
'''
**Purpose**
Produce a heatmap of GO categories from a glglob of GO genelists (glgo's)
**Arguments**
filename (Required)
filename to save the resulting heatmap to
p_value_limit (Optional, default=0.01)
minimum p-value to include in list
pvalue_key (Optional, default='p_value')
The key in your GO lists that contains some sort of significance
result.
num_top (Optional, default=5)
Generally these heatmaps do not have much space to contain that
many categories, so you need to take the top N from each list.
GO_heatmap will 'fill in' categories in other lists even if they do not
fulfill the 'p_value_limit' and 'num_top' criteria.
However, this only occurs if your GO lists contain all GO terms. If
you have already truncated the lists for some p-value then glbase cannot
fill in the missing data.
ontology (Optional, default=False)
DAVID will give you a table containing all GO categories. Use this to specify using only
a single ontology to use. Assumes the genelists have a 'ontology' key.
This function will also accept all glbase heatmap arguments (see expression.heatmap).
A few args have altered defaults:
heat_hei (Optional, default='proportional')
Sets the heatmap to a fixed y-size for each row.
Set to a normal heat_wid value if you prefer.
bracket (Optional, default=[1.3, 4.0])
the bracket for the min and max of the heatmap. This sort of bracket
assumes your data is -log10 transformed and so the p-value would
range from 0.05 to 0.0001
do_negative_log10 (Optional, default=True)
By default convert the value in pvalue into the -log10()
Set this to False if you don't want to convert
**Returns**
The resorted row names (as a list) and a heatmap in filename
'''
format = {'force_tsv': True, 'pvalue': 1, 'name': 0}
main_cluster_membership = {}
number_of_clusters = len(self)
go_store = {}
main_cluster_membership = {}
cond_names_idx = {}
for idx, go in enumerate(self.linearData):
cond_names_idx[go.name] = idx
if not go:
config.log.warning("GO_heatmap: GO list '%s' was empty, skipping" % go.name)
continue
go.sort(pvalue_key)
#go.reverse() # huh?
#print(go)
if ontology:
this_ont = go.getRowsByKey('ontology', ontology)
topN = this_ont[0:num_top]
else:
topN = go[0:num_top]
for item in topN:
if do_negative_log10:
if float(item[pvalue_key]) < p_value_limit:
if item['name'] not in go_store:
go_store[item['name']] = [-1] * (number_of_clusters)
go_store[item['name']][idx] = -math.log10(item['pvalue'])
else:
if float(item[pvalue_key]) > -math.log10(p_value_limit): # i.e. 0.01
if item['name'] not in go_store:
go_store[item['name']] = [-1] * (number_of_clusters)
go_store[item['name']][idx] = item['pvalue']
# fill in the holes:
for go in self.linearData:
for k in go_store:
this_k = go.get(key='name', value=k, mode='lazy') # by default
if this_k:
if do_negative_log10:
if float(item[pvalue_key]) < p_value_limit:
if item['name'] not in go_store:
go_store[item['name']] = [-1] * (number_of_clusters)
go_store[k][cond_names_idx[go.name]] = -math.log10(float(this_k[0]['pvalue']))
else:
if float(item[pvalue_key]) > -math.log10(p_value_limit): # i.e. 0.01
if item['name'] not in go_store:
go_store[item['name']] = [-1] * (number_of_clusters)
go_store[k][cond_names_idx[go.name]] = float(this_k[0]['pvalue'])
newe = []
for k in go_store:
newe.append({'name': k.replace("~", ":"), 'conditions': go_store[k]}) # REPAIR DAVID GO names
cond_names = sorted(zip(list(cond_names_idx.keys()), list(cond_names_idx.values())), key=itemgetter(1))
cond_names = [i[0] for i in cond_names]
goex = expression(loadable_list=newe, cond_names=cond_names)
if len(goex) == 0:
config.log.warning('GO list was empty, skipping')
return(False)
if heat_hei == 'proportional':
heat_hei=0.011*len(goex)
res = goex.heatmap(filename=filename, size=size, bracket=bracket,
row_cluster=row_cluster, col_cluster=col_cluster,
heat_wid=heat_wid, cmap=cmap, border=border,
row_font_size=row_font_size, heat_hei=heat_hei, grid=grid,
draw_numbers=draw_numbers, colbar_label='-log10(%s)' % pvalue_key,
draw_numbers_threshold = -math.log10(p_value_limit),
draw_numbers_fmt=draw_numbers_fmt,
draw_numbers_font_size=draw_numbers_font_size)
config.log.warning("GO_heatmap: Saved heatmap '%s'" % filename)
return(reversed(res["reordered_rows"]))
    def measure_density(self, trks, peaks, norm_by_library_size=True, log=False,
        read_extend=0, pointify=True, expand=1000,
        **kargs):
        """
        **Purpose**
            get the seq tag density from the trks, and return as an expression object

        **Arguments**
            trks (Required)
                a list of tracks/flats

            peaks (Required)
                a list of peaks, a genelist containing a 'loc' key

            read_extend (Optional, default=0)
                read extend the sequence tags in the tracks by xbp

            norm_by_library_size (Optional, default=True)
                normalise the result by the [library size/1e6]

            log (Optional, default=False)
                log transform the resulting matrix

            pointify (Optional, default=True)
                convert the genomic locations to the center of the peak

            expand (Optional, default=1000)
                expand the left and right flanks of the genomic coordiantes by <expand> base pairs

                Performed AFTER pointify

        **Returns**
            an expression object, with the conditions as the tag density from the tracks
        """
        assert isinstance(trks, list), 'measure_density: trks must be a list'
        assert 'loc' in list(peaks.keys()), 'measure_density: no loc key found in peaks'
        # Duplicate track names would collapse columns in the resulting expression object.
        all_trk_names = [t["name"] for t in trks]
        assert len(set(all_trk_names)) == len(all_trk_names), 'track names are not unique. Please change the track.meta_data["name"] to unique names'

        # Work on a copy so the caller's genelist is not mutated.
        peaks = peaks.deepcopy()
        if pointify:
            peaks = peaks.pointify()
        if expand:
            peaks = peaks.expand('loc', expand)

        # Sorting by location lets the per-chromosome pileup array be reused
        # for consecutive peaks on the same chromosome.
        peaks.sort('loc')

        newl = []  # NOTE(review): unused — candidate for removal
        curr_chrom = None
        curr_data = None
        curr_n = 0

        # Pre-fill one density slot per track for every peak.
        for p in peaks:
            p["conditions"] = [0.0 for t in trks]

        # Total chromosome loads across all tracks (used for the progress bar).
        all_chroms = len(set([i['chr'] for i in peaks['loc']])) * len(trks)
        # Library sizes in millions of reads, for per-track normalisation.
        all_sizes = [t.get_total_num_reads() / 1e6 for t in trks]

        for it, t in enumerate(trks):
            pb = progressbar(all_chroms)
            curr_chrom = None
            # NOTE(review): curr_n is not reset per track; the progress counter
            # keeps accumulating across tracks against the combined total.
            for p in peaks:
                p_loc = p['loc']
                if p_loc['chr'] != curr_chrom:
                    # Load the pileup for the new chromosome, freeing the old one first.
                    del curr_data
                    curr_data = t.get_array_chromosome(p_loc['chr'], read_extend=read_extend) # this is a numpy array
                    curr_chrom = p_loc['chr']
                    pb.update(curr_n)
                    curr_n += 1

                d = curr_data[p_loc['left']:p_loc['right']]
                if len(d) == 0: # fell off edge of array
                    p["conditions"][it] = 0 # Need to put a value in here
                    continue

                if norm_by_library_size:
                    p["conditions"][it] = numpy.average(d) / all_sizes[it]
                else:
                    p["conditions"][it] = numpy.average(d)

        expn = expression(loadable_list=peaks.linearData, cond_names=[t["name"] for t in trks])
        if log:
            # log2 with a +0.1 pad to avoid log(0).
            expn.log(2, .1)
        return(expn)
def measure_enrichment(self, trks, peaks, log=False,
read_extend=0, peak_window=200,local_lambda=5000,
**kargs):
"""
**Purpose**
get the seq tag enrichment from the trks,
and return as an expression object
**Arguments**
trks (Required)
a list of tracks/flats
peaks (Required)
a list of peaks, a genelist containing a 'loc' key
read_extend (Optional, default=200)
read extend the sequence tags in the tracks by xbp
log (Optional, default=False)
log transform the resulting matrix
peak_window (Optional, default=200)
window around the center of the peak to score the peak enrichment.
local_lambda (Optional, default=5000)
Number of base pairs around the peak to score the local lambda
**Returns**
an expression object, with the conditions as the tag density from the tracks
"""
assert isinstance(trks, list), 'measure_enrichment: trks must be a list'
assert 'loc' in list(peaks.keys()), 'measure_enrichment: no loc key found in peaks'
all_trk_names = [t["name"] for t in trks]
print(all_trk_names)
assert len(set(all_trk_names)) == len(all_trk_names), 'track names are not unique. Please change the track.meta_data["name"] to unique names'
peaks = peaks.deepcopy()
peaks.sort('loc')
newl = []
curr_chrom = None
curr_data = None
curr_n = 0
for p in peaks:
p["conditions"] = [0.0 for t in trks]
all_chroms = len(set([i['chr'] for i in peaks['loc']])) * len(trks)
all_sizes = [t.get_total_num_reads() / 1e6 for t in trks]
lambda_window = local_lambda
peak_window = peak_window
peak_window_half = peak_window //2
lambda_inner = lambda_window - peak_window_half
prog = progressbar(len(trks))
for it, t in enumerate(trks):
for p in peaks:
| |
<reponame>attom/barnfire<filename>src/materials_bondarenko.py<gh_stars>1-10
'''
<NAME>
Summer 2014
Bondarenko iteration utility for materials
'''
#STDLIB
import os
import shutil
#TPL
import numpy as np
#MINE
from materials_util import is_fissionable
import materials_util as util
from directories import get_common_directories
import Readgroupr as readgroupr
import PDTXS as pdtxs
def perform_bondarenko_iterations(inputDict, materials, verbosity):
    '''Driver for Bondarenko iterations.

    For each material: iterate to convergence on the background cross sections,
    then write out the resulting PDT cross sections.

    inputDict -- parsed options (iteration limits, formats, paths)
    materials -- list of material objects to process
    verbosity -- integer/boolean controlling console output
    '''
    maxIterations = inputDict['numberiterationsmax']
    maxError = inputDict['errormax']
    useSimpleRxn = inputDict['simplereactions']
    scatMatrixFormat = inputDict['format']
    rxnsToPrint = inputDict['printopt']
    energyMeshPath = inputDict['mesh']
    fluxBasePath = inputDict['fluxes']
    #
    dirDict = get_common_directories()
    rootDirr = dirDict['gendf']
    outDirr = dirDict['pdtxs']
    #
    # Optionally load a coarse energy mesh and copy it next to the output XS
    # (the trailing mesh entry is dropped; first column holds element indices).
    energyMesh = None
    if energyMeshPath is not None:
        energyMesh = np.loadtxt(energyMeshPath, dtype=np.int, skiprows=2, usecols=[0])[:-1]
        numElements = len(np.unique(energyMesh))
        energyMeshFilenameOut = 'mesh_{0}.txt'.format(numElements)
        energyMeshPathOut = os.path.join(outDirr, energyMeshFilenameOut)
        shutil.copy2(energyMeshPath, energyMeshPathOut)
    #
    if verbosity:
        print '------- Bondarenko -------'
    fluxDict = read_fluxes(fluxBasePath, materials)
    for material in materials:
        backgroundXSDict = iterate_one_material(rootDirr, material, maxError, maxIterations, energyMesh, fluxDict, verbosity)
        # A negative iteration count means "do not use background XS at all".
        if maxIterations < 0:
            unset_background_xs_dict(material, backgroundXSDict, verbosity)
        print_one_material(rootDirr, outDirr, material, backgroundXSDict, scatMatrixFormat, useSimpleRxn, rxnsToPrint, energyMesh, fluxDict, verbosity)
    if verbosity:
        # NOTE(review): reports the group count of the *last* material only.
        key = backgroundXSDict.keys()[0]
        numGroups = len(backgroundXSDict[key])
        print 'Number of groups is', numGroups
def read_fluxes(fluxBasePath, materials):
    '''Load the per-material flux vectors from disk.

    fluxBasePath is a path template containing "{m}", filled in with each
    material's shortName. Each flux file has one header row; the flux is
    taken from the second column.

    Returns a dict mapping shortName -> numpy array of fluxes, or None when
    no base path was supplied.
    '''
    if fluxBasePath is None:
        return None
    fluxes = {}
    for mat in materials:
        name = mat.shortName
        fluxes[name] = np.loadtxt(fluxBasePath.format(m=name), skiprows=1, usecols=[1])
    return fluxes
def print_one_material(rootDirr, outDirr, material, backgroundXSDict, scatMatrixFormat, useSimpleRxn, rxnsToPrint, energyMesh, fluxDict, verbosity):
    '''Print PDT XS for one material. Prints both the component-wise and combined material xs's. Requires unique shortName for each material to prevent over-writing.

    For each (Z,A) nuclide in the material, a Readgroupr command line is
    assembled and executed to write that component's microscopic XS file;
    afterwards the components are combined into one macroscopic file.
    '''
    # Map short thermal-reaction names to ENDF MT numbers.
    txs2mtDict = readgroupr.get_short2mt_dict(readgroupr.get_endf_mt_list())
    T = material.temperature
    ZAList = sorted(material.ZAList)
    for (Z,A) in ZAList:
        sig0Vec = backgroundXSDict[(Z,A)]
        numGroups = len(sig0Vec)
        Sab = material.SabDict[(Z,A)]
        sym = material.symDict[Z]
        shortName = material.shortName
        # Metastable isomeric states use the groundstate A + 400
        effA = A % 400
        metastableStr = ''
        if A // 400 > 0:
            metastableStr = 'm'
        leafDirr = util.get_nuclide_dirr(sym, effA, Sab, metastableStr)
        inDirr = os.path.join(rootDirr, leafDirr)
        readerOpt = 'gendf'
        outName = 'xs_{0}_{1}-{2}_{3}.data'.format(shortName, sym.lower(), A, numGroups)
        pickleName = None
        # Thermal reactions requested for this nuclide, as space-separated MT numbers.
        thermalMTList = ['{0}'.format(txs2mtDict[txs]) for txs in material.thermalXSDict[(Z,A)]]
        thermalMTStr = ' '.join(thermalMTList)
        # Build the Readgroupr command line and run it through its own parser.
        parser = readgroupr.define_input_parser()
        parseStr = '-i {i} -o {o} -O {O} -P {P} -w {w} -p {p} -t {t} -T {T} -f {f}'.format(
            i=inDirr, o=outDirr, O=outName, P=pickleName, p=rxnsToPrint, w=readerOpt, t=thermalMTStr, T=T, f=scatMatrixFormat)
        if useSimpleRxn:
            # Restrict to a minimal reaction set (total, elastic, fission, capture, thermal, nu).
            parseStr += ' -m 1 2 18 102 221 452 -M 2 18 221 -t 221'
        if verbosity > 2:
            print 'Calling ./Readgroupr', parseStr
        if verbosity:
            print 'Printing XS to {0}'.format(os.path.join(outDirr, outName))
        readerDict = vars(parser.parse_args(parseStr.split()))
        # Inject options that cannot be expressed on the command line.
        if fluxDict is not None:
            readerDict['flux'] = fluxDict[material.shortName]
        readerDict['energyMesh'] = energyMesh
        readerDict['sig0Vec'] = sig0Vec
        readgroupr.finish_parsing(readerDict)
        readgroupr.execute_reader(readerDict)
    if verbosity > 2:
        plot_bondarenko(rootDirr, backgroundXSDict)
    # NOTE(review): numGroups leaks out of the loop above; assumes all
    # components share the same group structure — confirm.
    form_and_print_macroscopic_xs(outDirr, ZAList, material, numGroups, verbosity)
def form_and_print_macroscopic_xs(dirr, ZAList, material, numGroups, verbosity=False):
'''Combine all microscopic component XS into one macroscopic material XS'''
shortName = material.shortName
MTinvel = 259
MTfission = 18
MTnutot = 452
MTnudelay = 455
MTnuprompt = 456
MTdecay = 457
MTfissEnergy = 458
MTwgt = 1099
MTchi = 1018
MTnuSigF = 1452
MTdecayConst = 1054
MTdelayedChi = 2055
MTssNu = 2452
MTssChi = 2018
MTfissionMatrix = 2518
# Read in component cross sections
xsDictIn = {}
for (Z,A) in ZAList:
sym = material.symDict[Z]
inName = 'xs_{0}_{1}-{2}_{3}.data'.format(shortName, sym.lower(), A, numGroups)
inPath = os.path.join(dirr, inName)
xsDictIn[(Z,A)] = pdtxs.read_PDT_xs_generally(inPath)
# Initialize material cross section dictionary (xsOut)
key = (Z,A)
t = xsDictIn[key]
numDNGs = 0
# Find the numDNGs of fissile material
for (Z,A) in xsDictIn:
t = xsDictIn[(Z,A)]
if numDNGs == 0:
numDNGs = t.D
elif numDNGs != t.D and t.D != 0:
assert (numDNGs == t.D), 'Fissile material ({0}-{1}) has different number of delayed neutron groups'.format(Z,A)
t.D = numDNGs
microStr = 'Macroscopic cross sections are in units of cm^-1.'
xsDict = pdtxs.PDT_XS(t.G, t.M, t.D, t.T, t.typeStr, microStr, t.Eg, t.dE, {})
xsOut = xsDict.xs
# Keep a reaction if it appears in at least one component
MTs = set()
for (Z,A) in xsDictIn:
MTs.update(xsDictIn[(Z,A)].xs.keys())
# A material-average decay rate does not make sense, so do not compute one
if MTdecay in MTs:
MTs.remove(MTdecay)
# Initialize 0D XS
MTs0D = [MT for MT in MTs if MT in [MTdecay, MTfissEnergy]]
for MT in MTs0D:
xsOut[MT] = 0.
# Initialize 1D XS
MTs1D = [MT for MT in MTs if (MT < 2500 and MT not in [MTs0D, MTdecayConst, MTdelayedChi])]
for MT in MTs1D:
xsOut[MT] = np.zeros(xsDict.G)
# Initialize delayed neutron precurse decay constant (MT 1054)
if MTdecayConst in MTs:
xsOut[MTdecayConst] = np.zeros(xsDict.D)
# Initialize delayed neutron spectra (MT 2055)
if MTdelayedChi in MTs:
xsOut[MTdelayedChi] = np.zeros((xsDict.D, xsDict.G))
# Initialize transfer matrices
MTsXfer = [MT for MT in MTs if MT >= 2500]
for MT in MTsXfer:
if MT == MTfissionMatrix:
xsOut[MT] = np.zeros((xsDict.G, xsDict.G))
else:
xsOut[MT] = np.zeros((xsDict.M, xsDict.G, xsDict.G))
# Save denominators for XS that are averages instead of density-weighted sums
MTsAvg = [MT for MT in MTs if MT in [MTwgt, MTnutot, MTnudelay, MTnuprompt, \
MTchi, MTdelayedChi, MTdecayConst, MTfissEnergy, MTinvel, MTssNu, MTssChi]]
norms = {}
for MT in MTsAvg:
norms[MT] = 0.
# Compute XS averages and sums by adding in the contribution of each component
for (Z,A) in ZAList:
xsIn = xsDictIn[(Z,A)].xs
compDensity = material.atomDensity * material.elemAtomFracDict[Z] * material.abundanceDict[(Z,A)]
# Compute flux weight and its sum for this component
wgt = 0.
wgtSum = 1.
if MTwgt in xsIn:
wgt = xsIn[MTwgt]
wgtSum = np.sum(wgt)
if not wgtSum:
wgtSum = 1.
xsOut[MTwgt] += compDensity * wgt / wgtSum
norms[MTwgt] += compDensity
# Compute fission rate sum for this component
fissRate = 0.
if MTnutot in xsIn:
fissRate = np.sum(xsIn[MTnutot] * xsIn[MTfission] * wgt) / wgtSum
fissRatePrompt = 0.
if MTnuSigF in xsIn:
fissRatePrompt = np.sum(xsIn[MTnuSigF] * wgt) / wgtSum
fissRateDelayed = 0.
if MTnudelay in xsIn:
fissRateDelayed = np.sum(xsIn[MTnudelay] * xsIn[MTfission] * wgt) / wgtSum
# Update numerator and denominator for energy per fission using fission-source weighting
if MTfissEnergy in xsIn:
xsOut[MTfissEnergy] += compDensity * fissRate * xsIn[MTfissEnergy]
norms[MTfissEnergy] += compDensity * fissRate
# Update numerator and denominator for chi using fission-source weighting
if MTchi in xsIn:
xsOut[MTchi] += compDensity * fissRatePrompt * xsIn[MTchi]
norms[MTchi] += compDensity * fissRatePrompt
# Update numerator and denominator for delayed chi using fission-source weighting
if MTdelayedChi in xsIn:
xsOut[MTdelayedChi] += compDensity * fissRateDelayed * xsIn[MTdelayedChi]
norms[MTdelayedChi] += compDensity * fissRateDelayed
# Delayed neutron decay constant should be consistant for all nuclides
if MTdecayConst in xsIn:
xsOut[MTdecayConst] += 1.0 * xsIn[MTdecayConst]
norms[MTdecayConst] += 1.0
# Update numerator and denominator for steady-state chi using fission-source weighting
if MTssChi in xsIn:
xsOut[MTssChi] += compDensity * fissRate * xsIn[MTssChi]
norms[MTssChi] += compDensity * fissRate
# Update neutrons per fission (nutot, nuprompt, nudelay, and nu_ss)
if MTnutot in xsIn:
xsOut[MTnutot] += compDensity * xsIn[MTfission] * xsIn[MTnutot]
norms[MTnutot] += compDensity * xsIn[MTfission]
if MTnudelay in xsIn:
xsOut[MTnudelay] += compDensity * xsIn[MTfission] * xsIn[MTnudelay]
norms[MTnudelay] += compDensity * xsIn[MTfission]
if MTnuprompt in xsIn:
xsOut[MTnuprompt] += compDensity * xsIn[MTfission] * xsIn[MTnuprompt]
norms[MTnuprompt] += compDensity * xsIn[MTfission]
if MTssNu in xsIn:
xsOut[MTssNu] += compDensity * xsIn[MTfission] * xsIn[MTssNu]
norms[MTssNu] += compDensity * xsIn[MTfission]
# Update numerator and denominator for inverse velocity using density weighting
if MTinvel in xsIn:
xsOut[MTinvel] += compDensity * xsIn[MTinvel]
norms[MTinvel] += compDensity
# Compute cross sections that are density-weighted sums
MTsSum = [MT for MT in MTs if MT not in MTsAvg]
for MT in MTsSum:
if MT in xsIn:
xsOut[MT] += compDensity * xsIn[MT]
# Normalize XS averages
for MT in MTsAvg:
if np.all(norms[MT]>0.0):
xsOut[MT] /= norms[MT]
# Recompute steady-state nu and chi
if all(mts in MTs for mts in [MTnuSigF, MTfission, MTssNu, MTssChi]):
flux = xsOut[MTwgt]
promptProd = xsOut[MTnuSigF]
fission_xs = xsOut[MTfission]
nu_delayed = xsOut.get(MTnudelay, 0.)
chis_delayed = xsOut.get(MTdelayedChi, 1.)
chi_delayed = np.sum(chis_delayed, axis=0)
fission_x_prompt = xsOut[MTfissionMatrix]
nu_prompt = promptProd/fission_xs
nu_ss = (nu_prompt + nu_delayed) * fission_xs
n_per_gout = ( np.dot(fission_x_prompt, flux) + \
chi_delayed*np.sum(nu_delayed*fission_xs*flux) )
chi_ss = n_per_gout/np.sum(n_per_gout)
xsOut[MTssNu] = nu_ss
xsOut[MTssChi] = chi_ss
# Print out material XS
outName = 'xs_{0}_{1}.data'.format(shortName, numGroups)
outPath = os.path.join(dirr, outName)
if verbosity:
print 'Printing combined XS to {0}'.format(outPath)
pdtxs.write_PDT_xs_generally(outPath, xsDict)
def iterate_one_material(rootDirr, material, maxError, maxIterations, energyMesh=None, fluxDict=None, verbosity=False):
'''Perform Bondarenko iteration on one material. Fine groups within an | |
import json
import global_settings as gs
from collections import defaultdict
def query_bigquery(query):
    """
    Runs a query in Google BigQuery and returns the result as a list of dicts
    (each line is an element in the list, and each column is an entry in the dict,
    with column names as keys).

    Credentials are fetched from S3 and written to /tmp/key.json; when running
    locally (gs.local) GOOGLE_APPLICATION_CREDENTIALS is pointed at that file.
    """
    # Imports kept local so the module can load without these heavy deps.
    import boto3
    import google.auth
    from google.cloud import bigquery
    # Get & set Google credentials to fetch filters from BigQuery:
    s3 = boto3.client('s3')
    a = s3.get_object(
        Bucket='config-lambda',
        Key='layers/google-cloud-storage/gabinete-compartilhado.json')
    open('/tmp/key.json', 'w').write(a['Body'].read().decode('utf-8'))
    if gs.local:
        # Must set this environment variable:
        import os
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "/tmp/key.json"
    # Create credentials with Drive & BigQuery API scopes
    # Both APIs must be enabled for your project before running this code
    credentials, project = google.auth.default(scopes=[
        'https://www.googleapis.com/auth/drive',
        'https://www.googleapis.com/auth/bigquery',
    ])
    # Run the query:
    bq = bigquery.Client(credentials=credentials, project=project)
    result = bq.query(query, location="US")  # Location must match that of the dataset(s) referenced in the query.
    # Translate the query result into a list of dicts:
    result = [dict(r.items()) for r in result]
    return result
def load_remote_filters():
    """
    Fetch the DOU filter table from Google BigQuery.

    Returns a list of dicts, one per filter row (see query_bigquery).
    """
    query = "SELECT * FROM `gabinete-compartilhado.gabi_bot.gabi_filters` WHERE casa = 'dou'"
    return query_bigquery(query)
def load_local_filters(filter_path):
    """
    Read a set of filters from the .json file at 'filter_path'.

    Returns a list of dicts (each row is an element in the list, with
    column names as the dict keys).
    """
    with open(filter_path, 'r') as filter_file:
        return json.load(filter_file)
def csvrow_to_list(csvrow):
    """
    Takes a string 'csvrow' that has substrings separated by semicolons and
    returns a list of stripped substrings.

    Returns None when 'csvrow' is None (an empty spreadsheet cell).
    """
    # `is None` instead of `== None`; comprehension instead of map/lambda.
    if csvrow is None:
        return None
    return [s.strip() for s in csvrow.split(';')]
def check_for_ambiguity(filter_group, filter_ids, key):
    """
    Check if all filters numbered the same way have the same tag 'key'
    (nome, casa, channel, description). Raises an Exception if they
    don't. This is needed to group filters under the same filter set.

    -- filter_group: dict mapping filter number -> list of filter rows.
    -- filter_ids:   filter numbers to check.
    -- key:          nome, casa, channel, description
    """
    # BUGFIX: the original wrapped the inner comprehension in a list, so
    # `all` only tested the truthiness of non-empty *lists* and the check
    # could never fail. Flatten into a single generator of comparisons.
    unique_key = all(f[key] == filter_group[i][0][key]
                     for i in filter_ids
                     for f in filter_group[i])
    if not unique_key:
        raise Exception('Found multiple entries of \''+key+'\' for same filter.')
def get_filter_par(filter_group, filter_ids, key):
    """
    Given filters grouped by filter number ('filter_group') and a key,
    return, for each filter number in 'filter_ids', the value of that key
    taken from the group's first row.
    """
    first_rows = (filter_group[fid][0] for fid in filter_ids)
    return [row[key] for row in first_rows]
def filterset_gen(filters_raw, fnumber):
    """
    Group all filters in table 'filters_raw' numbered 'fnumber' into one
    filter set.

    Semicolon-separated keyword cells are expanded into lists via
    csvrow_to_list; None-valued entries are dropped. A single row with no
    column_name means "no filter" and yields an empty list.
    """
    rows = [row for row in filters_raw if row['filter_number'] == fnumber]
    # A lone row with an empty column_name marks the absence of any filter:
    if len(rows) == 1 and rows[0]['column_name'] is None:
        return []
    filter_set = []
    for row in rows:
        entry = {}
        for field in ('column_name', 'positive_filter', 'negative_filter'):
            if row[field] is not None:
                entry[field] = row[field] if field == 'column_name' else csvrow_to_list(row[field])
        filter_set.append(entry)
    return filter_set
def format_filters(filters_raw):
    """
    Format the filters loaded from Google sheets (a table 'filters_raw')
    into the event dict structure used downstream (one dict per filter set,
    with tags, slack media info, sns-topic and the grouped filters).
    """
    # Group list of filters by filter_number, to form a filter set:
    filter_group = defaultdict(list)
    for f in filters_raw:
        filter_group[f['filter_number']].append(f)
    # Get filter set ids:
    filter_ids = list(filter_group.keys())
    # List of filter set tags:
    filter_id_keys = ['nome', 'casa', 'channel', 'description']
    # Check if every filter set has unique tags (raises on ambiguity):
    dump = [check_for_ambiguity(filter_group, filter_ids, key) for key in filter_id_keys]
    # Get filter set tags (first row of each group):
    filter_tag = {key: get_filter_par(filter_group, filter_ids, key) for key in filter_id_keys}
    # Organize filters in a filter set:
    filter_set = [filterset_gen(filters_raw, fnumber) for fnumber in filter_ids]
    # Put all filter sets (with tags) into a list:
    event = [{'nome': filter_tag['nome'][i],
              'casa': filter_tag['casa'][i],
              'media': {'type': 'slack',
                        'channel': filter_tag['channel'][i],
                        'description': filter_tag['description'][i]},
              #'sns-topic': 'slack-test-DEV', # For debugging.
              'sns-topic': 'slack-test',
              'filters': filter_set[i]}
             for i in range(len(filter_ids))]
    return event
# Functions for finding out if filter over the field 'secao' will eliminate the
# entirity of the downloaded articles, according to config['secao']:
def build_article_set(secao_list):
    """
    Takes the value of the 'secao' key in a 'config' dict ('secao_list') that
    specifies which DOU sections to download and returns a set of all possible
    sections that might be downloaded: sections 1, 2 and 3 from ordinary ('1'),
    extra ('1e') or supplement ('1a') editions.
    """
    selected = set()
    # Ordinary sections requested explicitly:
    for secao in secao_list:
        if str(secao) in ('1', '2', '3'):
            selected.add(str(secao))
    # 'e' requests every section of the extra editions:
    if 'e' in secao_list:
        selected.update(['1e', '2e', '3e'])
    # '1a' requests the supplement editions (maybe only section 1 exists):
    if '1a' in secao_list:
        selected.update(['1a', '2a', '3a'])
    return selected
def std_secao_filter(secao_list):
    """
    Takes a words list from a secao filter and standardizes the words to the
    same pattern used when downloading the articles' URLs:
    'extra' -> 'e' and 'suplemento' -> 'a'.
    """
    standardized = []
    for secao in secao_list:
        word = str(secao).lower()
        word = word.replace('extra', 'e').replace('suplemento', 'a')
        standardized.append(word)
    return standardized
def build_filter_set(secao_pos, secao_neg):
    """
    Create the set of section/edition codes that would be accepted by the
    positive and negative secao filters 'secao_pos' and 'secao_neg': keep
    only codes sharing a character with some positive keyword, and drop
    codes sharing a character with some negative keyword.

    NOTE: matching is by *characters* of each code ('1e' matches '1' or 'e').
    """
    secao_pos = std_secao_filter(secao_pos)
    secao_neg = std_secao_filter(secao_neg)
    # A filter will, in principle, select all sections and editions:
    candidates = ['1', '2', '3', '1e', '2e', '3e', '1a', '2a', '3a']
    if len(secao_pos) > 0:
        candidates = [code for code in candidates if set(code) & set(secao_pos)]
    if len(secao_neg) > 0:
        candidates = [code for code in candidates if not (set(code) & set(secao_neg))]
    return set(candidates)
def remaining_sections(secao_set, secao_pos, secao_neg):
    """
    Given a set of DOU section codes to download 'secao_set' (e.g.
    {'1','1e','2','2e'}) and positive/negative secao filter keyword lists
    ('secao_pos' and 'secao_neg'), return the section codes that survive
    the filtering.
    """
    acceptable = build_filter_set(secao_pos, secao_neg)
    return secao_set & acceptable
def secao_left(config_secao, bot_info):
    """
    Given a list of DOU sections to download, e.g. config['secao'] = [1,2,3,'e','1a']
    and a bot_info, return a set of sections/editions that will remain after applying
    the bot_info's filters on secao.
    """
    # Select all secao filters in bot_info; the defaultdict wrapper makes a
    # missing positive/negative_filter entry default to an empty list.
    # NOTE(review): d['column_name'] is a direct lookup — filters produced by
    # filterset_gen may omit 'column_name', which would raise KeyError; confirm.
    secao_filters = [{'positive_filter': defaultdict(lambda: [], d)['positive_filter'],
                      'negative_filter': defaultdict(lambda: [], d)['negative_filter']}
                     for d in bot_info['filters'] if d['column_name']=='secao']
    # Transform the secao list in config['secao'] to a set of all sections/editions:
    secao_set = build_article_set(config_secao)
    # Remove sections/editions by applying successive secao filters:
    for f in secao_filters:
        secao_set = remaining_sections(secao_set, f['positive_filter'], f['negative_filter'])
    return secao_set
# End.
def get_relevant_articles(bot_info, articles):
"""
Filter the query result 'articles' that gets the last 30 minutes new stuff
in the database.
-- articles: list of dicionaries. Each entry in list (a dict) is a row in the query results,
and each key in dict is a column.
# About bot_info['filters']:
# This is a list of filters (dict), each filter has the entries:
# 1 FILTER: column_name, positive_filter, negative_filter.
# -- The value for column_name is the column to check.
# -- The positive and negative_filter are a list of keywords that are combined with OR;
# -- The positive and negative filters are combined with AND;
# The filters are combined with AND;
# To combine filters with OR, create a new bot_info with a different list of filters.
"""
filters = bot_info['filters']
if len(filters):
if gs.debug:
print('name:', bot_info['nome'])
print('# articles:', len(articles))
# Loop over filters in a filter set:
for f in filters:
if gs.debug:
print('f', f)
# | |
self.set_keys=set([])
self.node_name=node_name
self.runtime_credentials=runtime_credentials
self.protocol = KademliaProtocolAppend(self.node, self.storage, ksize, node_name=self.node_name, set_keys=self.set_keys, runtime_credentials=self.runtime_credentials)
if kademlia_version != '0.5':
_log.error("#################################################")
_log.error("### EXPECTING VERSION 0.5 of kademlia package ###")
_log.error("#################################################")
    def bootstrap(self, addrs):
        """
        Bootstrap the server by connecting to other known nodes in the network.

        Args:
            addrs: A `list` of (ip, port, cert) tuples. Note that only IP addresses
                   are acceptable - hostnames will cause an error.

        Only the first address in `addrs` is actually pinged; if that node is
        new to the routing table, a NodeSpiderCrawl is started from it.
        Returns a Deferred (via deferredDict / the spider crawl).
        """
        _log.debug("AppendServer::bootstrap, addrs={}".format(addrs))
        # if the transport hasn't been initialized yet, wait a second
        if self.protocol.transport is None:
            return task.deferLater(reactor,
                                   1,
                                   self.bootstrap,
                                   addrs)

        #id is in dhtid in hex
        def initTable(results, challenge, id):
            # Callback fired with the ping results: verify the signed
            # challenge response, then crawl the network from the responder.
            _log.debug("AppendServer::bootstrap::initTable:"
                       "\n\tresults={}"
                       "\n\tchallenge={}"
                       "\n\tself.node.id={}"
                       "\n\tid={}".format(results, challenge, self.node.id.encode('hex'), id.encode('hex')))
            nodes = []
            for addr, result in results.items():
                ip = addr[0]
                port = addr[1]
                if result[0]:
                    resultSign = result[1]['signature'].decode('hex')
                    resultId = result[1]['id'].decode('hex')
                    payload = self.protocol.payload_to_be_signed(id,
                                                                 challenge,
                                                                 "signed_ping_response")
                    # NOTE(review): `node` here is the bootstrap target bound in the
                    # enclosing scope (assigned below, before this callback fires) —
                    # confirm it should not be `resultId` instead.
                    verified = self.protocol.handle_verify_signature(node.id, payload, resultSign)
                    if verified != True:
                        # NOTE(review): `err` is undefined in this scope and
                        # `.encode('id')` looks like it should be `.encode('hex')` —
                        # this log line would itself raise if reached; confirm.
                        _log.error("Failed verification of challenge during bootstrap"
                                   "\n\toriginal challenge={}"
                                   "\n\treturned from ={}"
                                   "\n\treturned signature={}"
                                   "\n\terr={}".format(challenge, resultId.encode('id'), resultSign.encode('hex'), err))
                        raise Exception("Failed to verify challenge during bootstrap")
                    _log.debug("AppendServer::bootstrap::initTable: the challenge was correctly signed, let's append to nodes")
                    nodes.append(Node(resultId,
                                      ip,
                                      port))
            _log.debug("AppendServer::bootstrap::initTable: let's now call NodeSpiderCrawl with nodes={}".format(nodes))
            spider = NodeSpiderCrawl(self.protocol,
                                     self.node,
                                     nodes,
                                     self.ksize,
                                     self.alpha)
            return spider.find()

        ds = {}
        challenge = generate_challenge()
        id = None
        if addrs:
            # Only the first supplied address is used for bootstrapping.
            data = addrs[0]
            addr = (data[0], data[1])
            cert_str = data[2]
            logger(self.protocol.sourceNode, "\n########### DOING BOOTSTRAP ###########")
            try:
                # Derive the target's DHT id from its certificate, sign the
                # challenge and send an authenticated ping.
                id = dhtid_from_certstring(cert_str)
                payload = self.protocol.payload_to_be_signed(id,
                                                             challenge,
                                                             "ping_request")
                signature = self.protocol.sign_data(payload)
                _log.debug("AppendServer::bootstrap: We have generated a challenge and signed it, let's now ping target"
                           "\n\tself.node.id={}"
                           "\n\ttarget id={}"
                           "\n\tchallenge={}"
                           "\n\tsignature={}".format(self.node.id.encode('hex'), id.encode('hex'), challenge, signature.encode('hex')))
                ds[addr] = self.protocol.ping(addr,
                                              self.node.id,
                                              challenge,
                                              signature,
                                              self.protocol.getOwnCert())
                self.protocol.storeCert(cert_str, id)
            except Exception as err:
                logger(self.protocol.sourceNode, "Bootstrap failed, err={}".format(err))
            if not id:
                # Certificate handling failed; nothing more we can do.
                return deferredDict(ds)
            node = Node(id, data[0], data[1])
            if self.protocol.router.isNewNode(node):
                return deferredDict(ds).addCallback(initTable,
                                                    challenge,
                                                    id)
        _log.debug("AppendServer::bootstrap No addrs supplied")
        return deferredDict(ds)
    def append(self, key, value):
        """
        For the given key append the given list values to the set in the network.

        `value` must be a JSON-encoded list; it is set-unioned with whatever
        list is already stored under the key, both locally (when this node is
        among the closest) and on the neighbors found by a spider crawl.
        Returns a Deferred (presumably firing whether any node responded
        successfully, via _anyRespondSuccess — confirm).
        """
        try:
            dkey = digest(key)
        except Exception as err:
            _log.error("Failed to calculate digest of key={}, err={}".format(key, err))
            raise
        _log.debug("AppendServer::append"
                   "\n\tkey={}"
                   "\n\tdkey={}"
                   "\n\tvalue={}".format(key, dkey.encode('hex'), value))
        node = Node(dkey)

        def append_(nodes):
            # Callback fired with the nodes found by the spider crawl.
            nodes_hex = [n.id.encode('hex') for n in nodes]
            _log.debug("AppendServer::append::append_"
                       "\n\tkey={}"
                       "\n\tdkey={}"
                       "\n\tvalue={}"
                       "\n\tnodes={}".format(key, dkey.encode('hex'), value, nodes_hex))
            # if this node is close too, then store here as well
            if not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):
                _log.debug("AppendServer::append::append_: this node is close, store")
                try:
                    pvalue = json.loads(value)
                    self.set_keys.add(dkey)
                    if dkey not in self.storage:
                        _log.debug("%s local append key: %s not in storage set value: %s" % (base64.b64encode(node.id), base64.b64encode(dkey), pvalue))
                        self.storage[dkey] = value
                    else:
                        # Merge with the existing value as a set union.
                        old_value_ = self.storage[dkey]
                        try:
                            old_value = json.loads(old_value_)
                            new_value = list(set(old_value + pvalue))
                        except:
                            # When the key have been used for single values it does not contain a list
                            # When have been deleted contains None
                            # Just replace old value
                            new_value = pvalue
                            old_value = old_value_
                        _log.debug("{} local append "
                                   "\n\tkey={}"
                                   "\n\told={}"
                                   "\n\tadd={}"
                                   "\n\tnew={}".format(base64.b64encode(node.id), base64.b64encode(dkey), old_value, pvalue, new_value))
                        self.storage[dkey] = json.dumps(new_value)
                except:
                    _log.debug("Trying to append something not a JSON coded list %s" % value, exc_info=True)
            else:
                _log.debug("AppendServer::append::append_: this node is not close, don't store")
            # Forward the append to every found node.
            ds = [self.protocol.callAppend(n, dkey, value) for n in nodes]
            return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)

        nearest = self.protocol.router.findNeighbors(node)
        if len(nearest) == 0:
            # NOTE(review): `self.log` (base-class logger) here vs module-level
            # `_log` elsewhere — inconsistent; confirm which is intended.
            self.log.warning("There are no known neighbors to set key %s" % key)
            _log.debug("There are no known neighbors to set key %s" % key)
            return defer.succeed(False)
        _log.debug("AppendServer::append: Let us find Neighbors by doing a NodeSpiderCrawl, then call append_")
        spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
        return spider.find().addCallback(append_)
    def set(self, key, value):
        """
        Set the given key to the given value in the network.

        Stores locally when this node is among the closest, and on the
        neighbors found by a spider crawl. Returns a Deferred.
        """
        try:
            dkey = digest(key)
        except Exception as err:
            _log.error("Failed to calculate digest of key={}, err={}".format(key, err))
            raise
        # _log.debug("AppendServer::set:"
        #            "\n\tkey={}"
        #            "\n\tdkey={}"
        #            "\n\tvalue={}".format(key, dkey.encode('hex'), value))
        node = Node(dkey)

        def store(nodes):
            # Callback fired with the nodes found by the spider crawl.
            _log.debug("AppendServer::set Setting '%s' on %s" % (key, [x.id.encode('hex') for x in nodes]))
            # _log.debug("AppendServer::set Setting '%s' on %s" % (key, map(str, nodes)))
            # if this node is close too, then store here as well
            if not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):
                self.storage[dkey] = value
            ds = [self.protocol.callStore(n, dkey, value) for n in nodes]
            return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)

        nearest = self.protocol.router.findNeighbors(node)
        if len(nearest) == 0:
            _log.warning("There are no known neighbors to set key %s" % key)
            return defer.succeed(False)
        spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
        return spider.find().addCallback(store)
def get(self, key):
"""
Get a key if the network has it.
Returns:
:class:`None` if not found, the value otherwise.
"""
try:
dkey = digest(key)
except Exception as err:
_log.error("Failed to calculate digest of key={}, err={}".format(key, err))
raise
_log.debug("AppendServer::get"
"\n\tkey={}"
"\n\tdkey={}".format(key, dkey.encode('hex')))
_log.debug("Server:get %s" % base64.b64encode(dkey))
# if this node has it, return it
exists, value = self.storage.get(dkey)
if exists:
return defer.succeed(value)
node = Node(dkey)
nearest = self.protocol.router.findNeighbors(node)
if len(nearest) == 0:
self.log.warning("There are no known neighbors to get key %s" % key)
return defer.succeed(None)
spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
return spider.find()
def remove(self, key, value):
"""
For the given key remove the given list values from the set in the network.
"""
try:
dkey = digest(key)
except Exception as err:
_log.error("Failed to calculate digest of key={}, err={}".format(key, err))
raise
node = Node(dkey)
_log.debug("Server:remove %s" % base64.b64encode(dkey))
_log.debug("AppendServer::remove"
"\n\tkey={}"
"\n\tdkey={}"
"\n\tvalue={}".format(key, dkey.encode('hex'), value))
def remove_(nodes):
# if this node is close too, then store here as well
if not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):
try:
pvalue = json.loads(value)
self.set_keys.add(dkey)
if dkey in self.storage:
try:
old_value = json.loads(self.storage[dkey])
new_value = list(set(old_value) - set(pvalue))
except:
# When the key have been used for single values or deleted it does not contain a list
# Just empty it
old_value = self.storage[dkey]
new_value = []
self.storage[dkey] = json.dumps(new_value)
_log.debug("%s local remove key: %s old: %s remove: %s new: %s" % (base64.b64encode(node.id), base64.b64encode(dkey), old_value, pvalue, new_value))
except:
_log.debug("Trying to remove somthing not a JSON coded list %s" % value, exc_info=True)
ds = [self.protocol.callRemove(n, dkey, value) for n in nodes]
return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)
nearest = self.protocol.router.findNeighbors(node)
if len(nearest) == 0:
self.log.warning("There are no known neighbors to set key %s" % key)
return defer.succeed(False)
spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
return spider.find().addCallback(remove_)
def get_concat(self, key):
"""
Get a key if the network has it. Assuming it is a list that should be combined.
@return: C{None} if not found, the value otherwise.
"""
try:
dkey = digest(key)
except Exception as err:
_log.error("Failed to calculate digest of key={}, err={}".format(key, err))
raise
# Always try to do a find even if we have it, due to the concatenation of all results
exists, value = self.storage.get(dkey)
node = Node(dkey)
nearest = self.protocol.router.findNeighbors(node)
_log.debug("Server:get_concat "
"\n\tkey={}"
"\n\tdkey={}"
"\n\tlocal value={}"
"\n\texists={}"
"\n\tnbr nearest={}".format(key,
dkey.encode('hex'),
value,
exists,
len(nearest)))
if len(nearest) == 0:
# No neighbors but we had it, return that value
if exists:
return defer.succeed(value)
self.log.warning("There are no known neighbors to get key %s" % key)
return defer.succeed(None)
_log.debug("Let's now invoke ValueListSpiderCrawl to search for key")
spider = ValueListSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha,
local_value=value if exists else None)
return spider.find()
class SpiderCrawl(crawling.SpiderCrawl):
    def __init__(self, protocol, node, peers, ksize, alpha):
        """
        Create a new C{SpiderCrawl}er.

        Args:
            protocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.
            node: A :class:`~kademlia.node.Node` representing the key we're looking for
            peers: A list of :class:`~kademlia.node.Node` instances that provide the entry point for the network
            ksize: The value for k based on the paper
            alpha: The value for alpha based on the paper
        """
        from kademlia.log import Logger
        self.protocol = protocol
        self.node = node
        self.ksize = ksize
        self.alpha = alpha
        self.lastIDsCrawled = []
        # Track a larger candidate neighborhood than stock kademlia:
        # (ksize + 1) * ksize nodes instead of just ksize.
        capacity = (ksize + 1) * ksize
        self.nearest = NodeHeap(self.node, capacity)
        self.log = Logger(system=self)
        self.log.debug("creating spider with peers: %s" % peers)
        self.nearest.push(peers)
class NodeSpiderCrawl(SpiderCrawl, crawling.NodeSpiderCrawl):
    # The MRO places the local SpiderCrawl first, so its __init__ (with the
    # enlarged NodeHeap) runs; crawling.NodeSpiderCrawl defines no __init__
    # of its own, hence no extra wiring is needed here.
    pass
class ValueSpiderCrawl(SpiderCrawl, crawling.ValueSpiderCrawl):
def __init__(self, protocol, node, peers, ksize, alpha):
# Make sure that our SpiderCrawl __init__ gets called
SpiderCrawl.__init__(self, protocol, node, peers, ksize, alpha)
# copy crawling.ValueSpiderCrawl statement besides calling original SpiderCrawl.__init__
self.nearestWithoutValue = | |
import math
from .._compat import is_string_or_bytes, range, PY2, abc
from six import integer_types
def float_to_byte_bit_pair(n):
    """Split a fractional byte offset into (whole_bytes, bits).

    The integral part is truncated toward zero and the remaining fraction
    is expressed in bits (eighths of a byte).
    """
    whole = int(n)
    return (whole, int((n - whole) * 8))
class BitView(abc.Sequence):
    """
    Provides bit indexing over an existing buffer.
    For example, you can do this:
    bit_view = BitView(...)
    bits_2_and_3 = bit_view[0.125 * 2:0.125 * 4]
    Basically, you can access the buffer with 1/8 fractions.

    Offsets throughout this class are fractional byte positions: a whole
    number addresses a byte boundary and each 0.125 step is one bit.
    """
    def __init__(self, buffer, start=0, stop=None):
        # buffer: bytes/str/bytearray source; strings are copied into a
        # bytearray so that indexing yields ints on both Python versions.
        # start/stop: fractional byte offsets delimiting the view
        # (defaults cover the whole buffer).
        super(BitView, self).__init__()
        # FIXME: On Python 2.7 there's no bytes() type (immutable byte sequence).
        if is_string_or_bytes(buffer):
            self.buffer = bytearray(buffer)
        else:
            self.buffer = buffer
        self.start = start if start is not None else 0
        self.stop = stop if stop is not None else len(buffer)
        assert self.start >= 0
        assert self.stop <= len(buffer)
        assert self.start <= self.stop
    def __getitem__(self, key):
        # Slice keys return a sub-view; scalar keys return (up to) one
        # byte's worth of bits as an int.
        start, stop = self._key_to_range(key)
        if isinstance(key, slice):
            return self._get_range(start, stop)
        else:  # must be int/float otherwise _key_to_range would raise an error
            return self._get_byte_bits(start, min(8, int((self.stop - start) * 8)))
    def __len__(self):
        # Number of byte positions spanned, rounded up so a trailing
        # partial byte counts as one.
        return int(math.ceil(self.stop - self.start))
    def __iter__(self):
        # Yield one (possibly partial, LSB-aligned) byte at a time.
        for i in range(len(self)):
            yield self._get_byte_bits(i + self.start, min(int((self.stop - i - self.start) * 8), 8))
    def __str__(self):
        if PY2:
            return self.to_bytes()
        else:
            raise NotImplementedError("BitViews should not be treated as strings in Python 3. Use to_bytes instead")
    def __repr__(self):
        return "{0}(buffer={1!r}, start={2}, stop={3})".format(type(self), self.buffer, self.start, self.stop)
    def to_bitstr(self):
        """Render the view as a string of '0'/'1' digits, most significant bit first."""
        result = []
        for i in range(int(self.start * 8), int(self.stop * 8)):
            byte_i, bit_i = float_to_byte_bit_pair(float(i) / 8)
            result.append((self.buffer[byte_i] >> bit_i) & 1)
        return "".join(str(n) for n in reversed(result))
    def to_bytearray(self):
        """Return the view's content as a bytearray.

        Byte-aligned views slice the backing buffer directly; unaligned
        views fall back to re-reading bit by bit.
        """
        if int(self.start) == self.start:
            return self.buffer[int(self.start):int(math.ceil(self.stop))]
        else:
            return bytearray(list(self))  # very inefficient.
    def length(self):
        # Exact, possibly fractional, length in bytes — unlike len(),
        # which rounds up to whole bytes.
        return self.stop - self.start
    def _get_range(self, start, stop):
        # Build a sub-view; start/stop are in backing-buffer coordinates.
        start_byte_ofs = int(start)
        stop_byte_ofs = int(math.ceil(stop))
        return type(self)(self.buffer[start_byte_ofs:stop_byte_ofs], start - start_byte_ofs, stop - start_byte_ofs)
    def _get_byte_bits(self, ofs, bit_len):
        # Read up to 8 bits starting at fractional offset ofs, LSB-aligned.
        # assert ofs >= 0 and bit_len >= 0, "ofs={0!r}, bit_len={1!r}".format(ofs, bit_len)
        byte_ofs, bit_ofs = float_to_byte_bit_pair(ofs)
        bit_mask = ((1 << bit_len) - 1)
        if bit_ofs == 0:
            return self.buffer[byte_ofs] & bit_mask
        elif bit_ofs < 0:
            # Negative bit offset (ofs just below zero): shrink the mask and
            # read from the first byte.
            bit_mask = ((1 << (bit_len + bit_ofs)) - 1)
            return self.buffer[0] & bit_mask
        elif bit_ofs + bit_len <= 8:
            return (self.buffer[byte_ofs] >> bit_ofs) & bit_mask
        else:
            # Read spans a byte boundary: stitch the high bits of the current
            # byte to the low bits of the next one (zero past the end).
            cur_byte = self.buffer[byte_ofs]
            next_byte = self.buffer[byte_ofs + 1] if byte_ofs + 1 < len(self.buffer) else 0
            return (((cur_byte >> bit_ofs) & 0xFF) | ((next_byte << (8 - bit_ofs)) & 0xFF)) & bit_mask
    def _key_to_range(self, key):
        # Normalize an int/float index or a slice into absolute
        # (start, stop) buffer offsets.
        if isinstance(key, slice):
            if key.step not in (None, 1):
                raise NotImplementedError("step must be 1 or None")
            start = self._translate_offset(key.start if key.start is not None else 0)
            stop = self._translate_offset(key.stop if key.stop is not None else self.length())
            assert start <= stop and start >= 0, "start={0!r}, stop={1!r}".format(start, stop)
        elif isinstance(key, integer_types + (float,)):
            start = self._translate_offset(key)
            stop = start + 1
        else:
            raise TypeError("index must be int, float or a slice")
        return start, stop
    def _translate_offset(self, ofs):
        # Clamp the offset to the view, resolve negative (from-the-end)
        # offsets, and shift into absolute buffer coordinates.
        length = self.length()
        ofs = max(ofs + length, 0) if ofs < 0 else min(ofs, length)
        return ofs + self.start
    def to_bytes(self):
        """Return the view's content as an immutable byte string (str on Python 2)."""
        ba = bytearray(b for b in self)
        return str(ba) if PY2 else bytes(ba)
class BitAwareByteArray(BitView, abc.MutableSequence):
    """
    Similar to BitView, but this class is mutable.
    """
    def __init__(self, source, start=0, stop=None):
        # Mutation requires an actual bytearray backing store (BitView also
        # accepts read-only buffers).
        assert isinstance(source, bytearray)
        super(BitAwareByteArray, self).__init__(source, start, stop)
    def __setitem__(self, key, value):
        # Assign value (bits/bytes/int) to the indexed bit range; the array
        # grows or shrinks when the lengths differ.
        start, stop = self._key_to_range(key)
        value, value_len = self._value_to_value_and_length(value, stop - start)
        self._set_range(start, stop, value, value_len)
    def __delitem__(self, key):
        start, stop = self._key_to_range(key)
        self._del_range(start, stop)
    def insert(self, i, value):
        # Insert value at fractional offset i, shifting subsequent bits.
        i = self._translate_offset(i)
        value, value_len = self._value_to_value_and_length(value)
        self._insert_zeros(i, i + value_len)
        self._copy_to_range(i, value, value_len)
    def extend(self, other):
        # Fast path for BitView sources: grow once, then overwrite in place;
        # other iterables go through MutableSequence's one-by-one extend.
        if isinstance(other, BitView):
            offset = self.stop
            self._insert_zeros(offset, offset + other.length())
            self._copy_to_range(offset, other, other.length())
        else:
            super(BitAwareByteArray, self).extend(other)
    def zfill(self, length):
        # Pad with zero bits at the end up to length; no-op when already
        # long enough.
        if length > self.length():
            self._insert_zeros(self.stop, self.stop + length - self.length())
    def __add__(self, other):
        if not isinstance(other, BitView):
            return NotImplemented
        # Concatenation copies this array's storage, then appends other.
        copy = BitAwareByteArray(bytearray(self.buffer), start=self.start, stop=self.stop)
        copy.extend(other)
        return copy
    def __radd__(self, other):
        if not isinstance(other, BitView):
            return NotImplemented
        copy = BitAwareByteArray(bytearray(other.buffer), start=other.start, stop=other.stop)
        copy.extend(self)
        return copy
    def _set_range(self, start, stop, value, value_len):
        """
        Assumes that start and stop are already in 'buffer' coordinates. value is a byte iterable.
        value_len is fractional.
        """
        assert stop >= start and value_len >= 0
        range_len = stop - start
        # Resize the target range to value_len, then overwrite it.
        if range_len < value_len:
            self._insert_zeros(stop, stop + value_len - range_len)
            self._copy_to_range(start, value, value_len)
        elif range_len > value_len:
            self._del_range(stop - (range_len - value_len), stop)
            self._copy_to_range(start, value, value_len)
        else:
            self._copy_to_range(start, value, value_len)
    def _copy_to_range(self, offset, iterable, iterable_len):
        # Write the iterable's bytes starting at offset; the final byte may
        # be partial (fewer than 8 remaining bits).
        remaining_bit_len = int(iterable_len * 8)
        for byte in iterable:
            self._set_byte_bits(offset, min(remaining_bit_len, 8), byte)
            offset += 1
            remaining_bit_len -= 8
    def _del_range(self, start, stop):
        # Remove the bits in [start, stop), closing the gap.
        assert stop >= start
        start_byte, stop_byte = int(math.ceil(start)), int(math.floor(stop))
        whole_byte_delta = stop_byte - start_byte
        # If we can remove whole bytes from the buffer, we'll do that first.
        if whole_byte_delta >= 1:
            del self.buffer[start_byte:stop_byte]
            self.stop -= whole_byte_delta
            stop -= whole_byte_delta
        # Here we have at most 8 bits to remove, so we need to "shift" the entire array.
        if stop > start:
            ofs = start
            bit_len_frac = stop - start
            while (ofs + bit_len_frac) < self.stop:
                self._set_byte_bits(ofs, 8, self._get_byte_bits(ofs + bit_len_frac, 8))
                ofs += 1
            self.stop -= bit_len_frac
            # Drop a trailing byte if the shift freed one.
            if int(math.ceil(self.stop)) < len(self.buffer):
                del self.buffer[-1]
    def _insert_zeros(self, start, stop):
        # Open a zero-filled gap of (stop - start) bytes at start.
        assert start >= 0 and start <= stop, "start={0!r}, stop={1!r}".format(start, stop)
        assert start <= self.stop
        start_byte, stop_byte = int(math.ceil(start)), int(math.floor(stop))
        whole_byte_delta = stop_byte - start_byte
        # If we can insert whole bytes to the buffer, we'll do that first.
        if whole_byte_delta >= 1:
            self.buffer[start_byte:start_byte] = bytearray(whole_byte_delta)
            self.stop += whole_byte_delta
            stop -= whole_byte_delta
        if stop > start:
            assert stop - start <= 2, "start={0}, stop={1}".format(start, stop)
            bit_len_frac = stop - start
            # Ensure the backing storage can hold the extra bits.
            while int(math.ceil(self.stop + bit_len_frac)) > len(self.buffer):
                self.buffer.append(0)
            if start < self.stop:
                # Inserting in the middle, so we copy from end to start.
                ofs = self.stop + bit_len_frac - 1
                while ofs >= start:
                    self._set_byte_bits(ofs, 8, self._get_byte_bits(ofs - bit_len_frac, 8))
                    ofs -= 1
            self.stop += bit_len_frac
    def _set_byte_bits(self, ofs, bit_len, value):
        # Write bit_len bits of value at fractional offset ofs.
        byte_ofs, bit_ofs = float_to_byte_bit_pair(ofs)
        if bit_ofs == 0 and bit_len == 8:
            self.buffer[byte_ofs] = value  # shortcut
        elif (bit_ofs + bit_len) <= 8:
            # Clear the target bit field, then OR the new bits in.
            self.buffer[byte_ofs] &= ~(((1 << bit_len) - 1) << bit_ofs)
            self.buffer[byte_ofs] |= (value << bit_ofs) & 0xFF
        else:
            # Write spans a byte boundary: split into two aligned writes.
            first_byte_bit_len = 8 - bit_ofs
            self._set_byte_bits(ofs, first_byte_bit_len, value & ((1 << first_byte_bit_len) - 1))
            self._set_byte_bits(byte_ofs + 1, bit_len - first_byte_bit_len, value >> first_byte_bit_len)
    def _value_to_value_and_length(self, value, int_value_len=1):
        # Normalize value into (byte iterable, fractional byte length).
        value_len = 0
        if isinstance(value, BitView):
            value_len = value.length()
        elif isinstance(value, abc.Sized):
            value_len = len(value)
        elif isinstance(value, abc.Iterable):
            # Unsized iterables must be materialized to learn their length.
            value = bytearray(value)
            value_len = len(value)
        elif isinstance(value, integer_types):
            # Short circuit: make bit ranges accept int values by their bit length.
            bit_length = max(1, value.bit_length())
            if bit_length > int_value_len * 8:
                # Safety measure for short circuit: if user is assigning an int with more bits than the range of
                # bits that he specified we shout.
                raise ValueError("trying to assign int {0} with bit length {1} to bit length {2}".format(value,
                                                                                bit_length, int(int_value_len * 8)))
            l = []
            for n in range(0, bit_length, 8):
                l.append(value % 256)
                value //= 256
            value = l
            value_len = max(float(bit_length) / 8, int_value_len)
        else:
            raise TypeError("value must be iterable or int")
        return value, value_len
class InputBuffer(object):
    """Read-only wrapper exposing ranged bit access over a buffer."""

    def __init__(self, buffer):
        # Reuse an existing BitView; wrap raw byte buffers in one.
        if isinstance(buffer, BitView):
            self.buffer = buffer
        else:
            self.buffer = BitView(buffer)

    def get(self, range_list):
        """Return the concatenated bits covered by every range in *range_list*.

        Each range must be closed (concrete start and stop offsets).
        """
        # Fix: the loop variable was named `range`, shadowing the `range`
        # imported from .._compat (and the builtin) — renamed to `rng`.
        if len(range_list) == 1:
            # Shortcut: a single range is served as a sub-view, no copy.
            rng = range_list[0]
            assert not rng.is_open()
            result = self.buffer[rng.start:rng.stop]
        else:
            result = BitAwareByteArray(bytearray())
            for rng in range_list:
                assert not rng.is_open()
                result += self.buffer[rng.start:rng.stop]
        return result

    def length(self):
        """Return the underlying view's length (whole byte positions, rounded up)."""
        return len(self.buffer)

    def __repr__(self):
        return "InputBuffer({0!r})".format(self.buffer)
class OutputBuffer(object):
def __init__(self, buffer=None):
if isinstance(buffer, BitAwareByteArray):
self.buffer = buffer
elif isinstance(buffer, bytearray):
self.buffer = BitAwareByteArray(buffer)
elif buffer is None:
self.buffer = BitAwareByteArray(bytearray())
else:
raise TypeError("buffer must be either BitAwareByteArray, bytearray or None but instead is {0}".
format(type(buffer)))
def set(self, value, range_list):
value = BitView(value)
value_start = 0
for range in range_list:
assert not range.is_open()
assert range.start >= 0 and range.start <= range.stop
if range.byte_length() > len(value) - value_start:
raise ValueError("trying to assign a value with smaller length than the range | |
receiver node requesting a content
content : any hashable type
The content identifier requested by the receiver
log : bool
*True* if this session needs to be reported to the collector,
*False* otherwise
"""
self.session[flow_id] = dict(timestamp=timestamp,
receiver=receiver,
content=content,
log=log,
deadline=deadline)
#if self.collector is not None and self.session[flow_id]['log']:
self.collector.start_session(timestamp, receiver, content, flow_id, deadline)
def forward_request_path(self, s, t, path=None, main_path=True):
"""Forward a request from node *s* to node *t* over the provided path.
Parameters
----------
s : any hashable type
Origin node
t : any hashable type
Destination node
path : list, optional
The path to use. If not provided, shortest path is used
main_path : bool, optional
If *True*, indicates that link path is on the main path that will
lead to hit a content. It is normally used to calculate latency
correctly in multicast cases. Default value is *True*
"""
if path is None:
path = self.model.shortest_path[s][t]
for u, v in path_links(path):
self.forward_request_hop(u, v, main_path)
    def forward_content_path(self, u, v, path=None, main_path=True):
        """Forward a content from node *u* to node *v* over the provided path.

        Parameters
        ----------
        u : any hashable type
            Origin node
        v : any hashable type
            Destination node
        path : list, optional
            The path to use. If not provided, shortest path is used
        main_path : bool, optional
            If *True*, indicates that this path is being traversed by content
            that will be delivered to the receiver. This is needed to
            calculate latency correctly in multicast cases. Default value is
            *True*
        """
        if path is None:
            path = self.model.shortest_path[u][v]
        # Note: the loop deliberately rebinds u, v to each link's endpoints.
        for u, v in path_links(path):
            self.forward_content_hop(u, v, main_path)
def forward_request_hop(self, u, v, main_path=True):
"""Forward a request over link u -> v.
Parameters
----------
u : any hashable type
Origin node
v : any hashable type
Destination node
main_path : bool, optional
If *True*, indicates that link link is on the main path that will
lead to hit a content. It is normally used to calculate latency
correctly in multicast cases. Default value is *True*
"""
if self.collector is not None and self.session['log']:
self.collector.request_hop(u, v, main_path)
def forward_content_hop(self, u, v, main_path=True):
"""Forward a content over link u -> v.
Parameters
----------
u : any hashable type
Origin node
v : any hashable type
Destination node
main_path : bool, optional
If *True*, indicates that this link is being traversed by content
that will be delivered to the receiver. This is needed to
calculate latency correctly in multicast cases. Default value is
*True*
"""
if self.collector is not None and self.session['log']:
self.collector.content_hop(u, v, main_path)
def put_content(self, node, content=0):
"""Store content in the specified node.
The node must have a cache stack and the actual insertion of the
content is executed according to the caching policy. If the caching
policy has a selective insertion policy, then content may not be
inserted.
Parameters
----------
node : any hashable type
The node where the content is inserted
Returns
-------
evicted : any hashable type
The evicted object or *None* if no contents were evicted.
"""
if node in self.model.cache:
return self.model.cache[node].put(content)
def get_content(self, node, content=0):
"""Get a content from a server or a cache.
Parameters
----------
node : any hashable type
The node where the content is retrieved
Returns
-------
content : bool
True if the content is available, False otherwise
"""
if node in self.model.cache:
cache_hit = self.model.cache[node].get(content)
if cache_hit:
#if self.session['log']:
self.collector.cache_hit(node)
else:
#if self.session['log']:
self.collector.cache_miss(node)
return cache_hit
name, props = fnss.get_stack(self.model.topology, node)
if name == 'source':
if self.collector is not None and self.session['log']:
self.collector.server_hit(node)
return True
else:
return False
def remove_content(self, node):
"""Remove the content being handled from the cache
Parameters
----------
node : any hashable type
The node where the cached content is removed
Returns
-------
removed : bool
*True* if the entry was in the cache, *False* if it was not.
"""
if node in self.model.cache:
return self.model.cache[node].remove(self.session['content'])
def add_event(self, time, receiver, service, node, flow_id, deadline, rtt_delay, status, task=None):
"""Add an arrival event to the eventQ
"""
if time == float('inf'):
raise ValueError("Invalid argument in add_event(): time parameter is infinite")
e = Event(time, receiver, service, node, flow_id, deadline, rtt_delay, status, task)
heapq.heappush(self.model.eventQ, e)
def replacement_interval_over(self, flow_id, replacement_interval, timestamp):
""" Perform replacement of services at each computation spot
"""
#if self.collector is not None and self.session[flow_id]['log']:
self.collector.replacement_interval_over(replacement_interval, timestamp)
def execute_service(self, flow_id, service, node, timestamp, is_cloud):
""" Perform execution of the service at node with starting time
"""
self.collector.execute_service(flow_id, service, node, timestamp, is_cloud)
def complete_task(self, task, timestamp):
""" Perform execution of the task at node with starting time
"""
cs = self.model.compSpot[task.node]
if cs.is_cloud:
self.execute_service(task.flow_id, task.service, task.node, timestamp, True)
return
else:
cs.complete_task(self, task, timestamp)
if task.taskType == Task.TASK_TYPE_SERVICE:
self.execute_service(task.flow_id, task.service, task.node, timestamp, False)
    def reassign_vm(self, time, compSpot, serviceToReplace, serviceToAdd, debugFlag=False):
        """ Instantiate a VM with a given service
        NOTE: this method should ideally call reassign_vm of ComputationSpot as well.
        However, some strategies rebuild VMs from scratch every time and they do not
        use that method always.

        Parameters
        ----------
        time : time at which the replacement occurs
        compSpot : computation spot whose VM is re-assigned
        serviceToReplace : service currently instantiated, to be replaced
        serviceToAdd : service to instantiate in its place
        debugFlag : passed through to ComputationSpot.reassign_vm

        Raises
        ------
        ValueError : if serviceToAdd equals serviceToReplace
        """
        # Re-assigning a service to itself is a programming error.
        if serviceToAdd == serviceToReplace:
            print ("Error in reassign_vm(): serviceToAdd equals serviceToReplace")
            raise ValueError("Error in reassign_vm(): service replaced and added are same")
        compSpot.reassign_vm(self, time, serviceToReplace, serviceToAdd, debugFlag)
        # Record the swap so collectors can account for replacement activity.
        self.collector.reassign_vm(compSpot.node, serviceToReplace, serviceToAdd)
def end_session(self, success=True, timestamp=0, flow_id=0):
"""Close a session
Parameters
----------
success : bool, optional
*True* if the session was completed successfully, *False* otherwise
"""
#if self.collector is not None and self.session[flow_id]['log']:
self.collector.end_session(success, timestamp, flow_id)
self.session.pop(flow_id, None)
def rewire_link(self, u, v, up, vp, recompute_paths=True):
"""Rewire an existing link to new endpoints
This method can be used to model mobility patters, e.g., changing
attachment points of sources and/or receivers.
Note well. With great power comes great responsibility. Be careful when
using this method. In fact as a result of link rewiring, network
partitions and other corner cases might occur. Ensure that the
implementation of strategies using this method deal with all potential
corner cases appropriately.
Parameters
----------
u, v : any hashable type
Endpoints of link before rewiring
up, vp : any hashable type
Endpoints of link after rewiring
"""
link = self.model.topology.edge[u][v]
self.model.topology.remove_edge(u, v)
self.model.topology.add_edge(up, vp, **link)
if recompute_paths:
shortest_path = nx.all_pairs_dijkstra_path(self.model.topology)
self.model.shortest_path = symmetrify_paths(shortest_path)
def remove_link(self, u, v, recompute_paths=True):
"""Remove a link from the topology and update the network model.
Note well. With great power comes great responsibility. Be careful when
using this method. In fact as a result of link removal, network
partitions and other corner cases might occur. Ensure that the
implementation of strategies using this method deal with all potential
corner cases appropriately.
Also, note that, for these changes to be effective, the strategy must
use fresh data provided by the network view and not storing local copies
of network state because they won't be updated by this method.
Parameters
----------
u : any hashable type
Origin node
v : any hashable type
Destination node
recompute_paths: bool, optional
If True, recompute all shortest paths
"""
self.model.removed_links[(u, v)] = self.model.topology.edge[u][v]
self.model.topology.remove_edge(u, v)
if recompute_paths:
shortest_path = nx.all_pairs_dijkstra_path(self.model.topology)
self.model.shortest_path = symmetrify_paths(shortest_path)
def restore_link(self, u, v, recompute_paths=True):
"""Restore a previously-removed link and update the network model
Parameters
----------
u : any hashable type
Origin node
v : any hashable type
Destination node
recompute_paths: bool, optional
If True, recompute all shortest paths
"""
self.model.topology.add_edge(u, v, **self.model.removed_links.pop((u, v)))
if recompute_paths:
shortest_path = nx.all_pairs_dijkstra_path(self.model.topology)
self.model.shortest_path = symmetrify_paths(shortest_path)
def remove_node(self, v, recompute_paths=True):
"""Remove a node from the topology and update the network model.
Note well. With great power comes great responsibility. Be careful when
using this method. In fact, as a result of node removal, network
partitions and other corner cases might occur. Ensure that the
implementation of strategies using this method deal with all potential
corner cases appropriately.
It should be noted that when this method is called, all links connected
to the node to be removed are removed as well. These links are however
restored when the node is restored. However, | |
#!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: <NAME> (<EMAIL>)
# Author: <NAME> (<EMAIL>)
# Author: <NAME>
# Author: <NAME> (<EMAIL>)
# Author: <NAME> (<EMAIL>)
"""Play any audio file and synchronize lights to the music
When executed, this script will play an audio file, as well as turn on
and off N channels of lights to the music (by default the first 8 GPIO
channels on the Raspberry Pi), based upon music it is playing. Many
types of audio files are supported (see decoder.py below), but it has
only been tested with wav and mp3 at the time of this writing.
The timing of the lights turning on and off is based upon the frequency
response of the music being played. A short segment of the music is
analyzed via FFT to get the frequency response across each defined
channel in the audio range. Each light channel is then faded in and
out based upon the amplitude of the frequency response in the
corresponding audio channel. Fading is accomplished with a software
PWM output. Each channel can also be configured to simply turn on and
off as the frequency response in the corresponding channel crosses a
threshold.
FFT calculation can be CPU intensive and in some cases can adversely
affect playback of songs (especially if attempting to decode the song
as well, as is the case for an mp3). For this reason, the FFT
calculations are cached after the first time a new song is played.
The values are cached in a gzip'd text file in the same location as the
song itself. Subsequent requests to play the same song will use the
cached information and not recompute the FFT, thus reducing CPU
utilization dramatically and allowing for clear music playback of all
audio file types.
Recent optimizations have improved this dramatically and most users are
no longer reporting adverse playback of songs even on the first
playback.
Sample usage:
To play an entire list -
sudo python synchronized_lights.py --playlist=/home/pi/music/.playlist
To play a specific song -
sudo python synchronized_lights.py --file=/home/pi/music/jingle_bells.mp3
Third party dependencies:
alsaaudio: for audio input/output
http://pyalsaaudio.sourceforge.net/
decoder.py: decoding mp3, ogg, wma, ...
https://pypi.python.org/pypi/decoder.py/1.5XB
numpy: for FFT calculation
http://www.numpy.org/
"""
import ConfigParser
import argparse
import atexit
import audioop
import csv
import fcntl
import logging as log
import os
import random
import subprocess
import sys
import wave
import alsaaudio as aa
import json
import signal
import decoder
import numpy as np
import cPickle
import time
import errno
import stat
import curses
import bright_curses
import mutagen
from collections import deque
import Platform
import fft
from prepostshow import PrePostShow
import RunningStats
from Queue import Queue, Empty
from threading import Thread
# Make sure SYNCHRONIZED_LIGHTS_HOME environment variable is set
HOME_DIR = os.getenv("SYNCHRONIZED_LIGHTS_HOME")
if not HOME_DIR:
    print("Need to setup SYNCHRONIZED_LIGHTS_HOME environment variable, see readme")
    sys.exit()
LOG_DIR = HOME_DIR + '/logs'
# logging levels
levels = {'DEBUG': log.DEBUG,
          'INFO': log.INFO,
          'WARNING': log.WARNING,
          'ERROR': log.ERROR,
          'CRITICAL': log.CRITICAL}
# Module-level handles for the audio stream, FM transmitter process and
# stream-in decoder; populated later and torn down by end_early().
stream = None
fm_process = None
streaming = None
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--log', default='INFO',
                    help='Set the logging level. levels:INFO, DEBUG, WARNING, ERROR, CRITICAL')
filegroup = parser.add_mutually_exclusive_group()
filegroup.add_argument('--playlist', default="playlist_path",
                       help='Playlist to choose song from.')
filegroup.add_argument('--file', help='path to the song to play (required if no '
                                      'playlist is designated)')
parser.add_argument('--readcache', type=int, default=1,
                    help='read light timing from cache if available. Default: true')
log.basicConfig(filename=LOG_DIR + '/music_and_lights.play.dbg',
                format='[%(asctime)s] %(levelname)s {%(pathname)s:%(lineno)d} - %(message)s',
                level=log.INFO)
# First parse: the log level must be applied before importing
# hardware_controller (which logs during initialization).
level = levels.get(parser.parse_args().log.upper())
log.getLogger().setLevel(level)
# import hardware_controller as hc
import hardware_controller as hc
# get copy of configuration manager
cm = hc.cm
# Second parse: the playlist default comes from the configuration, which is
# only available after the hardware_controller import above.
parser.set_defaults(playlist=cm.lightshow.playlist_path)
args = parser.parse_args()
decay_factor = cm.lightshow.decay_factor
decay = np.zeros(cm.hardware.gpio_len, dtype='float32')
network = hc.network
server = network.networking == 'server'
client = network.networking == "client"
terminal = False
# Recreate the FIFO used for stream-in mode from a clean state.
if cm.lightshow.use_fifo:
    if os.path.exists(cm.lightshow.fifo):
        os.remove(cm.lightshow.fifo)
    os.mkfifo(cm.lightshow.fifo, 0777)  # Python 2 octal literal (0o777)
CHUNK_SIZE = 2048  # Use a multiple of 8 (move this to config)
def end_early():
    """atexit function

    Runs at interpreter exit: blanks clients, releases the hardware, and
    tears down any helper processes/streams started for the show.
    """
    if server:
        # Tell clients the show is over and blank their channels.
        network.set_playing()
        network.broadcast([0. for _ in range(hc.GPIOLEN)])
        time.sleep(1)
        network.unset_playing()
    hc.clean_up()
    if cm.audio_processing.fm:
        fm_process.kill()
    if network.network_stream:
        network.close_connection()
    if cm.lightshow.mode == 'stream-in':
        try:
            # Politely ask the stream decoder to quit before signalling it.
            streaming.stdin.write("q")
        except NameError:
            # NOTE(review): 'streaming' is a module global initialized to
            # None, so a missing process would surface as AttributeError
            # rather than NameError — confirm this guard matches reality.
            pass
        os.kill(streaming.pid, signal.SIGINT)
    if cm.lightshow.use_fifo:
        os.unlink(cm.lightshow.fifo)
atexit.register(end_early)
# Remove traceback on Ctrl-C
# Exiting via sys.exit() ensures the atexit handler above still runs on
# SIGINT and SIGTERM.
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
def update_lights(matrix, mean, std):
    """Update the state of all the lights

    Update the state of all the lights based upon the current
    frequency response matrix

    :param matrix: row of data from cache matrix
    :type matrix: list

    :param mean: standard mean of fft values
    :type mean: list

    :param std: standard deviation of fft values
    :type std: list
    """
    global decay
    # Normalize the response into [0, 1] per channel, using the configured
    # low/high standard-deviation window, then attenuate.
    brightness = matrix - mean + (std * cm.lightshow.SD_low)
    brightness = (brightness / (std * (cm.lightshow.SD_low + cm.lightshow.SD_high))) * \
                 (1.0 - (cm.lightshow.attenuate_pct / 100.0))
    # insure that the brightness levels are in the correct range
    brightness = np.clip(brightness, 0.0, 1.0)
    brightness = np.round(brightness, decimals=3)
    # calculate light decay rate if used
    if decay_factor > 0:
        # Raise the decay envelope to any new peak, fade output channels by
        # decay_factor per frame, then fade the envelope itself.
        # NOTE: the order of these three statements is significant.
        decay = np.where(decay <= brightness, brightness, decay)
        brightness = np.where(decay - decay_factor > 0, decay - decay_factor, brightness)
        decay = np.where(decay - decay_factor > 0, decay - decay_factor, decay)
    # broadcast to clients if in server mode
    if server:
        network.broadcast(brightness)
    if terminal:
        terminal.curses_render(brightness)
    else:
        # Drive each GPIO channel with its computed brightness level.
        for blevel, pin in zip(brightness, range(hc.GPIOLEN)):
            hc.set_light(pin, True, blevel)
def set_audio_device(sample_rate, num_channels):
    """Setup the audio devices for output

    :param sample_rate: audio sample rate
    :type sample_rate: int

    :param num_channels: number of audio channels
    :type num_channels: int

    :return: a callable that consumes one chunk of raw PCM data and sends
             it to the configured sink (FM transmitter, ALSA card, or a
             no-op when no output is configured)
    """
    global fm_process
    pi_version = Platform.pi_version()

    if cm.audio_processing.fm:
        # Halve the rate for mono sources so pifm plays at correct pitch.
        srate = str(int(sample_rate / (1 if num_channels > 1 else 2)))

        fm_command = ["sudo",
                      cm.home_dir + "/bin/pifm",
                      "-",
                      cm.audio_processing.frequency,
                      srate,
                      "stereo" if num_channels > 1 else "mono"]

        if pi_version == 2:
            # Pi 2 uses the pi_fm_rds transmitter, which has a different CLI.
            fm_command = ["sudo",
                          cm.home_dir + "/bin/pi_fm_rds",
                          "-audio", "-", "-freq",
                          cm.audio_processing.frequency,
                          "-srate",
                          srate,
                          "-nochan",
                          "2" if num_channels > 1 else "1"]

        log.info("Sending output as fm transmission")

        with open(os.devnull, "w") as dev_null:
            fm_process = subprocess.Popen(fm_command, stdin=subprocess.PIPE, stdout=dev_null)

        return lambda raw_data: fm_process.stdin.write(raw_data)
    # BUGFIX: was `is not ''` -- an identity comparison that only works by
    # accident of CPython string interning; use value inequality instead.
    elif cm.lightshow.audio_out_card != '':
        if cm.lightshow.mode == 'stream-in':
            # Streamed sources are always decoded to two channels.
            num_channels = 2

        output_device = aa.PCM(aa.PCM_PLAYBACK, aa.PCM_NORMAL, cm.lightshow.audio_out_card)
        output_device.setchannels(num_channels)
        output_device.setrate(sample_rate)
        output_device.setformat(aa.PCM_FORMAT_S16_LE)
        output_device.setperiodsize(CHUNK_SIZE)

        return lambda raw_data: output_device.write(raw_data)
    else:
        # No audio output configured; discard the data.
        return lambda raw_data: None
def enqueue_output(out, queue):
    """Drain *out* line by line into *queue*, then close the stream.

    Intended to run on a daemon thread so reads from a child process
    pipe never block the main loop.
    """
    while True:
        line = out.readline()
        if line == b'':
            # EOF: readline on a binary stream returns b'' at end.
            break
        queue.put(line)
    out.close()
def audio_in():
"""Control the lightshow from audio coming in from a real time audio"""
global streaming
stream_reader = None
streaming = None
songcount = 0
sample_rate = cm.lightshow.input_sample_rate
num_channels = cm.lightshow.input_channels
if cm.lightshow.mode == 'audio-in':
# Open the input stream from default input device
streaming = aa.PCM(aa.PCM_CAPTURE, aa.PCM_NORMAL, cm.lightshow.audio_in_card)
streaming.setchannels(num_channels)
streaming.setformat(aa.PCM_FORMAT_S16_LE) # Expose in config if needed
streaming.setrate(sample_rate)
streaming.setperiodsize(CHUNK_SIZE)
stream_reader = lambda: streaming.read()[-1]
elif cm.lightshow.mode == 'stream-in':
outq = Queue()
if cm.lightshow.use_fifo:
streaming = subprocess.Popen(cm.lightshow.stream_command_string,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
preexec_fn=os.setsid)
io = os.open(cm.lightshow.fifo, os.O_RDONLY | os.O_NONBLOCK)
stream_reader = lambda: os.read(io, CHUNK_SIZE)
outthr = Thread(target=enqueue_output, args=(streaming.stdout, outq))
else:
# Open the input stream from command string
streaming = subprocess.Popen(cm.lightshow.stream_command_string,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stream_reader = lambda: streaming.stdout.read(CHUNK_SIZE)
outthr = Thread(target=enqueue_output, args=(streaming.stderr, outq))
outthr.daemon = True
outthr.start()
log.debug("Running in %s mode - will run until Ctrl+C is pressed" % cm.lightshow.mode)
print "Running in %s mode, use Ctrl+C to stop" % cm.lightshow.mode
# setup light_delay.
chunks_per_sec = ((16 * num_channels * sample_rate) / 8) / CHUNK_SIZE
light_delay = int(cm.audio_processing.light_delay * chunks_per_sec)
matrix_buffer = deque([], 1000)
output = set_audio_device(sample_rate, num_channels)
# Start with these as our initial guesses - will calculate a rolling mean / std
# as we get input data.
mean = np.array([12.0 for _ in range(hc.GPIOLEN)], dtype='float32')
std = np.array([1.5 for _ in range(hc.GPIOLEN)], dtype='float32')
count = 2
running_stats = RunningStats.Stats(hc.GPIOLEN)
# preload running_stats to avoid errors, and give us a show that looks
# good right from the start
running_stats.preload(mean, std, count)
hc.initialize()
fft_calc = fft.FFT(CHUNK_SIZE,
sample_rate,
hc.GPIOLEN,
cm.audio_processing.min_frequency,
cm.audio_processing.max_frequency,
cm.audio_processing.custom_channel_mapping,
cm.audio_processing.custom_channel_frequencies,
1)
if server:
network.set_playing()
# Listen on the audio input device until CTRL-C is pressed
while True:
try:
streamout = outq.get_nowait().strip('\n\r')
except Empty:
pass
else:
print streamout
if cm.lightshow.stream_song_delim in streamout:
songcount+=1
if cm.lightshow.songname_command:
streamout = streamout.replace('\033[2K','')
streamout = streamout.replace(cm.lightshow.stream_song_delim,'')
streamout = streamout.replace('"','')
os.system(cm.lightshow.songname_command + ' "Now Playing ' + streamout + '"')
if cm.lightshow.stream_song_exit_count > 0 and songcount > cm.lightshow.stream_song_exit_count:
break
try:
data = stream_reader()
except OSError as err:
if err.errno == errno.EAGAIN or err.errno == errno.EWOULDBLOCK:
continue
try:
output(data)
except aa.ALSAAudioError:
continue
if len(data):
# if the maximum of the absolute value of all samples in
# data is below a threshold we will disregard it
audio_max = audioop.max(data, 2)
if audio_max < 250:
# we will fill the matrix with zeros and turn the lights off
matrix = np.zeros(hc.GPIOLEN, dtype="float32")
log.debug("below threshold: '" + str(audio_max) + "', turning the lights off")
else:
matrix = | |
#********************************************************************
# File: blocks_series2.py
# Author: <NAME>
#
# Description:
# Series 2 blocks
#
# Copyright (c) 2017 by Cisco Systems, Inc.
#
# ALL RIGHTS RESERVED. THESE SOURCE FILES ARE THE SOLE PROPERTY
# OF CISCO SYSTEMS, Inc. AND CONTAIN CONFIDENTIAL AND PROPRIETARY
# INFORMATION. REPRODUCTION OR DUPLICATION BY ANY MEANS OF ANY
# PORTION OF THIS SOFTWARE WITHOUT PRIOR WRITTEN CONSENT OF
# CISCO SYSTEMS, Inc. IS STRICTLY PROHIBITED.
#
#*********************************************************************/
from estreamer.definitions.blocks_series1 import BLOCK_STRING
from estreamer.definitions.blocks_series1 import BLOCK_BLOB
from estreamer.definitions.core import TYPE_BYTE
from estreamer.definitions.core import TYPE_UINT16
from estreamer.definitions.core import TYPE_UINT32
from estreamer.definitions.core import TYPE_UINT64
from estreamer.definitions.core import TYPE_UINT128
from estreamer.definitions.core import TYPE_UINT160
from estreamer.definitions.core import TYPE_UINT256
from estreamer.definitions.core import TYPE_UUID
from estreamer.definitions.core import TYPE_IPV6
# Without this the series 1 and 2 types collide. There is probably
# another nicer way to do this but right now this will have to do
BLOCK_SERIES_2_SHIM = 0x00010000

# Series 2 data blocks
# Each constant is the on-wire series 2 block type number OR'd with the
# shim above so the values can live in one dictionary alongside series 1
# types.  Numeric suffixes (52/53/54/60) mark the product release that
# introduced that record layout -- presumably FireSIGHT/Firepower
# versions; confirm against the eStreamer specification.
BLOCK_EVENT_EXTRA_DATA = 4 | BLOCK_SERIES_2_SHIM
BLOCK_EVENT_EXTRA_DATA_METADATA = 5 | BLOCK_SERIES_2_SHIM
BLOCK_UUID_STRING = 14 | BLOCK_SERIES_2_SHIM
BLOCK_ACCESS_CONTROL_RULE = 15 | BLOCK_SERIES_2_SHIM
BLOCK_ICMP_TYPE_DATA = 19 | BLOCK_SERIES_2_SHIM
BLOCK_ICMP_CODE_DATA = 20 | BLOCK_SERIES_2_SHIM
BLOCK_IP_REPUTATION_CATEGORY = 22 | BLOCK_SERIES_2_SHIM
BLOCK_RULE_DOCUMENTATION_DATA_52 = 27 | BLOCK_SERIES_2_SHIM
BLOCK_GEOLOCATION_52 = 28 | BLOCK_SERIES_2_SHIM
BLOCK_IOC_NAME_53 = 39 | BLOCK_SERIES_2_SHIM
BLOCK_FILE_EVENT_SHA_HASH_53 = 40 | BLOCK_SERIES_2_SHIM
BLOCK_INTRUSION_EVENT_53 = 41 | BLOCK_SERIES_2_SHIM
BLOCK_SSL_CERTIFICATION_DETAILS_54 = 50 | BLOCK_SERIES_2_SHIM
BLOCK_FILE_EVENT_60 = 56 | BLOCK_SERIES_2_SHIM
BLOCK_USER_60 = 57 | BLOCK_SERIES_2_SHIM
BLOCK_ENDPOINT_PROFILE_60 = 58 | BLOCK_SERIES_2_SHIM
BLOCK_ACCESS_CONTROL_POLICY_RULE_REASON_60 = 59 | BLOCK_SERIES_2_SHIM
BLOCK_INTRUSION_EVENT_60 = 60 | BLOCK_SERIES_2_SHIM
BLOCK_ID_NAME_DESCRIPTION = 61 | BLOCK_SERIES_2_SHIM
BLOCK_MALWARE_EVENT_60 = 62 | BLOCK_SERIES_2_SHIM
BLOCK_ACCESS_CONTROL_POLICY_METADATA = 64 | BLOCK_SERIES_2_SHIM
BLOCKS_SERIES_2 = {
# 4 Series 2
BLOCK_EVENT_EXTRA_DATA: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UINT32, 'name': 'eventId' },
{ 'type': TYPE_UINT32, 'name': 'eventSecond' },
{ 'type': TYPE_UINT32, 'name': 'type' },
{ 'block': BLOCK_BLOB, 'name': 'blob' }],
# 5
BLOCK_EVENT_EXTRA_DATA_METADATA: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'type' },
{ 'block': BLOCK_STRING, 'name': 'name' },
{ 'block': BLOCK_STRING, 'name': 'encoding' }],
# 14
BLOCK_UUID_STRING: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UUID, 'name': 'uuid' },
{ 'block': BLOCK_STRING, 'name': 'name' }],
# 15
BLOCK_ACCESS_CONTROL_RULE: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UUID, 'name': 'uuid' },
{ 'type': TYPE_UINT32, 'name': 'id' },
{ 'block': BLOCK_STRING, 'name': 'name' }],
# 19
BLOCK_ICMP_TYPE_DATA: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT16, 'name': 'type' },
{ 'type': TYPE_UINT16, 'name': 'protocol' },
{ 'block': BLOCK_STRING, 'name': 'description' }],
# 20
BLOCK_ICMP_CODE_DATA: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT16, 'name': 'code' },
{ 'type': TYPE_UINT16, 'name': 'type' },
{ 'type': TYPE_UINT16, 'name': 'protocol' },
{ 'block': BLOCK_STRING, 'name': 'description' }],
# 22
BLOCK_IP_REPUTATION_CATEGORY: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'id' },
{ 'type': TYPE_UUID, 'name': 'accessControlPolicyUuid' },
{ 'block': BLOCK_STRING, 'name': 'name' }],
# 27
BLOCK_RULE_DOCUMENTATION_DATA_52: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'signatureId' },
{ 'type': TYPE_UINT32, 'name': 'generatorId' },
{ 'type': TYPE_UINT32, 'name': 'revision' },
{ 'block': BLOCK_STRING, 'name': 'summary' },
{ 'block': BLOCK_STRING, 'name': 'impact' },
{ 'block': BLOCK_STRING, 'name': 'detail' },
{ 'block': BLOCK_STRING, 'name': 'affectedSystems' },
{ 'block': BLOCK_STRING, 'name': 'attackScenarios' },
{ 'block': BLOCK_STRING, 'name': 'easeOfAttack' },
{ 'block': BLOCK_STRING, 'name': 'falsePositives' },
{ 'block': BLOCK_STRING, 'name': 'falseNegatives' },
{ 'block': BLOCK_STRING, 'name': 'correctiveAction' },
{ 'block': BLOCK_STRING, 'name': 'contributors' },
{ 'block': BLOCK_STRING, 'name': 'additionalReferences' } ],
# 28
BLOCK_GEOLOCATION_52: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT16, 'name': 'countryCode' },
{ 'block': BLOCK_STRING, 'name': 'country' }],
# 39
BLOCK_IOC_NAME_53: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'id' },
{ 'block': BLOCK_STRING, 'name': 'category' },
{ 'block': BLOCK_STRING, 'name': 'eventType' }],
# 40
BLOCK_FILE_EVENT_SHA_HASH_53: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT256, 'name': 'shaHash' },
{ 'block': BLOCK_STRING, 'name': 'fileName' },
{ 'type': TYPE_BYTE, 'name': 'disposition' },
{ 'type': TYPE_BYTE, 'name': 'userDefined'}],
# 41 - LEGACY
BLOCK_INTRUSION_EVENT_53: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UINT32, 'name': 'eventId' },
{ 'type': TYPE_UINT32, 'name': 'eventSecond' },
{ 'type': TYPE_UINT32, 'name': 'eventMicrosecond' },
{ 'type': TYPE_UINT32, 'name': 'ruleId' },
{ 'type': TYPE_UINT32, 'name': 'generatorId' },
{ 'type': TYPE_UINT32, 'name': 'ruleRevision' },
{ 'type': TYPE_UINT32, 'name': 'classificationId' },
{ 'type': TYPE_UINT32, 'name': 'priorityId' },
{ 'type': TYPE_IPV6, 'name': 'sourceIpAddress' },
{ 'type': TYPE_IPV6, 'name': 'destinationIpAddress' },
{ 'type': TYPE_UINT16, 'name': 'sourcePortOrIcmpType' },
{ 'type': TYPE_UINT16, 'name': 'destinationPortOrIcmpType' },
{ 'type': TYPE_BYTE, 'name': 'ipProtocolId' },
{ 'type': TYPE_BYTE, 'name': 'impactFlags' },
{ 'type': TYPE_BYTE, 'name': 'impact' },
{ 'type': TYPE_BYTE, 'name': 'blocked' },
{ 'type': TYPE_UINT32, 'name': 'mplsLabel' },
{ 'type': TYPE_UINT16, 'name': 'vlanId' },
{ 'type': TYPE_UINT16, 'name': 'pad' },
{ 'type': TYPE_UUID, 'name': 'policyUuid' },
{ 'type': TYPE_UINT32, 'name': 'userId' },
{ 'type': TYPE_UINT32, 'name': 'webApplicationId' },
{ 'type': TYPE_UINT32, 'name': 'clientApplicationId' },
{ 'type': TYPE_UINT32, 'name': 'applicationId' },
{ 'type': TYPE_UINT32, 'name': 'accessControlRuleId' },
{ 'type': TYPE_UUID, 'name': 'accessControlPolicyUuid' },
{ 'type': TYPE_UUID, 'name': 'interfaceIngressUuid' },
{ 'type': TYPE_UUID, 'name': 'interfaceEgressUuid' },
{ 'type': TYPE_UUID, 'name': 'securityZoneIngressUuid' },
{ 'type': TYPE_UUID, 'name': 'securityZoneEgressUuid' },
{ 'type': TYPE_UINT32, 'name': 'connectionTimestamp' },
{ 'type': TYPE_UINT16, 'name': 'connectionInstanceId' },
{ 'type': TYPE_UINT16, 'name': 'connectionCounter' },
{ 'type': TYPE_UINT16, 'name': 'sourceCountry' },
{ 'type': TYPE_UINT16, 'name': 'destinationCountry' },
{ 'type': TYPE_UINT16, 'name': 'iocNumber' }],
# 50
BLOCK_SSL_CERTIFICATION_DETAILS_54: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT160, 'name': 'fingerprintShaHash' },
{ 'type': TYPE_UINT160, 'name': 'publicKeyShaHash' },
{ 'type': TYPE_UINT160, 'name': 'serialNumber' },
{ 'type': TYPE_UINT32, 'name': 'serialNumberLength' },
{ 'block': BLOCK_STRING, 'name': 'subjectCn' },
{ 'block': BLOCK_STRING, 'name': 'subjectOrganisation' },
{ 'block': BLOCK_STRING, 'name': 'subjectOU' },
{ 'block': BLOCK_STRING, 'name': 'subjectCountry' },
{ 'block': BLOCK_STRING, 'name': 'issuerCn' },
{ 'block': BLOCK_STRING, 'name': 'issuerOrganisation' },
{ 'block': BLOCK_STRING, 'name': 'issuerOU' },
{ 'block': BLOCK_STRING, 'name': 'issuerCountry' },
{ 'type': TYPE_UINT32, 'name': 'validStartDate' },
{ 'type': TYPE_UINT32, 'name': 'validFinishDate' } ],
# 56
BLOCK_FILE_EVENT_60: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'deviceId' },
{ 'type': TYPE_UINT16, 'name': 'connectionInstance' },
{ 'type': TYPE_UINT16, 'name': 'connectionCounter' },
{ 'type': TYPE_UINT32, 'name': 'connectionTimestamp' },
{ 'type': TYPE_UINT32, 'name': 'fileEventTimestamp' },
{ 'type': TYPE_IPV6, 'name': 'sourceIpAddress' },
{ 'type': TYPE_IPV6, 'name': 'destinationIpAddress' },
{ 'type': TYPE_BYTE, 'name': 'disposition' },
{ 'type': TYPE_BYTE, 'name': 'speroDisposition' },
{ 'type': TYPE_BYTE, 'name': 'fileStorageStatus' },
{ 'type': TYPE_BYTE, 'name': 'fileAnalysisStatus' },
{ 'type': TYPE_BYTE, 'name': 'localMalwareAnalysisStatus' },
{ 'type': TYPE_BYTE, 'name': 'archiveFileStatus' },
{ 'type': TYPE_BYTE, 'name': 'threatScore' },
{ 'type': TYPE_BYTE, 'name': 'action' },
{ 'type': TYPE_UINT256, 'name': 'shaHash' },
{ 'type': TYPE_UINT32, 'name': 'fileTypeId' },
{ 'block': BLOCK_STRING, 'name': 'fileName' },
{ 'type': TYPE_UINT64, 'name': 'fileSize' },
{ 'type': TYPE_BYTE, 'name': 'direction' },
{ 'type': TYPE_UINT32, 'name': 'applicationId' },
{ 'type': TYPE_UINT32, 'name': 'userId' },
{ 'block': BLOCK_STRING, 'name': 'uri' },
{ 'block': BLOCK_STRING, 'name': 'signature' },
{ 'type': TYPE_UINT16, 'name': 'sourcePort' },
{ 'type': TYPE_UINT16, 'name': 'destinationPort' },
{ 'type': TYPE_BYTE, 'name': 'protocol' },
{ 'type': TYPE_UUID, 'name': 'accessControlPolicyUuid' },
{ 'type': TYPE_UINT16, 'name': 'sourceCountry' },
{ 'type': TYPE_UINT16, 'name': 'destinationCountry' },
{ 'type': TYPE_UINT32, 'name': 'webApplicationId' },
{ 'type': TYPE_UINT32, 'name': 'clientApplicationId' },
{ 'type': TYPE_UINT128, 'name': 'securityContext' },
{ 'type': TYPE_UINT160, 'name': 'sslCertificateFingerprint' },
{ 'type': TYPE_UINT16, 'name': 'sslActualAction' },
{ 'type': TYPE_UINT16, 'name': 'sslFlowStatus' },
{ 'block': BLOCK_STRING, 'name': 'archiveSha' },
{ 'block': BLOCK_STRING, 'name': 'archiveName' },
{ 'type': TYPE_BYTE, 'name': 'archiveDepth'},
{ 'type': TYPE_UINT32, 'name': 'httpResponse'}],
# 57
BLOCK_USER_60: [
{ 'type': TYPE_UINT32, 'name': 'blockType' },
{ 'type': TYPE_UINT32, 'name': 'blockLength' },
{ 'type': TYPE_UINT32, 'name': 'id' | |
the Swift password.""")
@click.option('--if-match', help="""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.confirm_delete_option
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def delete_swift_password(ctx, user_id, swift_password_id, if_match):
    """Delete the specified Swift password for a user.

    BUGFIX: the `swift_password_id` keyword argument had been replaced by
    a `<PASSWORD>` sanitization placeholder, which is not valid Python;
    restore the straightforward parameter pass-through.
    """
    kwargs = {}
    if if_match is not None:
        # Optimistic concurrency: only delete when the etag still matches.
        kwargs['if_match'] = if_match
    client = cli_util.build_client('identity', ctx)
    result = client.delete_swift_password(
        user_id=user_id,
        swift_password_id=swift_password_id,
        **kwargs
    )
    cli_util.render_response(result)
@user_group.command(name=cli_util.override('delete_user.command_name', 'delete'), help="""Deletes the specified user. The user must not be in any groups.""")
@click.option('--user-id', required=True, help="""The OCID of the user.""")
@click.option('--if-match', help="""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.confirm_delete_option
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def delete_user(ctx, user_id, if_match):
    """Delete a user, optionally guarded by an ``if-match`` etag."""
    optional_kwargs = {}
    if if_match is not None:
        optional_kwargs['if_match'] = if_match
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.delete_user(user_id=user_id, **optional_kwargs))
@compartment_group.command(name=cli_util.override('get_compartment.command_name', 'get'), help="""Gets the specified compartment's information.
This operation does not return a list of all the resources inside the compartment. There is no single API operation that does that. Compartments can contain multiple types of resources (instances, block storage volumes, etc.). To find out what's in a compartment, you must call the \"List\" operation for each resource type and specify the compartment's OCID as a query parameter in the request. For example, call the [ListInstances] operation in the Cloud Compute Service or the [ListVolumes] operation in Cloud Block Storage.""")
@click.option('--compartment-id', required=True, help="""The OCID of the compartment.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def get_compartment(ctx, compartment_id):
    """Fetch a single compartment by OCID and render the response."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.get_compartment(compartment_id=compartment_id))
@group_group.command(name=cli_util.override('get_group.command_name', 'get'), help="""Gets the specified group's information.
This operation does not return a list of all the users in the group. To do that, use [ListUserGroupMemberships] and provide the group's OCID as a query parameter in the request.""")
@click.option('--group-id', required=True, help="""The OCID of the group.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def get_group(ctx, group_id):
    """Fetch a single group by OCID and render the response."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(identity_client.get_group(group_id=group_id))
@identity_provider_group.command(name=cli_util.override('get_identity_provider.command_name', 'get'), help="""Gets the specified identity provider's information.""")
@click.option('--identity-provider-id', required=True, help="""The OCID of the identity provider.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def get_identity_provider(ctx, identity_provider_id):
    """Fetch a single identity provider by OCID and render the response."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.get_identity_provider(
            identity_provider_id=identity_provider_id))
@idp_group_mapping_group.command(name=cli_util.override('get_idp_group_mapping.command_name', 'get'), help="""Gets the specified group mapping.""")
@click.option('--identity-provider-id', required=True, help="""The OCID of the identity provider.""")
@click.option('--mapping-id', required=True, help="""The OCID of the group mapping.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def get_idp_group_mapping(ctx, identity_provider_id, mapping_id):
    """Fetch one IdP group mapping and render the response."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.get_idp_group_mapping(
            identity_provider_id=identity_provider_id,
            mapping_id=mapping_id))
@policy_group.command(name=cli_util.override('get_policy.command_name', 'get'), help="""Gets the specified policy's information.""")
@click.option('--policy-id', required=True, help="""The OCID of the policy.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def get_policy(ctx, policy_id):
    """Fetch a single policy by OCID and render the response."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(identity_client.get_policy(policy_id=policy_id))
@tenancy_group.command(name=cli_util.override('get_tenancy.command_name', 'get'), help="""Get the specified tenancy's information.""")
@click.option('--tenancy-id', required=True, help="""The OCID of the tenancy.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def get_tenancy(ctx, tenancy_id):
    """Fetch the tenancy record by OCID and render the response."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.get_tenancy(tenancy_id=tenancy_id))
@user_group.command(name=cli_util.override('get_user.command_name', 'get'), help="""Gets the specified user's information.""")
@click.option('--user-id', required=True, help="""The OCID of the user.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def get_user(ctx, user_id):
    """Fetch a single user by OCID and render the response."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(identity_client.get_user(user_id=user_id))
@user_group_membership_group.command(name=cli_util.override('get_user_group_membership.command_name', 'get'), help="""Gets the specified UserGroupMembership's information.""")
@click.option('--user-group-membership-id', required=True, help="""The OCID of the userGroupMembership.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def get_user_group_membership(ctx, user_group_membership_id):
    """Fetch a single UserGroupMembership by OCID and render the response."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.get_user_group_membership(
            user_group_membership_id=user_group_membership_id))
@api_key_group.command(name=cli_util.override('list_api_keys.command_name', 'list'), help="""Lists the API signing keys for the specified user. A user can have a maximum of three keys.
Every user has permission to use this API call for *their own user ID*. An administrator in your organization does not need to write a policy to give users this ability.""")
@click.option('--user-id', required=True, help="""The OCID of the user.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def list_api_keys(ctx, user_id):
    """List the API signing keys for one user and render the response."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(identity_client.list_api_keys(user_id=user_id))
@availability_domain_group.command(name=cli_util.override('list_availability_domains.command_name', 'list'), help="""Lists the Availability Domains in your tenancy. Specify the OCID of either the tenancy or another of your compartments as the value for the compartment ID (remember that the tenancy is simply the root compartment). See [Where to Get the Tenancy's OCID and User's OCID].""")
@click.option('--compartment-id', required=True, help="""The OCID of the compartment (remember that the tenancy is simply the root compartment).""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def list_availability_domains(ctx, compartment_id):
    """List the availability domains visible to a compartment."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.list_availability_domains(
            compartment_id=compartment_id))
@compartment_group.command(name=cli_util.override('list_compartments.command_name', 'list'), help="""Lists the compartments in your tenancy. You must specify your tenancy's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment). See [Where to Get the Tenancy's OCID and User's OCID].""")
@click.option('--compartment-id', required=True, help="""The OCID of the compartment (remember that the tenancy is simply the root compartment).""")
@click.option('--page', help="""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@click.option('--limit', help="""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def list_compartments(ctx, compartment_id, page, limit):
    """List compartments, forwarding optional pagination parameters."""
    # Only pass pagination options the caller actually supplied.
    pagination = {
        name: value
        for name, value in (('page', page), ('limit', limit))
        if value is not None
    }
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.list_compartments(
            compartment_id=compartment_id, **pagination))
@group_group.command(name=cli_util.override('list_groups.command_name', 'list'), help="""Lists the groups in your tenancy. You must specify your tenancy's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment). See [Where to Get the Tenancy's OCID and User's OCID].""")
@click.option('--compartment-id', required=True, help="""The OCID of the compartment (remember that the tenancy is simply the root compartment).""")
@click.option('--page', help="""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@click.option('--limit', help="""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def list_groups(ctx, compartment_id, page, limit):
    """List groups in the tenancy, forwarding optional pagination."""
    pagination = {
        name: value
        for name, value in (('page', page), ('limit', limit))
        if value is not None
    }
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.list_groups(
            compartment_id=compartment_id, **pagination))
@identity_provider_group.command(name=cli_util.override('list_identity_providers.command_name', 'list'), help="""Lists all the identity providers in your tenancy. You must specify the identity provider type (e.g., `SAML2` for identity providers using the SAML2.0 protocol). You must specify your tenancy's OCID as the value for the compartment ID (remember that the tenancy is simply the root compartment). See [Where to Get the Tenancy's OCID and User's OCID].""")
@click.option('--type', required=True, help="""Identity provider type.""")
@click.option('--compartment-id', required=True, help="""The OCID of the compartment (remember that the tenancy is simply the root compartment).""")
@click.option('--page', help="""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@click.option('--limit', help="""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def list_identity_providers(ctx, type, compartment_id, page, limit):
    """List identity providers of a given protocol type.

    ``type`` shadows the builtin, but the name is fixed by the CLI
    option binding and cannot be renamed without changing the interface.
    """
    pagination = {
        name: value
        for name, value in (('page', page), ('limit', limit))
        if value is not None
    }
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.list_identity_providers(
            type=type, compartment_id=compartment_id, **pagination))
@idp_group_mapping_group.command(name=cli_util.override('list_idp_group_mappings.command_name', 'list'), help="""Lists the group mappings for the specified identity provider.""")
@click.option('--identity-provider-id', required=True, help="""The OCID of the identity provider.""")
@click.option('--page', help="""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@click.option('--limit', help="""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def list_idp_group_mappings(ctx, identity_provider_id, page, limit):
    """List the group mappings of one identity provider."""
    pagination = {
        name: value
        for name, value in (('page', page), ('limit', limit))
        if value is not None
    }
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.list_idp_group_mappings(
            identity_provider_id=identity_provider_id, **pagination))
@policy_group.command(name=cli_util.override('list_policies.command_name', 'list'), help="""Lists the policies in the specified compartment (either the tenancy or another of your compartments). See [Where to Get the Tenancy's OCID and User's OCID].
To determine which policies apply to a particular group or compartment, you must view the individual statements inside all your policies. There isn't a way to automatically obtain that information via the API.""")
@click.option('--compartment-id', required=True, help="""The OCID of the compartment (remember that the tenancy is simply the root compartment).""")
@click.option('--page', help="""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@click.option('--limit', help="""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def list_policies(ctx, compartment_id, page, limit):
    """List the policies in a compartment, forwarding optional pagination."""
    pagination = {
        name: value
        for name, value in (('page', page), ('limit', limit))
        if value is not None
    }
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.list_policies(
            compartment_id=compartment_id, **pagination))
@region_subscription_group.command(name=cli_util.override('list_region_subscriptions.command_name', 'list'), help="""Lists the region subscriptions for the specified tenancy.""")
@click.option('--tenancy-id', required=True, help="""The OCID of the tenancy.""")
@cli_util.help_option
@click.pass_context
@cli_util.wrap_exceptions
def list_region_subscriptions(ctx, tenancy_id):
    """List the region subscriptions of a tenancy and render the response."""
    identity_client = cli_util.build_client('identity', ctx)
    cli_util.render_response(
        identity_client.list_region_subscriptions(tenancy_id=tenancy_id))
@region_group.command(name=cli_util.override('list_regions.command_name', 'list'), help="""Lists all the regions offered by Oracle Bare | |
= _result
return _result
# Expose the graph/eager `shape` wrapper as the raw op `tf.raw_ops.Shape`.
Shape = tf_export("raw_ops.Shape")(_ops.to_raw_op(shape))
def shape_eager_fallback(input, out_type, name, ctx):
  # Slow eager path for `shape`, used when the C fast path raises
  # _FallbackException.  NOTE: machine-generated TensorFlow op wrapper --
  # regenerate rather than hand-edit if the op definition changes.
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  # Infer the dtype attr T from the single tensor argument.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "out_type", out_type)
  _result = _execute.execute(b"Shape", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Shape", _inputs_flat, _attrs, _result)
  # The op has exactly one output; unwrap it from the result list.
  _result, = _result
  return _result
def shape_n(input, out_type=_dtypes.int32, name=None):
  r"""Returns shape of tensors.

  This operation returns N 1-D integer tensors representing shape of `input[i]s`.

  Args:
    input: A list of at least 1 `Tensor` objects with the same type.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A list with the same length as `input` of `Tensor` objects with type `out_type`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: execute directly through the C API when possible.
    try:
      return pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "ShapeN", name,
          tld.op_callbacks, input, "out_type", out_type)
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return shape_n_eager_fallback(input, out_type=out_type, name=name,
                                    ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: validate inputs and register the op in the current graph.
  if not isinstance(input, (list, tuple)):
    raise TypeError(
        "Expected list for 'input' argument to "
        "'shape_n' Op, not %r." % input)
  num_tensors = len(input)
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  _, _, op, op_outputs = _op_def_library._apply_op_helper(
      "ShapeN", input=input, out_type=out_type, name=name)
  results = op_outputs[:]
  if _execute.must_record_gradient():
    op_attrs = ("N", op._get_attr_int("N"), "T", op._get_attr_type("T"),
                "out_type", op._get_attr_type("out_type"))
    _execute.record_gradient("ShapeN", op.inputs, op_attrs, results)
  return results
# Expose the graph builder as the tf.raw_ops.ShapeN endpoint.
ShapeN = tf_export("raw_ops.ShapeN")(_ops.to_raw_op(shape_n))


def shape_n_eager_fallback(input, out_type, name, ctx):
  """Slow-path eager execution of the ShapeN op via `_execute.execute`."""
  if not isinstance(input, (list, tuple)):
    raise TypeError(
        "Expected list for 'input' argument to "
        "'shape_n' Op, not %r." % input)
  num_tensors = len(input)
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  _attr_T, input = _execute.args_to_matching_eager(list(input), ctx)
  flat_inputs = list(input)
  op_attrs = ("N", num_tensors, "T", _attr_T, "out_type", out_type)
  # ShapeN produces one output per input tensor.
  results = _execute.execute(b"ShapeN", num_tensors, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("ShapeN", flat_inputs, op_attrs, results)
  return results
def size(input, out_type=_dtypes.int32, name=None):
  r"""Returns the size of a tensor.

  This operation returns an integer representing the number of elements in
  `input`.

  For example:

  ```
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  size(t) ==> 12
  ```

  Args:
    input: A `Tensor`.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: execute directly through the C API when possible.
    try:
      return pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "Size", name,
          tld.op_callbacks, input, "out_type", out_type)
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return size_eager_fallback(input, out_type=out_type, name=name,
                                 ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: register the op in the current graph.
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  _, _, op, op_outputs = _op_def_library._apply_op_helper(
      "Size", input=input, out_type=out_type, name=name)
  results = op_outputs[:]
  if _execute.must_record_gradient():
    op_attrs = ("T", op._get_attr_type("T"), "out_type",
                op._get_attr_type("out_type"))
    _execute.record_gradient("Size", op.inputs, op_attrs, results)
  out, = results
  return out
# Expose the graph builder as the tf.raw_ops.Size endpoint.
Size = tf_export("raw_ops.Size")(_ops.to_raw_op(size))


def size_eager_fallback(input, out_type, name, ctx):
  """Slow-path eager execution of the Size op via `_execute.execute`."""
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  flat_inputs = [input]
  op_attrs = ("T", _attr_T, "out_type", out_type)
  results = _execute.execute(b"Size", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Size", flat_inputs, op_attrs, results)
  # Size has exactly one output tensor.
  out, = results
  return out
def _slice(input, begin, size, name=None):
  r"""Return a slice from 'input'.

  The output tensor is a tensor with dimensions described by 'size'
  whose values are extracted from 'input' starting at the offsets in
  'begin'.

  *Requirements*:
    0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)

  Args:
    input: A `Tensor`.
    begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      begin[i] specifies the offset into the 'i'th dimension of
      'input' to slice from.
    size: A `Tensor`. Must have the same type as `begin`.
      size[i] specifies the number of elements of the 'i'th dimension
      of 'input' to slice. If size[i] is -1, all remaining elements in dimension
      i are included in the slice (i.e. this is equivalent to setting
      size[i] = input.dim_size(i) - begin[i]).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: execute directly through the C API when possible.
    try:
      return pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "Slice", name,
          tld.op_callbacks, input, begin, size)
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return _slice_eager_fallback(input, begin, size, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: register the op in the current graph.
  _, _, op, op_outputs = _op_def_library._apply_op_helper(
      "Slice", input=input, begin=begin, size=size, name=name)
  results = op_outputs[:]
  if _execute.must_record_gradient():
    op_attrs = ("T", op._get_attr_type("T"), "Index",
                op._get_attr_type("Index"))
    _execute.record_gradient("Slice", op.inputs, op_attrs, results)
  out, = results
  return out
# Expose the graph builder as the tf.raw_ops.Slice endpoint.
Slice = tf_export("raw_ops.Slice")(_ops.to_raw_op(_slice))


def _slice_eager_fallback(input, begin, size, name, ctx):
  """Slow-path eager execution of the Slice op via `_execute.execute`."""
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  # begin and size must share a single index dtype (int32 or int64).
  _attr_Index, (begin, size) = _execute.args_to_matching_eager(
      [begin, size], ctx)
  flat_inputs = [input, begin, size]
  op_attrs = ("T", _attr_T, "Index", _attr_Index)
  results = _execute.execute(b"Slice", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Slice", flat_inputs, op_attrs, results)
  # Slice has exactly one output tensor.
  out, = results
  return out
def snapshot(input, name=None):
  r"""Returns a copy of the input tensor.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: execute directly through the C API when possible.
    try:
      return pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "Snapshot", name,
          tld.op_callbacks, input)
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return snapshot_eager_fallback(input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: register the op in the current graph.
  _, _, op, op_outputs = _op_def_library._apply_op_helper(
      "Snapshot", input=input, name=name)
  results = op_outputs[:]
  if _execute.must_record_gradient():
    _execute.record_gradient("Snapshot", op.inputs,
                             ("T", op._get_attr_type("T")), results)
  out, = results
  return out
# Expose the graph builder as the tf.raw_ops.Snapshot endpoint.
Snapshot = tf_export("raw_ops.Snapshot")(_ops.to_raw_op(snapshot))


def snapshot_eager_fallback(input, name, ctx):
  """Slow-path eager execution of the Snapshot op via `_execute.execute`."""
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  flat_inputs = [input]
  op_attrs = ("T", _attr_T)
  results = _execute.execute(b"Snapshot", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Snapshot", flat_inputs, op_attrs, results)
  # Snapshot has exactly one output tensor.
  out, = results
  return out
def space_to_batch(input, paddings, block_size, name=None):
r"""SpaceToBatch for 4-D tensors of type T.
This is a legacy version of the more general SpaceToBatchND.
Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
More specifically, this op outputs a copy of the input tensor where values from
the `height` and `width` dimensions are moved to the `batch` dimension. After
the zero-padding, both `height` and `width` of the input must be divisible by the
block size.
Args:
input: A `Tensor`. 4-D with shape `[batch, height, width, depth]`.
paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
the padding of the input with zeros across the spatial dimensions as follows:
paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
The effective spatial dimensions of the zero-padded input tensor will be:
height_pad = pad_top + height + pad_bottom
width_pad = pad_left + width + pad_right
The attr `block_size` must be greater than one. It indicates the block size.
* Non-overlapping blocks of size `block_size x block size` in the height and
width dimensions are rearranged into the batch dimension at each location.
* The batch of the output tensor is `batch * block_size * block_size`.
* Both height_pad and width_pad must be divisible by block_size.
The shape of the output will be:
[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
depth]
Some examples:
(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
```
x = [[[[1], [2]], [[3], [4]]]]
```
The output tensor has shape `[4, 1, 1, 1]` and value:
```
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```
(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
```
x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
The output |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.