signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_members(self, name):
    """Return the member interfaces for the specified Port-Channel.

    Args:
        name (str): The Port-Channel interface name to return the member
            interfaces for.

    Returns:
        A list of physical interface names that belong to the specified
        interface.
    """
    # The group id is the numeric portion of the interface name.
    group_id = re.search(r'(\d+)', name).group()
    cli = 'show port-channel %s all-ports' % group_id
    response = self.node.enable(cli, 'text')
    output = response[0]['result']['output']
    # Match Ethernet member names, skipping remote "PeerEthernet" entries.
    return re.findall(r'\b(?!Peer)Ethernet[\d/]*\b', output)
def set_pipeline(self, pipeline):
    """Specify the pipeline.

    See get_pipeline_alternatives to see what pipelines are available.

    Parameters
    ----------
    pipeline : str
        Name of a directory under ``<BIDS_dir>/derivatives/``.
    """
    # Record this call (function name + arguments) in the object's history.
    self.add_history(inspect.stack()[0][3], locals(), 1)
    if not os.path.exists(self.BIDS_dir + '/derivatives/' + pipeline):
        # Fixed message typo: "direvative" -> "derivative".
        print('Specified derivative directory not found.')
        # Help the user by listing what is actually available.
        self.get_pipeline_alternatives()
    else:
        # Todo: perform check that pipeline is valid
        self.pipeline = pipeline
def run(self):
    """Compute cleared offers and bids.

    Returns the (offers, bids) pair after the market has been cleared,
    or after all offers/bids have been zeroed out on OPF failure.
    """
    # Start the clock.
    started_at = time.time()
    # Manage reactive power offers/bids.
    reactive = self._isReactiveMarket()
    # Withhold offers/bids outwith optional price limits.
    self._withholdOffbids()
    # Convert offers/bids to pwl functions and update limits.
    self._offbidToCase()
    # Compute dispatch points and LMPs using OPF.
    solved = self._runOPF()
    if not solved:
        # Non-convergent OPF: clear everything to zero and reject all bids.
        for offbid in self.offers + self.bids:
            offbid.clearedQuantity = 0.0
            offbid.clearedPrice = 0.0
            offbid.accepted = False
            offbid.generator.p = 0.0
        logger.error("Non-convergent market OPF. Blackout!")
    else:
        # Get nodal marginal prices from OPF.
        gtee_offer_price, gtee_bid_price = self._nodalPrices(reactive)
        # Determine quantity and price for each offer/bid.
        self._runAuction(gtee_offer_price, gtee_bid_price, reactive)
        logger.info("SmartMarket cleared in %.3fs" % (time.time() - started_at))
    return self.offers, self.bids
def batch(self, table_name, timeout=None):
    '''Creates a batch object which can be used as a context manager.
    Commits the batch on exit.

    :param str table_name:
        The name of the table to commit the batch to.
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    table_batch = TableBatch(self.require_encryption, self.key_encryption_key, self.encryption_resolver_function)
    yield table_batch
    # Execution resumes here when the context manager block exits;
    # persist everything queued on the batch.
    self.commit_batch(table_name, table_batch, timeout=timeout)
def InitFromDataPoints(self, start_stats, complete_stats):
    """Initialize this object from started/completed client statistics.

    The original docstring ("Check that this approval applies to the given
    token") was a copy-paste error and did not describe this method.

    Args:
        start_stats: A list of lists, each containing two values (a timestamp
            and the number of clients started at this time).
        complete_stats: A list of lists, each containing two values (a
            timestamp and the number of clients completed at this time).

    Returns:
        A reference to the current instance to allow method chaining.
    """
    self.start_points = self._ConvertToResultList(start_stats)
    self.complete_points = self._ConvertToResultList(complete_stats)
    return self
def render(self, **kwargs):
    """Renders the HTML representation of the element."""
    for child in self._parent._children.values():
        # Only controllable Layer children participate in the control.
        if not isinstance(child, Layer) or not child.control:
            continue
        label = child.layer_name
        if child.overlay:
            self.overlays[label] = child.get_name()
            # Overlays that start hidden are untoggled.
            if not child.show:
                self.layers_untoggle[label] = child.get_name()
        else:
            self.base_layers[label] = child.get_name()
            # Only one base layer stays toggled at a time.
            if len(self.base_layers) > 1:
                self.layers_untoggle[label] = child.get_name()
    super(LayerControl, self).render()
def docs(output=DOC_OUTPUT, proj_settings=PROJ_SETTINGS, github=False):
    """Generate API documentation (using Sphinx).

    :param output: Output directory.
    :param proj_settings: Django project settings to use.
    :param github: Convert to GitHub-friendly format?
    """
    build_cmd = ("export PYTHONPATH='' && "
                 "export DJANGO_SETTINGS_MODULE=%s && "
                 "sphinx-build -b html %s %s" % (proj_settings, DOC_INPUT, output))
    local(build_cmd, capture=False)
    if _parse_bool(github):
        # GitHub Pages skips Jekyll processing when .nojekyll is present.
        local("touch %s/.nojekyll" % output, capture=False)
def setEmergencyDecel(self, typeID, decel):
    """setEmergencyDecel(string, double) -> None

    Sets the maximal physically possible deceleration in m/s^2 of vehicles
    of this type.

    (The original docstring named the wrong method, "setDecel".)
    """
    self._connection._sendDoubleCmd(tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_EMERGENCY_DECEL, typeID, decel)
def short_full_symbol(self):
    """Gets the full symbol excluding the character under the cursor.

    The result is computed once and memoized on the instance.
    """
    if self._short_full_symbol is not None:
        return self._short_full_symbol
    # First access: extract and cache the symbol.
    self._short_full_symbol = self._symbol_extract(cache.RE_FULL_CURSOR, False, True)
    return self._short_full_symbol
def get_rubric(self):
    """Gets the rubric.

    return: (osid.assessment.AssessmentOffered) - the assessment offered
    raise:  IllegalState - ``has_rubric()`` is ``false``
    raise:  OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.resource.Resource.get_avatar_template
    # Guard: a falsy rubricId in the underlying map means no rubric exists.
    if not bool(self._my_map['rubricId']):
        raise errors.IllegalState('this AssessmentOffered has no rubric')
    mgr = self._get_provider_manager('ASSESSMENT')
    # The provider must support lookup before we can resolve the id.
    if not mgr.supports_assessment_offered_lookup():
        raise errors.OperationFailed('Assessment does not support AssessmentOffered lookup')
    # _proxy may not exist on all instances, hence getattr with default.
    lookup_session = mgr.get_assessment_offered_lookup_session(proxy=getattr(self, "_proxy", None))
    # Search across all federated banks, not just the current one.
    lookup_session.use_federated_bank_view()
    osid_object = lookup_session.get_assessment_offered(self.get_rubric_id())
    return osid_object
def addSource(self, path, name, location, copyLib=False, copyGroups=False, copyInfo=False, copyFeatures=False, muteKerning=False, muteInfo=False, mutedGlyphNames=None, familyName=None, styleName=None, ):
    """Add a new UFO source to the document.

    * path: path to this UFO, will be written as a relative path to the document path.
    * name: reference name for this source
    * location: name of the location for this UFO
    * copyLib: copy the contents of this source to instances
    * copyGroups: copy the groups of this source to instances
    * copyInfo: copy the non-numerical fields from this source.info to instances.
    * copyFeatures: copy the feature text from this source to instances
    * muteKerning: mute the kerning data from this source
    * muteInfo: mute the font info data from this source
    * familyName: family name for this UFO (to be able to work on the names without reading the whole UFO)
    * styleName: style name for this UFO (to be able to work on the names without reading the whole UFO)

    Note: no separate flag for mute font: the source is just not added.
    """
    sourceElement = ET.Element("source")
    sourceElement.attrib['filename'] = self._posixPathRelativeToDocument(path)
    sourceElement.attrib['name'] = name
    # The three simple "copy this part to instances" flags all produce the
    # same <tag copy="1"/> child; build them from one table.
    for flag, tag in ((copyLib, 'lib'), (copyGroups, 'groups'), (copyFeatures, 'features')):
        if flag:
            copyElement = ET.Element(tag)
            copyElement.attrib['copy'] = "1"
            sourceElement.append(copyElement)
    if copyInfo or muteInfo:
        # info may carry both copy and mute flags at once.
        infoElement = ET.Element('info')
        if copyInfo:
            infoElement.attrib['copy'] = "1"
        if muteInfo:
            infoElement.attrib['mute'] = "1"
        sourceElement.append(infoElement)
    if muteKerning:
        # add kerning element to the source
        kerningElement = ET.Element("kerning")
        kerningElement.attrib["mute"] = '1'
        sourceElement.append(kerningElement)
    if mutedGlyphNames:
        # add muted glyphnames to the source; use a distinct loop variable
        # (the original shadowed the ``name`` parameter here).
        for glyphName in mutedGlyphNames:
            glyphElement = ET.Element("glyph")
            glyphElement.attrib["name"] = glyphName
            glyphElement.attrib["mute"] = '1'
            sourceElement.append(glyphElement)
    if familyName is not None:
        sourceElement.attrib['familyname'] = familyName
    if styleName is not None:
        sourceElement.attrib['stylename'] = styleName
    # The location child always comes last.
    sourceElement.append(self._makeLocationElement(location))
    self.root.findall('.sources')[0].append(sourceElement)
def _on_permission_result ( self , code , perms , results ) :
"""Handles a permission request result by passing it to the
handler with the given code ."""
|
# : Get the handler for this request
handler = self . _permission_requests . get ( code , None )
if handler is not None :
del self . _permission_requests [ code ]
# : Invoke that handler with the permission request response
handler ( code , perms , results )
|
def _find_new_partners(self):
    """Search the token network for potential channel partners.

    Returns a shuffled list of participant addresses we do not already
    have an open channel with (excluding ourselves and the bootstrap
    address).
    """
    open_channels = views.get_channelstate_open(
        chain_state=views.state_from_raiden(self.raiden),
        payment_network_id=self.registry_address,
        token_address=self.token_address,
    )
    # Addresses we must not connect to: existing partners, the bootstrap
    # pseudo-address and our own node.
    excluded = {channel.partner_state.address for channel in open_channels}
    excluded.add(self.BOOTSTRAP_ADDR)
    excluded.add(self.raiden.address)
    participants_addresses = views.get_participants_addresses(
        views.state_from_raiden(self.raiden),
        self.registry_address,
        self.token_address,
    )
    candidates = list(participants_addresses - excluded)
    # Randomize so repeated calls do not always pick the same partners.
    shuffle(candidates)
    log.debug('Found partners', node=pex(self.raiden.address), number_of_partners=len(candidates), )
    return candidates
def readall(self):
    """Read and return all the bytes from the stream until EOF.

    Returns:
        bytes: Object content
    """
    if not self._readable:
        raise UnsupportedOperation('read')
    # The lock protects both the read and the seek-position update.
    with self._seek_lock:
        with handle_os_exceptions():
            # Get data starting from seek
            if self._seek and self._seekable:
                data = self._read_range(self._seek)
            # Get all data
            else:
                data = self._readall()
        # Update seek
        self._seek += len(data)
        return data
    # NOTE(review): indentation reconstructed from flattened source --
    # the seek update and return are assumed to sit inside the lock but
    # outside handle_os_exceptions(); confirm against the original file.
def createConnection(self):
    """Return a card connection thru a remote reader.

    The remote reader hands back a URI, which is wrapped in a Pyro
    attribute proxy for transparent remote access.
    """
    remote_uri = self.reader.createConnection()
    return Pyro.core.getAttrProxyForURI(remote_uri)
def _bind_ldap(self, ldap, con, username, password):
    """Private to bind/Authenticate a user.

    If AUTH_LDAP_BIND_USER exists then it will bind first with it,
    next will search the LDAP server using the username with UID
    and try to bind to it (OpenLDAP).
    If AUTH_LDAP_BIND_USER does not exist, will bind with username/password.

    Returns True on successful bind, False on failure or invalid
    credentials.
    """
    try:
        if self.auth_ldap_bind_user:
            # Indirect bind: authenticate as the service account first,
            # then look up the user's DN to bind as.
            self._bind_indirect_user(ldap, con)
            user = self._search_ldap(ldap, con, username)
            if user:
                log.debug("LDAP got User {0}".format(user))
                # username = DN from search
                username = user[0][0]
            else:
                # No matching directory entry: authentication fails.
                return False
        log.debug("LDAP bind with: {0} {1}".format(username, "XXXXXX"))
        # Optional rewriting of the bind name (template and/or domain suffix).
        if self.auth_ldap_username_format:
            username = self.auth_ldap_username_format % username
        if self.auth_ldap_append_domain:
            username = username + "@" + self.auth_ldap_append_domain
        con.bind_s(username, password)
        log.debug("LDAP bind OK: {0}".format(username))
        return True
    except ldap.INVALID_CREDENTIALS:
        # Wrong password (or DN): treat as a normal failed login.
        return False
def reply_to_request(cls, req_msg, *args):
    """Helper method for creating reply messages to a specific request.

    Copies the message name and message identifier from request message.

    Parameters
    ----------
    req_msg : katcp.core.Message instance
        The request message that this inform is in reply to.
    args : list of strings
        The message arguments.
    """
    reply_name = req_msg.name
    reply_mid = req_msg.mid
    return cls(cls.REPLY, reply_name, args, reply_mid)
def removeRnaQuantificationSet(self, rnaQuantificationSet):
    """Removes the specified rnaQuantificationSet from this repository.

    This performs a cascading removal of all items within this
    rnaQuantificationSet.
    """
    target_id = rnaQuantificationSet.getId()
    delete_query = models.Rnaquantificationset.delete().where(
        models.Rnaquantificationset.id == target_id)
    delete_query.execute()
def optical_flow_send(self, time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance, force_mavlink1=False):
    '''Optical flow from a flow sensor (e.g. optical mouse sensor)

    time_usec         : Timestamp (UNIX) (uint64_t)
    sensor_id         : Sensor ID (uint8_t)
    flow_x            : Flow in pixels * 10 in x-sensor direction (dezi-pixels) (int16_t)
    flow_y            : Flow in pixels * 10 in y-sensor direction (dezi-pixels) (int16_t)
    flow_comp_m_x     : Flow in meters in x-sensor direction, angular-speed compensated (float)
    flow_comp_m_y     : Flow in meters in y-sensor direction, angular-speed compensated (float)
    quality           : Optical flow quality / confidence. 0: bad, 255: maximum quality (uint8_t)
    ground_distance   : Ground distance in meters. Positive value: distance known. Negative value: Unknown distance (float)
    '''
    # Encode first, then hand the packed message to the transport.
    encoded = self.optical_flow_encode(time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance)
    return self.send(encoded, force_mavlink1=force_mavlink1)
def delete_alert(self, id, **kwargs):  # noqa: E501
    """Delete a specific alert  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_alert(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerAlert
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers always want the payload only, never the full HTTP triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async path: return the request thread directly.
        return self.delete_alert_with_http_info(id, **kwargs)  # noqa: E501
    # Sync path: unwrap the data from the HTTP-info call.
    (data) = self.delete_alert_with_http_info(id, **kwargs)  # noqa: E501
    return data
def gatk_major_version(self):
    """Retrieve the GATK major version, handling multiple GATK distributions.

    Has special cases for GATK nightly builds, Appistry releases and
    GATK prior to 2.3.
    """
    full_version = self.get_gatk_version()
    # Working with a recent version if using nightlies
    if full_version.startswith("nightly-"):
        return "3.6"
    parts = full_version.split("-")
    count = len(parts)
    if count == 4:
        # Appistry layout: <release>-<version>-<subversion>-<githash>
        version = parts[1]
    elif count in (3, 2, 1):
        # <version>[-<subversion>[-<githash>]]
        version = parts[0]
    else:
        # version was not properly implemented in earlier GATKs
        version = "2.3"
    # Strip the optional leading "v" (e.g. "v3.5" -> "3.5").
    return version[1:] if version.startswith("v") else version
def decode(col, charset):
    """Computes the first argument into a string from a binary using the
    provided character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8',
    'UTF-16BE', 'UTF-16LE', 'UTF-16').
    """
    sc = SparkContext._active_spark_context
    # Delegate to the JVM-side Spark SQL function.
    jcol = sc._jvm.functions.decode(_to_java_column(col), charset)
    return Column(jcol)
def report(args):
    """Create report in html format"""
    logger.info("reading sequeces")
    data = load_data(args.json)
    logger.info("create profile")
    profiles_dir = os.path.join(args.out, "profiles")
    data = make_profile(data, profiles_dir, args)
    logger.info("create database")
    make_database(data, "seqcluster.db", args.out)
    logger.info("Done. Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.")
def get_curie(self, uri):
    '''Get a CURIE from a URI.

    Returns "prefix:suffix" when a known prefix matches the URI,
    otherwise None.
    '''
    prefix = self.get_curie_prefix(uri)
    if prefix is None:
        return None
    # Drop the registered base IRI and keep only the local part.
    base = self.curie_map[prefix]
    return '%s:%s' % (prefix, uri[len(base):len(uri)])
def record_received(self, msg):
    """Handle ALDB record received from device."""
    rec = ALDBRecord.create_from_userdata(msg.userdata)
    self._records[rec.mem_addr] = rec
    _LOGGER.debug('ALDB Record: %s', rec)
    # We are done once a single-record load completes or every record
    # has arrived.
    should_release = (self._load_action.rec_count == 1 or self._have_all_records())
    if self._is_first_record(rec):
        # Remember where the ALDB starts.
        self._mem_addr = rec.mem_addr
    if should_release and self._rec_mgr_lock.locked():
        _LOGGER.debug('Releasing lock because record received')
        self._rec_mgr_lock.release()
def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ=7):
    '''def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ=7): Convert a lunar date
    to the corresponding solar date.

    Returns [day, month, year] from jdToDate, or [0, 0, 0] when the
    requested leap month does not exist in that lunar year.
    '''
    # a11/b11 are the Julian day numbers of the 11th lunar months that
    # bracket the requested month.
    if (lunarM < 11):
        a11 = getLunarMonth11(lunarY - 1, tZ)
        b11 = getLunarMonth11(lunarY, tZ)
    else:
        a11 = getLunarMonth11(lunarY, tZ)
        b11 = getLunarMonth11(lunarY + 1, tZ)
    # k = number of mean synodic months (29.530588853 days) since the
    # reference new moon at JD 2415021.076998695 -- presumably the
    # 1900-01-01 epoch used by this algorithm; confirm against the
    # companion getNewMoonDay implementation.
    k = int(0.5 + (a11 - 2415021.076998695) / 29.530588853)
    # Month offset relative to month 11, normalized into [0, 11].
    off = lunarM - 11
    if (off < 0):
        off += 12
    # A span longer than 365 days between consecutive 11th months means
    # the lunar year contains a leap month.
    if (b11 - a11 > 365):
        leapOff = getLeapMonthOffset(a11, tZ)
        leapM = leapOff - 2
        if (leapM < 0):
            leapM += 12
        if (lunarLeap != 0 and lunarM != leapM):
            # Caller asked for a leap month that is not the leap month
            # of this year: no such date exists.
            return [0, 0, 0]
        elif (lunarLeap != 0 or off >= leapOff):
            # Skip past the inserted leap month.
            off += 1
    monthStart = getNewMoonDay(k + off, tZ)
    return jdToDate(monthStart + lunarD - 1)
def write_generator_data(self, file):
    """Writes generator data in MATPOWER format.

    Emits the header comment rows followed by one tab-separated row per
    generator of ``self.case``.
    """
    attr_names = ["p", "q", "q_max", "q_min", "v_magnitude", "base_mva", "online", "p_max", "p_min", "mu_pmax", "mu_pmin", "mu_qmax", "mu_qmin"]
    file.write("\n%%%% generator data\n")
    file.write("%%\tbus\tPg\tQg\tQmax\tQmin\tVg\tmBase\tstatus\tPmax\tPmin")
    file.write("\tmu_Pmax\tmu_Pmin\tmu_Qmax\tmu_Qmin")
    file.write("\n%sgen = [\n" % self._prefix)
    row_format = "\t%d\t%g\t%g\t%g\t%g\t%.8g\t%g\t%d\t%g\t%g\t%g\t%g" "\t%g\t%g;\n"
    for generator in self.case.generators:
        # Bus index first, then the thirteen generator attributes.
        row = [generator.bus._i] + [getattr(generator, a) for a in attr_names]
        assert len(row) == 14
        file.write(row_format % tuple(row))
    file.write("];\n")
def _init_alphabet_from_tokens(self, tokens):
    """Initialize alphabet from an iterable of token or subtoken strings."""
    # Include all characters from all tokens in the alphabet to guarantee
    # that any token can be encoded. Additionally, include all escaping
    # characters.
    characters = set()
    for token in tokens:
        characters.update(token)
    self._alphabet = characters
    self._alphabet |= _ESCAPE_CHARS
def _detect_iplus(self):
    """Check the DCNM version and determine if it's for iplus.

    Compares ``self._cur_ver`` against ``self._base_ver`` (both shaped
    like ``major.minor(build)``) and sets ``self._is_iplus``.
    """
    # Raw string: '\.' and '\(' are regex escapes, not string escapes.
    # (The original non-raw literal produced invalid escape sequences,
    # a SyntaxWarning on modern Python.) The pointless bare
    # ``re.compile(ver_expr)`` whose result was discarded is gone; the
    # compiled pattern is now actually used for both matches.
    ver_re = re.compile(r"([0-9]+)\.([0-9]+)\((.*)\)")
    v1 = ver_re.match(self._cur_ver)
    v2 = ver_re.match(self._base_ver)
    if int(v1.group(1)) > int(v2.group(1)):
        self._is_iplus = True
    elif int(v1.group(1)) == int(v2.group(1)):
        if int(v1.group(2)) > int(v2.group(2)):
            self._is_iplus = True
        elif int(v1.group(2)) == int(v2.group(2)):
            # Same major.minor: fall back to a string compare of the
            # parenthesised build component.
            self._is_iplus = v1.group(3) >= v2.group(3)
    # NOTE(review): when the current version is strictly lower than the
    # base version, _is_iplus is left unchanged (as in the original) --
    # presumably it is initialised elsewhere; confirm before relying on
    # the LOG line below in that case.
    LOG.info("DCNM version: %(cur_ver)s, iplus: %(is_iplus)s", {'cur_ver': self._cur_ver, 'is_iplus': self._is_iplus})
def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False):
    """Parse and validate a MongoDB URI.

    Returns a dict of the form::

        'nodelist': <list of (host, port) tuples>,
        'username': <username> or None,
        'password': <password> or None,
        'database': <database name> or None,
        'collection': <collection name> or None,
        'options': <dict of MongoDB URI options>

    If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be
    done to build nodelist and options.

    :Parameters:
        - `uri`: The MongoDB URI to parse.
        - `default_port`: The port number to use when one wasn't specified
          for a host in the URI.
        - `validate`: If ``True`` (the default), validate and normalize all
          options.
        - `warn` (optional): When validating, if ``True`` then will warn
          the user then ignore any invalid options or values. If ``False``,
          validation will error when options are unsupported or values are
          invalid.

    .. versionchanged:: 3.6
       Added support for mongodb+srv:// URIs

    .. versionchanged:: 3.5
       Return the original value of the ``readPreference`` MongoDB URI
       option instead of the validated read preference mode.

    .. versionchanged:: 3.1
       ``warn`` added so invalid options can be ignored.
    """
    # Determine the scheme and strip it off.
    if uri.startswith(SCHEME):
        is_srv = False
        scheme_free = uri[SCHEME_LEN:]
    elif uri.startswith(SRV_SCHEME):
        # SRV resolution needs dnspython at runtime.
        if not _HAVE_DNSPYTHON:
            raise ConfigurationError('The "dnspython" module must be ' 'installed to use mongodb+srv:// URIs')
        is_srv = True
        scheme_free = uri[SRV_SCHEME_LEN:]
    else:
        raise InvalidURI("Invalid URI scheme: URI must " "begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME))
    if not scheme_free:
        raise InvalidURI("Must provide at least one hostname or IP.")
    user = None
    passwd = None
    dbase = None
    collection = None
    options = {}
    # Split "<hosts>/<database>?<options>" at the first '/'.
    host_part, _, path_part = scheme_free.partition('/')
    if not host_part:
        host_part = path_part
        path_part = ""
    if not path_part and '?' in host_part:
        # Options without the '/' separator are malformed.
        raise InvalidURI("A '/' is required between " "the host list and any options.")
    if '@' in host_part:
        # rpartition: credentials may themselves contain (encoded) '@'.
        userinfo, _, hosts = host_part.rpartition('@')
        user, passwd = parse_userinfo(userinfo)
    else:
        hosts = host_part
    if '/' in hosts:
        raise InvalidURI("Any '/' in a unix domain socket must be" " percent-encoded: %s" % host_part)
    hosts = unquote_plus(hosts)
    if is_srv:
        # SRV form: exactly one FQDN and no explicit port.
        nodes = split_hosts(hosts, default_port=None)
        if len(nodes) != 1:
            raise InvalidURI("%s URIs must include one, " "and only one, hostname" % (SRV_SCHEME,))
        fqdn, port = nodes[0]
        if port is not None:
            raise InvalidURI("%s URIs must not include a port number" % (SRV_SCHEME,))
        nodes = _get_dns_srv_hosts(fqdn)
        # Require at least "host.<domain>.<tld>" so the parent-domain
        # check below is meaningful.
        try:
            plist = fqdn.split(".")[1:]
        except Exception:
            raise ConfigurationError("Invalid URI host")
        slen = len(plist)
        if slen < 2:
            raise ConfigurationError("Invalid URI host")
        # Every SRV target must share the query's parent domain.
        for node in nodes:
            try:
                nlist = node[0].split(".")[1:][-slen:]
            except Exception:
                raise ConfigurationError("Invalid SRV host")
            if plist != nlist:
                raise ConfigurationError("Invalid SRV host")
        # TXT records may carry a restricted set of extra options.
        dns_options = _get_dns_txt_options(fqdn)
        if dns_options:
            options = split_options(dns_options, validate, warn)
            if set(options) - _ALLOWED_TXT_OPTS:
                raise ConfigurationError("Only authSource and replicaSet are supported from DNS")
        # SRV implies TLS by default.
        options["ssl"] = True if validate else 'true'
    else:
        nodes = split_hosts(hosts, default_port=default_port)
    if path_part:
        if path_part[0] == '?':
            # No database segment, only options.
            opts = unquote_plus(path_part[1:])
        else:
            dbase, _, opts = map(unquote_plus, path_part.partition('?'))
            if '.' in dbase:
                # "db.collection" shorthand.
                dbase, collection = dbase.split('.', 1)
            if _BAD_DB_CHARS.search(dbase):
                raise InvalidURI('Bad database name "%s"' % dbase)
        if opts:
            options.update(split_options(opts, validate, warn))
    if dbase is not None:
        dbase = unquote_plus(dbase)
    if collection is not None:
        collection = unquote_plus(collection)
    return {'nodelist': nodes, 'username': user, 'password': passwd, 'database': dbase, 'collection': collection, 'options': options}
def sample_folder(prj, sample):
    """Get the path to this Project's root folder for the given Sample.

    :param attmap.PathExAttMap | Project prj: project with which sample is
        associated
    :param Mapping sample: Sample or sample data for which to get root
        output folder path.
    :return str: this Project's root folder for the given Sample
    """
    results_root = prj.metadata.results_subdir
    return os.path.join(results_root, sample["sample_name"])
def describe_load_balancers(names=None, load_balancer_arns=None, region=None, key=None, keyid=None, profile=None):
    '''Describes the specified load balancer or all of your load balancers.

    Returns: list

    CLI example:

    .. code-block:: bash

        salt myminion boto_elbv2.describe_load_balancers
        salt myminion boto_elbv2.describe_load_balancers alb_name
        salt myminion boto_elbv2.describe_load_balancers "[alb_name,alb_name]"
    '''
    if names and load_balancer_arns:
        raise SaltInvocationError('At most one of names or load_balancer_arns may ' 'be provided')
    # Whichever selector was given (or None for "describe everything").
    selector = names if names else (load_balancer_arns if load_balancer_arns else None)
    albs_list = []
    if selector:
        # Accept either a single name/ARN string or an iterable of them.
        if isinstance(selector, (str, six.text_type)):
            albs_list.append(selector)
        else:
            albs_list.extend(selector)
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        if names:
            ret = conn.describe_load_balancers(Names=albs_list)['LoadBalancers']
        elif load_balancer_arns:
            ret = conn.describe_load_balancers(LoadBalancerArns=albs_list)['LoadBalancers']
        else:
            # No filter: page through every load balancer.
            ret = []
            next_marker = ''
            while True:
                page = conn.describe_load_balancers(Marker=next_marker)
                ret.extend(page['LoadBalancers'])
                if 'NextMarker' in page:
                    next_marker = page['NextMarker']
                else:
                    break
        return ret if ret else []
    except ClientError as error:
        log.warning(error)
        return False
def kill(self, exc_info=None):
    """Kill the container in a semi-graceful way.

    Entrypoints are killed, followed by any active worker threads.
    Next, dependencies are killed. Finally, any remaining managed threads
    are killed.

    If ``exc_info`` is provided, the exception will be raised by
    :meth:`~wait``.
    """
    if self._being_killed:
        # this happens if a managed thread exits with an exception
        # while the container is being killed or if multiple errors
        # happen simultaneously
        _log.debug('already killing %s ... waiting for death', self)
        try:
            self._died.wait()
        except:
            pass  # don't re-raise if we died with an exception
        return
    self._being_killed = True
    if self._died.ready():
        # Already fully stopped: nothing to do.
        _log.debug('already stopped %s', self)
        return
    if exc_info is not None:
        _log.info('killing %s due to %s', self, exc_info[1])
    else:
        _log.info('killing %s', self)

    # protect against extensions that throw during kill; the container
    # is already dying with an exception, so ignore anything else
    def safely_kill_extensions(ext_set):
        try:
            ext_set.kill()
        except Exception as exc:
            _log.warning('Extension raised `%s` during kill', exc)

    # Shutdown order: entrypoints first (stop new work), then workers,
    # then dependency extensions, then any remaining managed threads.
    safely_kill_extensions(self.entrypoints.all)
    self._kill_worker_threads()
    safely_kill_extensions(self.extensions.all)
    self._kill_managed_threads()
    self.started = False
    # if `kill` is called after `stop`, they race to send this
    if not self._died.ready():
        self._died.send(None, exc_info)
def burnstage_upgrade(self, **keyw):
    """Locate core burning stages and compute burning lifetimes.

    Scans the central zone of the model for the successive depletion of
    H-1, He-4, C-12, Ne-20 and O-16, recording the cycle number, stellar
    age and central abundance at which each element crosses a set of
    abundance thresholds.  Matched ``*_start``/``*_end`` event pairs are
    then converted into burning lifetimes.

    Parameters
    ----------
    keyw : dict
        Keyword arguments.  All change the names of the fields used to
        read data from the HDF5 files, except ``cyclefin``:

        | Keyword Argument | Default Value |
        | abund            | "iso_massf"   |
        | mass             | "mass"        |
        | age              | "age"         |
        | cyclefin         | 1.e99         |

        ``cyclefin`` is the last timestep to use when reading files.

    Returns
    -------
    list
        ``[burn_cycles, burn_ages, burn_abun, burn_type, burn_lifetime]``:
        the cycle numbers at which the central abundance crossed each
        threshold, the corresponding ages, abundances and burning-stage
        labels, and the ages elapsed between matched start/end events.
    """
    keyw.setdefault("mass", "mass")
    keyw.setdefault("age", "age")
    keyw.setdefault("abund", "iso_massf")
    cyclefin = keyw.get("cyclefin", 1.e99)

    burn_cycles = []
    burn_ages = []
    burn_abun = []
    burn_type = []
    burn_lifetime = []

    def record(i, abun, label):
        # Store one threshold-crossing event at timestep index i.
        burn_cycles.append(cycles_list[i])
        burn_ages.append(age_list[i])
        burn_abun.append(abun)
        burn_type.append(label)

    def scan_element(i, cen, cennext, startmax, elem, thresholds):
        # Record all crossings for one element at timestep index i.
        # Returns (started, ended): whether the '<elem>_start' event and
        # the depletion ('_end') threshold were crossed during this step.
        started = False
        ended = False
        if cennext < startmax - 0.003 and cen >= startmax - 0.003:
            record(i, cen, elem + '_start')
            started = True
        for thr, suffix in thresholds:
            if cennext < thr and cen >= thr:
                record(i, thr, elem + suffix)
                if suffix == '_end':
                    ended = True
        return started, ended

    def collect_lifetimes():
        # Pair each '*_start' with the following '*_end' and accumulate
        # the age difference of every pair (shared by the early-exit and
        # normal-exit paths; previously duplicated inline).
        pair = False
        age1 = -1
        for j in range(len(burn_type)):
            if 'start' in burn_type[j] and not pair:
                age1 = burn_ages[j]
                pair = True
            elif 'end' in burn_type[j] and pair:
                age2 = burn_ages[j]
                pair = False
                if age1 != -1:
                    burn_lifetime.append(age2 - age1)
                    age1 = -1

    # Thresholds recorded for each element, in check order; '_end'
    # marks the depletion threshold of that element.
    plain = ((1.e-1, ''), (1.e-2, ''), (1.e-3, ''), (1.e-4, ''))
    h_thresholds = plain + ((1.e-5, '_end'), (1.e-6, ''))
    heco_thresholds = plain + ((1.e-5, '_end'), (1.e-6, ''), (1.e-9, ''))
    ne_thresholds = ((1.e-1, ''), (1.e-2, ''), (1.e-3, '_end'),
                     (1.e-4, ''), (1.e-5, ''), (1.e-6, ''), (1.e-9, ''))

    # Maximum abundance reached by each "daughter" fuel, captured when
    # the parent fuel is depleted; used to detect ignition of the next
    # burning stage.
    hemax, cmax, nemax, omax = 0., 0., 0., 0.
    hburn_logic = True
    hburn_start_logic = False

    cyc = self.se.cycles
    sparsity_factor = int(1)
    cycles_list = list(range(int(cyc[0]), int(cyc[len(cyc) - 1]),
                             (int(cyc[1]) - int(cyc[0])) * sparsity_factor))
    age_list = self.se.get(keyw["age"])

    # Read only the isotopes needed to identify the burning stages;
    # specie_index maps isotope name -> position in abund_list rows.
    useful_species = species_list("burn_stages")
    specie_index = dict((iso, idx) for idx, iso in enumerate(useful_species))

    # Determine the data ordering: the centre of the star may be either
    # the first or the last shell.
    xm_init = self.se.get(0, keyw["mass"])
    central_zone = 0
    external_zone = -1
    if isinstance(xm_init, list) and xm_init[0] > xm_init[1]:
        # mass is descending with shell number, so the centre of the
        # star is the last shell
        central_zone = -1
        external_zone = 0

    # Central abundances of the useful species for every cycle, stored
    # in useful_species order (matching specie_index).  The unused
    # per-cycle central-mass list of the original was dropped.
    abund_list = []
    for i in cycles_list:
        abund_list.append([self.se.get(i, keyw["abund"], iso)[central_zone]
                           for iso in useful_species])

    # Initial surface hydrogen abundance: reference level for the onset
    # of core H burning.
    hsurf = self.se.get(0, keyw["abund"], 'H-1')[external_zone]

    for i in range(1, len(cycles_list) - 1):
        if cycles_list[i] > cyclefin and cyclefin != 0:
            # Requested end cycle reached: finish up early.
            collect_lifetimes()
            return [burn_cycles, burn_ages, burn_abun, burn_type,
                    burn_lifetime]

        # Central abundances at this step and the next one.  Computed
        # unconditionally so that later stages never read stale values.
        hcen = abund_list[i][specie_index['H-1']]
        hcennext = abund_list[i + 1][specie_index['H-1']]
        hecen = abund_list[i][specie_index['He-4']]
        hecennext = abund_list[i + 1][specie_index['He-4']]
        ccen = abund_list[i][specie_index['C-12']]
        ccennext = abund_list[i + 1][specie_index['C-12']]
        necen = abund_list[i][specie_index['Ne-20']]
        necennext = abund_list[i + 1][specie_index['Ne-20']]
        ocen = abund_list[i][specie_index['O-16']]
        ocennext = abund_list[i + 1][specie_index['O-16']]

        # H-burning
        if hburn_logic and hcen > 1.e-10:
            started, ended = scan_element(i, hcen, hcennext, hsurf, 'H',
                                          h_thresholds)
            if started:
                hburn_start_logic = True
            if ended:
                hemax = hecen
                if hburn_start_logic:
                    # Bug fix: the original used '==' here (a no-op
                    # comparison) instead of '=', so H tracking was
                    # never switched off after core H exhaustion.
                    hburn_logic = False

        # He-burning
        if hcen < 1.e-5 and hecen > 1.e-10:
            _, ended = scan_element(i, hecen, hecennext, hemax, 'He',
                                    heco_thresholds)
            if ended:
                cmax = ccen

        # C-burning
        if hcen < 1.e-5 and hecen < 1.e-5 and ccen > 1.e-10:
            _, ended = scan_element(i, ccen, ccennext, cmax, 'C',
                                    heco_thresholds)
            if ended:
                nemax = necen

        # Ne-burning
        if hcen < 1.e-5 and hecen < 1.e-5 and ccen < 1.e-3 and necen > 1.e-10:
            _, ended = scan_element(i, necen, necennext, nemax, 'Ne',
                                    ne_thresholds)
            if ended:
                omax = ocen

        # O-burning.  Bug fix: the original recorded ccen instead of
        # ocen as the abundance of the 'O_start' event.
        if hcen < 1.e-5 and hecen < 1.e-5 and ccen < 1.e-3 and ocen > 1.e-10:
            scan_element(i, ocen, ocennext, omax, 'O', heco_thresholds)

    collect_lifetimes()
    return [burn_cycles, burn_ages, burn_abun, burn_type, burn_lifetime]
|
def _deref(tensor: tf.Tensor, index: tf.Tensor) -> tf.Tensor:
    """Equivalent to `tensor[index, ...]`.

    This is a workaround for XLA requiring constant tensor indices.  It
    emits a node containing a hardcoded comparison chain:

        if index == 0: return tensor[0]
        if index == 1: return tensor[1]
        ...
        if index == n - 1: return tensor[n - 1]

    This is acceptable as long as n * size(tensor) is negligible compared
    to the rest of the computation.
    """
    length = tensor.shape[0]
    assert length > 0

    def take_row(i):
        # Slice out row i, keeping all trailing dimensions.
        return tensor[i, ...]

    return _deref_helper(take_row, index, 0, length - 1)
|
def _crc16_checksum ( bytes ) :
"""Returns the CRC - 16 checksum of bytearray bytes
Ported from Java implementation at : http : / / introcs . cs . princeton . edu / java / 61data / CRC16CCITT . java . html
Initial value changed to 0x0000 to match Stellar configuration ."""
|
crc = 0x0000
polynomial = 0x1021
for byte in bytes :
for i in range ( 8 ) :
bit = ( byte >> ( 7 - i ) & 1 ) == 1
c15 = ( crc >> 15 & 1 ) == 1
crc <<= 1
if c15 ^ bit :
crc ^= polynomial
return crc & 0xFFFF
|
def AddTimeZoneOption(self, argument_group):
    """Adds the time zone option to the argument group.

    Args:
      argument_group (argparse._ArgumentGroup): argparse argument group.
    """
    # Default is None (not 'UTC') so callers can detect whether the time
    # zone option was explicitly set.
    help_text = (
        'explicitly define the timezone. Typically the timezone is '
        'determined automatically where possible otherwise it will '
        'default to UTC. Use "-z list" to see a list of available '
        'timezones.')
    argument_group.add_argument(
        '-z', '--zone', '--timezone', action='store', dest='timezone',
        type=str, default=None, help=help_text)
|
def from_config(cls, cp, ifo, section):
    """Read a config file to get calibration options and transfer
    functions which will be used to initialize the model.

    Parameters
    ----------
    cp : WorkflowConfigParser
        An open config file.
    ifo : string
        The detector (H1, L1) for which the calibration model will
        be loaded.
    section : string
        The section name in the config file from which to retrieve
        the calibration options.

    Returns
    -------
    instance
        An instance of the Recalibrate class.
    """
    # Load the four mandatory transfer functions, in a fixed order.
    transfer_functions = []
    for component in ("a-tst", "a-pu", "c", "d"):
        tag = '-'.join([ifo, "transfer-function", component])
        path = cp.get_opt_tag(section, tag, None)
        transfer_functions.append(cls.tf_from_file(path))

    # Column 0 is frequency, column 1 the (complex) response.
    freq = transfer_functions[0][:, 0]
    a_tst0 = transfer_functions[0][:, 1]
    a_pu0 = transfer_functions[1][:, 1]
    c0 = transfer_functions[2][:, 1]
    d0 = transfer_functions[3][:, 1]

    # If the upper-stage actuation is included, read it in and fold it
    # into a_pu0.
    uim_tag = '-'.join([ifo, 'transfer-function-a-uim'])
    if cp.has_option(section, uim_tag):
        uim_path = cp.get_opt_tag(section, uim_tag, None)
        a_pu0 += cls.tf_from_file(uim_path)[:, 1]

    # Read the scalar calibration parameters fc0, fs0 and qinv0.
    fc0 = cp.get_opt_tag(section, '-'.join([ifo, "fc0"]), None)
    fs0 = cp.get_opt_tag(section, '-'.join([ifo, "fs0"]), None)
    qinv0 = cp.get_opt_tag(section, '-'.join([ifo, "qinv0"]), None)

    return cls(freq=freq, fc0=fc0, c0=c0, d0=d0, a_tst0=a_tst0,
               a_pu0=a_pu0, fs0=fs0, qinv0=qinv0)
|
def export(self, nidm_version, export_dir):
    """Create prov entities and activities."""
    # Write the design matrix out as a CSV file.
    np.savetxt(os.path.join(export_dir, self.csv_file),
               np.asarray(self.matrix), delimiter=",")

    # Older NIDM versions expect an explicit file:// location.
    if nidm_version['num'] in ["1.0.0", "1.1.0"]:
        csv_location = Identifier("file://./" + self.csv_file)
    else:
        csv_location = Identifier(self.csv_file)

    attributes = [
        (PROV['type'], self.type),
        (PROV['label'], self.label),
        (NIDM_REGRESSOR_NAMES, json.dumps(self.regressors)),
        (DCT['format'], "text/csv"),
        (NFO['fileName'], self.filename),
        (DC['description'], self.image.id),
        (PROV['location'], csv_location),
    ]

    if self.hrf_models is not None:
        if nidm_version['num'] in ("1.0.0", "1.1.0"):
            if self.design_type is not None:
                attributes.append((NIDM_HAS_FMRI_DESIGN, self.design_type))
            else:
                warnings.warn("Design type is missing")

        # hrf model
        for hrf_model in self.hrf_models:
            attributes.append((NIDM_HAS_HRF_BASIS, hrf_model))

        # drift model
        if self.drift_model is not None:
            attributes.append((NIDM_HAS_DRIFT_MODEL, self.drift_model.id))

    # Create "design matrix" entity
    self.add_attributes(attributes)
|
def convert_ligatures(text_string):
    '''Converts Latin character references within text_string to their
    corresponding unicode characters and returns the converted string as
    type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a string or NoneType not be passed as an argument
    '''
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("none type or string not passed as an argument")
    # LIGATURES is keyed by stringified indices; walk it in index order.
    for position in range(len(LIGATURES)):
        entry = LIGATURES[str(position)]
        text_string = text_string.replace(entry["ligature"], entry["term"])
    return text_string
|
def editpermissions_index_view(self, request, forum_id=None):
    """Allows to select how to edit forum permissions.

    The view displays a form to select a user or a group in order to edit its
    permissions for the considered forum.  When ``forum_id`` is None the view
    edits the *global* forum permissions instead.
    """
    forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None

    # Set up the context
    context = self.get_forum_perms_base_context(request, forum)
    context['forum'] = forum
    context['title'] = _('Forum permissions') if forum else _('Global forum permissions')

    # Handles "copy permission from" form; only available for a concrete forum.
    permissions_copied = False
    if forum and request.method == 'POST':
        forum_form = PickForumForm(request.POST)
        if forum_form.is_valid() and forum_form.cleaned_data['forum']:
            self._copy_forum_permissions(forum_form.cleaned_data['forum'], forum)
            self.message_user(request, _('Permissions successfully copied'))
            permissions_copied = True
        context['forum_form'] = forum_form
    elif forum:
        context['forum_form'] = PickForumForm()

    # Handles user or group selection; skipped when this POST was a
    # permission-copy submission.
    if request.method == 'POST' and not permissions_copied:
        user_form = PickUserForm(request.POST, admin_site=self.admin_site)
        group_form = PickGroupForm(request.POST, admin_site=self.admin_site)
        if user_form.is_valid() and group_form.is_valid():
            user = user_form.cleaned_data.get('user', None) if user_form.cleaned_data else None
            anonymous_user = (user_form.cleaned_data.get('anonymous_user', None) if user_form.cleaned_data else None)
            group = (group_form.cleaned_data.get('group', None) if group_form.cleaned_data else None)
            # Exactly one target (user / anonymous user / group) must be chosen.
            if not user and not anonymous_user and not group:
                user_form._errors[NON_FIELD_ERRORS] = user_form.error_class([_('Choose either a user ID, a group ID or the anonymous user'), ])
            elif user:
                # Redirect to user
                url_kwargs = ({'forum_id': forum.id, 'user_id': user.id} if forum else {'user_id': user.id})
                return redirect(reverse('admin:forum_forum_editpermission_user', kwargs=url_kwargs), )
            elif anonymous_user:
                # Redirect to anonymous user
                url_kwargs = {'forum_id': forum.id} if forum else {}
                return redirect(reverse('admin:forum_forum_editpermission_anonymous_user', kwargs=url_kwargs, ), )
            elif group:
                # Redirect to group
                url_kwargs = ({'forum_id': forum.id, 'group_id': group.id} if forum else {'group_id': group.id})
                return redirect(reverse('admin:forum_forum_editpermission_group', kwargs=url_kwargs), )
        context['user_errors'] = helpers.AdminErrorList(user_form, [])
        context['group_errors'] = helpers.AdminErrorList(group_form, [])
    else:
        user_form = PickUserForm(admin_site=self.admin_site)
        group_form = PickGroupForm(admin_site=self.admin_site)

    context['user_form'] = user_form
    context['group_form'] = group_form
    return render(request, self.editpermissions_index_view_template_name, context)
|
def make_mecard(name, reading=None, email=None, phone=None, videophone=None,
                memo=None, nickname=None, birthday=None, url=None, pobox=None,
                roomno=None, houseno=None, city=None, prefecture=None,
                zipcode=None, country=None):
    """Returns a QR Code which encodes a `MeCard <https://en.wikipedia.org/wiki/MeCard>`_

    :param str name: Name. If it contains a comma, the first part
            is treated as lastname and the second part is treated as forename.
    :param str|None reading: Designates a text string to be set as the
            kana name in the phonebook
    :param str|iterable email: E-mail address. Multiple values are allowed.
    :param str|iterable phone: Phone number. Multiple values are allowed.
    :param str|iterable videophone: Phone number for video calls.
            Multiple values are allowed.
    :param str memo: A notice for the contact.
    :param str nickname: Nickname.
    :param str|int|date birthday: Birthday. If a string is provided,
            it should encode the date as YYYYMMDD value.
    :param str|iterable url: Homepage. Multiple values are allowed.
    :param str|None pobox: P.O. box (address information).
    :param str|None roomno: Room number (address information).
    :param str|None houseno: House number (address information).
    :param str|None city: City (address information).
    :param str|None prefecture: Prefecture (address information).
    :param str|None zipcode: Zip code (address information).
    :param str|None country: Country (address information).
    :rtype: segno.QRCode
    """
    # Serialize the contact into MeCard syntax first, then encode it.
    mecard = make_mecard_data(name=name, reading=reading, email=email,
                              phone=phone, videophone=videophone, memo=memo,
                              nickname=nickname, birthday=birthday, url=url,
                              pobox=pobox, roomno=roomno, houseno=houseno,
                              city=city, prefecture=prefecture,
                              zipcode=zipcode, country=country)
    return segno.make_qr(mecard)
|
def ensure_file(path):
    """Checks if file exists, if fails, tries to create file.

    Returns (True, path) when the file was created, (True, 'exists') when
    it was already present, and (False, error) on an OSError.
    """
    try:
        if isfile(path):
            return (True, 'exists')
        # Create the file with a small marker payload.
        with open(path, 'w+') as handle:
            handle.write('initialized')
        return (True, path)
    except OSError as err:  # pragma: no cover
        return (False, err)
|
def find_required_filehandlers(self, requirements, filename_info):
    """Find the necessary file handlers for the given requirements.

    We assume here requirements are available.

    Raises:
        KeyError, if no handler for the given requirements is available.
        RuntimeError, if there is a handler for the given requirements,
        but it doesn't match the filename info.
    """
    required_handlers = []
    available_info = set(filename_info.items())
    for requirement in (requirements or []):
        for handler in self.file_handlers[requirement]:
            # A handler matches when its filename info is a subset of
            # the info we were given.
            if set(handler.filename_info.items()) <= available_info:
                required_handlers.append(handler)
                break
        else:
            # No handler of this type matched: abort entirely.
            raise RuntimeError("No matching requirement file of type "
                               "{}".format(requirement))
    return required_handlers
|
def split_first(s, delims):
    """Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    # (index, delimiter) of the earliest match found so far.
    best = None
    for delim in delims:
        pos = s.find(delim)
        if pos >= 0 and (best is None or pos < best[0]):
            best = (pos, delim)
    if best is None:
        return s, '', None
    pos, delim = best
    # Delimiters are assumed to be single characters, hence pos + 1.
    return s[:pos], s[pos + 1:], delim
|
def translate_item_ids(self, item_ids, language, is_nested=None):
    """Translate a list of item ids to JSON objects which reference them.

    Args:
        item_ids (list[int]): item ids
        language (str): language used for further filtering (some objects
            for different languages share the same item)
        is_nested (function): mapping from item ids to booleans, where the
            boolean value indicates whether the item is nested

    Returns:
        dict: item id -> JSON object
    """
    # Normalize is_nested into a callable item_id -> bool.
    if is_nested is None:
        def is_nested_fun(x):
            return True
    elif isinstance(is_nested, bool):
        def is_nested_fun(x):
            return is_nested
    else:
        is_nested_fun = is_nested
    all_item_type_ids = ItemType.objects.get_all_item_type_ids()
    # Group the requested ids by their item type so each type can be
    # fetched with a single queryset.
    groupped = proso.list.group_by(item_ids, by=lambda item_id: all_item_type_ids[item_id])
    result = {}
    for item_type_id, items in groupped.items():
        with timeit('translating item type {}'.format(item_type_id)):
            item_type = ItemType.objects.get_all_types()[item_type_id]
            model = ItemType.objects.get_model(item_type_id)
            # Filter the model by the foreign key pointing back at the item.
            kwargs = {'{}__in'.format(item_type['foreign_key']): items}
            if 'language' in item_type:
                kwargs[item_type['language']] = language
            # Use the richest prefetching strategy the manager offers:
            # prepare_related only pays off when at least one item is
            # rendered non-nested (i.e. with its related objects).
            if any([not is_nested_fun(item_id) for item_id in items]) and hasattr(model.objects, 'prepare_related'):
                objs = model.objects.prepare_related()
            elif hasattr(model.objects, 'prepare'):
                objs = model.objects.prepare()
            else:
                objs = model.objects
            for obj in objs.filter(**kwargs):
                item_id = getattr(obj, item_type['foreign_key'])
                result[item_id] = obj.to_json(nested=is_nested_fun(item_id))
    return result
|
def wallet_unlock(self, wallet, password):
    """Unlocks **wallet** using **password**

    :param wallet: Wallet to unlock
    :type wallet: str

    :param password: Password to enter
    :type password: str

    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.wallet_unlock(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
    ...     password="test"
    ... )
    True
    """
    payload = {
        "wallet": self._process_value(wallet, 'wallet'),
        "password": password,
    }
    # The node reports success via the 'valid' field.
    return self.call('wallet_unlock', payload)['valid'] == '1'
|
def build_url_parts(self):
    """Set userinfo, host, port and anchor from self.urlparts.
    Also checks for obfuscated IP addresses."""
    # check userinfo@host:port syntax
    self.userinfo, host = urllib.splituser(self.urlparts[1])
    # fall back to the scheme's default port when none is given
    port = urlutil.default_ports.get(self.scheme, 0)
    host, port = urlutil.splitport(host, port=port)
    if port is None:
        # splitport signals an unparsable port with None
        raise LinkCheckerError(_("URL host %(host)r has invalid port") % {"host": host})
    self.port = port
    # set host lowercase
    self.host = host.lower()
    if self.scheme in scheme_requires_host:
        if not self.host:
            raise LinkCheckerError(_("URL has empty hostname"))
        self.check_obfuscated_ip()
    # rebuild the netloc, omitting the port when it is the default one
    if not self.port or self.port == urlutil.default_ports.get(self.scheme):
        host = self.host
    else:
        host = "%s:%d" % (self.host, self.port)
    if self.userinfo:
        self.urlparts[1] = "%s@%s" % (self.userinfo, host)
    else:
        self.urlparts[1] = host
    # safe anchor for later checking
    self.anchor = self.urlparts[4]
    if self.anchor is not None:
        # Python 2 code: anchors are expected to be unicode strings
        assert isinstance(self.anchor, unicode), repr(self.anchor)
|
def copy_remote_directory_to_local(sftp, remote_path, local_path):
    '''copy remote directory to local machine'''
    try:
        os.makedirs(local_path, exist_ok=True)
        for entry in sftp.listdir(remote_path):
            remote_entry = os.path.join(remote_path, entry)
            local_entry = os.path.join(local_path, entry)
            try:
                # listdir raises on files; a non-empty listing means the
                # entry is a directory, so recurse into it.  NOTE(review):
                # empty remote directories fall through without being
                # copied or created locally -- confirm this is intended.
                if sftp.listdir(remote_entry):
                    copy_remote_directory_to_local(sftp, remote_entry, local_entry)
            except:
                # Not listable: treat it as a regular file and download it.
                sftp.get(remote_entry, local_entry)
    except Exception:
        # Best-effort copy: any failure aborts silently.
        pass
|
def delete_message(self, msg_id, claim_id=None):
    """Deletes the message whose ID matches the supplied msg_id from the
    specified queue. If the message has been claimed, the ID of that claim
    must be passed as the 'claim_id' parameter.
    """
    # Delegate straight to the message manager.
    manager = self._message_manager
    return manager.delete(msg_id, claim_id=claim_id)
|
def rename_acquisition(self, plate_name, name, new_name):
    '''Renames an acquisition.

    Parameters
    ----------
    plate_name: str
        name of the parent plate
    name: str
        name of the acquisition that should be renamed
    new_name: str
        name that should be given to the acquisition

    See also
    --------
    :func:`tmserver.api.acquisition.update_acquisition`
    :class:`tmlib.models.acquisition.Acquisition`
    '''
    logger.info(
        'rename acquisistion "%s" of experiment "%s", plate "%s"',
        name, self.experiment_name, plate_name)
    acquisition_id = self._get_acquisition_id(plate_name, name)
    url = self._build_api_url(
        '/experiments/{experiment_id}/acquisitions/{acquisition_id}'.format(
            experiment_id=self._experiment_id,
            acquisition_id=acquisition_id))
    # Send the new name and fail loudly on any HTTP error.
    response = self._session.put(url, json={'name': new_name})
    response.raise_for_status()
|
def _to_key_val_pairs(defs):
    """Helper to split strings, lists and dicts into (current, value) tuples for accumulation"""
    if isinstance(defs, STRING_TYPES):
        # Convert 'a' to [('a', None)], or 'a.b.c' to [('a', 'b.c')]
        return [defs.split('.', 1)] if '.' in defs else [(defs, None)]
    # Accumulate every string first, then every list, then every dict,
    # preserving the original grouped ordering.
    pairs = []
    for text in [s for s in defs if isinstance(s, STRING_TYPES)]:
        pairs.extend(_to_key_val_pairs(text))
    for seq in [l for l in defs if isinstance(l, list)]:
        pairs.extend(_to_key_val_pairs(seq))
    for mapping in [d for d in defs if isinstance(d, dict)]:
        pairs.extend(iteritems(mapping))
    return pairs
|
def call_status(*args, **kwargs):
    '''Return the status of the lamps.

    Options:

    * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.

    CLI Example:

    .. code-block:: bash

        salt '*' hue.status
        salt '*' hue.status id=1
        salt '*' hue.status id=1,2,3
    '''
    devices = _get_lights()
    # NOTE(review): mirrors the original `and/or` chain exactly -- when
    # no 'id' is given but the bridge reports no lights, this still
    # falls back to _get_devices(kwargs).
    selected = sorted(devices.keys()) if 'id' not in kwargs else None
    if not selected:
        selected = _get_devices(kwargs)
    res = dict()
    for dev_id in selected:
        dev_id = six.text_type(dev_id)
        state = devices[dev_id]['state']
        res[dev_id] = {'on': state['on'], 'reachable': state['reachable']}
    return res
|
def implement(self, implementation, for_type=None, for_types=None):
    """Registers an implementing function for for_type.

    Arguments:
        implementation: Callable implementation for this type.
        for_type: The type this implementation applies to.
        for_types: Same as for_type, but takes a tuple of types.
            for_type and for_types cannot both be passed (for obvious reasons.)

    Raises:
        ValueError
    """
    unbound_implementation = self.__get_unbound_function(implementation)
    for_types = self.__get_types(for_type, for_types)
    # IMPROVED: acquire the write lock once around the whole batch of
    # appends instead of once per type -- the registration of all types
    # becomes a single atomic update and avoids repeated lock churn.
    self._write_lock.acquire()
    try:
        for t in for_types:
            self.implementations.append((t, unbound_implementation))
    finally:
        self._write_lock.release()
|
def sub_state_by_gene_name(self, *gene_names: str) -> 'State':
    """Create a sub state containing only the genes named in *gene_names*.

    Example
    >>> state.sub_state_by_gene_name('operon')
    {operon: 2}
    >>> state.sub_state_by_gene_name('mucuB')
    {mucuB: 0}
    """
    wanted = set(gene_names)
    selected = {gene: value for gene, value in self.items() if gene.name in wanted}
    return State(selected)
|
def apply_experimental_design(df, f, prefix='Intensity '):
    """Load the experimental design template from MaxQuant and use it to
    apply the label names to the data columns.

    :param df: DataFrame whose columns should be relabelled
    :param f: File path for the experimental design template (tab-separated,
        with 'Experiment' and 'Name' columns)
    :param prefix: Prefix stripped from each column before the lookup
    :return: Relabelled copy of ``df``
    """
    df = df.copy()
    design = pd.read_csv(f, sep='\t', header=0)
    design.set_index('Experiment', inplace=True)
    relabelled = []
    for column in df.columns.values:
        experiment = column.replace(prefix, '')
        try:
            column = design.loc[experiment]['Name']
        except (IndexError, KeyError):
            # Column has no entry in the design template: keep it as-is.
            pass
        relabelled.append(column)
    df.columns = relabelled
    return df
|
def fhp_from_json_dict(json_dict  # type: Dict[str, Any]
                       ):
    # type: (...) -> FieldHashingProperties
    """Make a :class:`FieldHashingProperties` object from a dictionary.

    :param dict json_dict:
        The dictionary must have an 'ngram' key and one of 'k' or
        'numBits'.  It may have a 'positional' key; if missing a default
        is used.  The encoding is always set to the default value.
    :return: A :class:`FieldHashingProperties` instance.
    """
    hash_info = json_dict.get('hash', {'type': 'blakeHash'})
    num_bits = json_dict.get('numBits')
    k = json_dict.get('k')
    if not num_bits and not k:
        # Default for the v2 schema.
        num_bits = 200
    missing = None
    if 'missingValue' in json_dict:
        missing = MissingValueSpec.from_json_dict(json_dict['missingValue'])
    return FieldHashingProperties(
        ngram=json_dict['ngram'],
        positional=json_dict.get('positional', FieldHashingProperties._DEFAULT_POSITIONAL),
        hash_type=hash_info['type'],
        prevent_singularity=hash_info.get('prevent_singularity'),
        num_bits=num_bits,
        k=k,
        missing_value=missing,
    )
|
def _validate_data(self, data: dict):
    """Validates data against provider schema. Raises :class:`~notifiers.exceptions.BadArguments` if relevant

    :param data: Data to validate
    :raises: :class:`~notifiers.exceptions.BadArguments`
    """
    log.debug("validating provided data")
    error = best_match(self.validator.iter_errors(data))
    if not error:
        return
    # Prefer a provider-supplied custom message keyed by the failing
    # validator, falling back to the generic jsonschema message.
    custom_error_key = f"error_{error.validator}"
    message = error.schema[custom_error_key] if error.schema.get(custom_error_key) else error.message
    raise BadArguments(validation_error=message, provider=self.name, data=data)
|
def pipe(wrapped):
    """Decorator to create an SPL operator from a function.

    A pipe SPL operator with a single input port and a single output
    port.  For each tuple on the input port the function is called,
    passing the contents of the tuple; SPL attributes are passed by
    position.  The value returned from the function results in zero or
    more tuples being submitted to the operator output port, see
    :ref:`submit-from-python`.

    .. deprecated:: 1.8
        Recommended to use :py:class:`@spl.map <map>` instead.
    """
    if inspect.isfunction(wrapped):
        return _wrapforsplop(_OperatorType.Pipe, wrapped, 'position', False)
    raise TypeError('A function is required')
|
async def start(self, host=None, port=None, *, path=None, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None, reuse_port=None):
    """Coroutine to start the server.

    :param host: string containing an IPv4/v6 address or domain name.
        If None, the server binds to all available interfaces.
    :param port: port number.
    :param path: UNIX domain socket path.  If given, host and port must
        both be None.
    :param family: :py:data:`python:socket.AF_INET` or
        :py:data:`python:socket.AF_INET6` to force IPv4/IPv6; otherwise
        determined from host.
    :param flags: bitmask for
        :py:meth:`~python:asyncio.AbstractEventLoop.getaddrinfo`.
    :param sock: preexisting socket object to use; if given, host and
        port must both be None.
    :param backlog: maximum number of queued connections passed to listen().
    :param ssl: :py:class:`~python:ssl.SSLContext` to enable SSL over the
        accepted connections.
    :param reuse_address: reuse a local socket in TIME_WAIT state without
        waiting for its natural timeout to expire.
    :param reuse_port: allow binding to a port other endpoints are bound
        to, if they all set this flag too.
    """
    if path is not None and (host is not None or port is not None):
        raise ValueError("The 'path' parameter can not be used with the "
                         "'host' or 'port' parameters.")
    if self._server is not None:
        raise RuntimeError('Server is already started')
    if path is None:
        self._server = await self._loop.create_server(
            self._protocol_factory, host, port,
            family=family, flags=flags, sock=sock, backlog=backlog,
            ssl=ssl, reuse_address=reuse_address, reuse_port=reuse_port)
    else:
        self._server = await self._loop.create_unix_server(
            self._protocol_factory, path,
            sock=sock, backlog=backlog, ssl=ssl)
|
def _compute_margin(self):
    """Compute graph margins from set texts.

    Measures every piece of text that can surround the plot area
    (legends, x/y labels, titles) and grows ``self.margin_box`` on the
    matching side to make room for it.  Also caches derived sizes
    (``_legend_at_left_width``, ``_x_labels_height``,
    ``_x_title_height``, ``_y_title_height``) for later rendering steps.
    """
    # --- Legends: measure the (possibly truncated) series titles.
    self._legend_at_left_width = 0
    for series_group in (self.series, self.secondary_series):
        if self.show_legend and series_group:
            h, w = get_texts_box(map(lambda x: truncate(x, self.truncate_legend or 15), [serie.title['title'] if isinstance(serie.title, dict) else serie.title or '' for serie in series_group]), self.style.legend_font_size)
            if self.legend_at_bottom:
                # Legend rows are laid out in columns below the plot.
                h_max = max(h, self.legend_box_size)
                cols = (self._order // self.legend_at_bottom_columns if self.legend_at_bottom_columns else ceil(sqrt(self._order)) or 1)
                self.margin_box.bottom += self.spacing + h_max * round(cols - 1) * 1.5 + h_max
            else:
                # Primary series legend sits on the left, secondary on the right.
                if series_group is self.series:
                    legend_width = self.spacing + w + self.legend_box_size
                    self.margin_box.left += legend_width
                    self._legend_at_left_width += legend_width
                else:
                    self.margin_box.right += (self.spacing + w + self.legend_box_size)
    # --- X axis labels (primary at the bottom, secondary at the top).
    self._x_labels_height = 0
    if (self._x_labels or self._x_2nd_labels) and self.show_x_labels:
        for xlabels in (self._x_labels, self._x_2nd_labels):
            if xlabels:
                h, w = get_texts_box(map(lambda x: truncate(x, self.truncate_label or 25), cut(xlabels)), self.style.label_font_size)
                # Rotated labels contribute their projected height.
                self._x_labels_height = self.spacing + max(w * abs(sin(rad(self.x_label_rotation))), h)
                if xlabels is self._x_labels:
                    self.margin_box.bottom += self._x_labels_height
                else:
                    self.margin_box.top += self._x_labels_height
                if self.x_label_rotation:
                    # Rotated labels can overhang horizontally; widen the
                    # side they lean towards.
                    if self.x_label_rotation % 180 < 90:
                        self.margin_box.right = max(w * abs(cos(rad(self.x_label_rotation))), self.margin_box.right)
                    else:
                        self.margin_box.left = max(w * abs(cos(rad(self.x_label_rotation))), self.margin_box.left)
    # --- Y axis labels (primary on the left, secondary on the right).
    if self.show_y_labels:
        for ylabels in (self._y_labels, self._y_2nd_labels):
            if ylabels:
                h, w = get_texts_box(cut(ylabels), self.style.label_font_size)
                if ylabels is self._y_labels:
                    self.margin_box.left += self.spacing + max(w * abs(cos(rad(self.y_label_rotation))), h)
                else:
                    self.margin_box.right += self.spacing + max(w * abs(cos(rad(self.y_label_rotation))), h)
    # --- Titles: main title on top, x title below, y title on the left.
    self._title = split_title(self.title, self.width, self.style.title_font_size)
    if self.title:
        h, _ = get_text_box(self._title[0], self.style.title_font_size)
        self.margin_box.top += len(self._title) * (self.spacing + h)
    self._x_title = split_title(self.x_title, self.width - self.margin_box.x, self.style.title_font_size)
    self._x_title_height = 0
    if self._x_title:
        h, _ = get_text_box(self._x_title[0], self.style.title_font_size)
        height = len(self._x_title) * (self.spacing + h)
        self.margin_box.bottom += height
        self._x_title_height = height + self.spacing
    self._y_title = split_title(self.y_title, self.height - self.margin_box.y, self.style.title_font_size)
    self._y_title_height = 0
    if self._y_title:
        h, _ = get_text_box(self._y_title[0], self.style.title_font_size)
        height = len(self._y_title) * (self.spacing + h)
        self.margin_box.left += height
        self._y_title_height = height + self.spacing
    # Inner margin
    if self.print_values_position == 'top':
        # Expand the value range so values printed above/below the bars
        # stay inside the plot box.
        gh = self.height - self.margin_box.y
        alpha = 1.1 * (self.style.value_font_size / gh) * self._box.height
        if self._max and self._max > 0:
            self._box.ymax += alpha
        if self._min and self._min < 0:
            self._box.ymin -= alpha
|
def _update_internal_data_base(self):
    """Updates internal combo knowledge for all actual transitions by calling
    the get_possible_combos_for_transition function for each of them.

    Populates ``self.combo`` with per-transition combo dictionaries and
    the free from-state/from-outcome dictionaries for both the internal
    (this state's own transitions) and external (parent's transitions
    touching this state) views.
    """
    model = self.model
    ### FOR COMBOS
    # internal transitions
    # - take all internal states
    # - take all not used internal outcomes of this states
    # external transitions
    # - take all external states
    # - take all external outcomes
    # - take all not used own outcomes
    ### LINKING
    # internal -> transition_id -> from_state = outcome combos
    # external -> state -> outcome combos
    self.combo['internal'] = {}
    self.combo['external'] = {}
    self.combo['free_from_states'] = {}
    self.combo['free_from_outcomes_dict'] = {}
    # BUG FIX: 'free_ext_from_outcomes_dict' used to be initialized twice
    # (copy-paste slip) while 'free_ext_from_states' was never initialized,
    # so it stayed missing for root states (it is only assigned in the
    # non-root branch below).
    self.combo['free_ext_from_states'] = {}
    self.combo['free_ext_from_outcomes_dict'] = {}
    if isinstance(model, ContainerStateModel):
        # check for internal combos
        for transition_id, transition in model.state.transitions.items():
            self.combo['internal'][transition_id] = {}
            [from_state_combo, from_outcome_combo, to_state_combo, to_outcome_combo,
             free_from_states, free_from_outcomes_dict] = \
                self.get_possible_combos_for_transition(transition, self.model, self.model)
            self.combo['internal'][transition_id]['from_state'] = from_state_combo
            self.combo['internal'][transition_id]['from_outcome'] = from_outcome_combo
            self.combo['internal'][transition_id]['to_state'] = to_state_combo
            self.combo['internal'][transition_id]['to_outcome'] = to_outcome_combo
            self.combo['free_from_states'] = free_from_states
            self.combo['free_from_outcomes_dict'] = free_from_outcomes_dict
        if not model.state.transitions:
            # No transitions at all: still compute the free combos.
            [x, y, z, v, free_from_states, free_from_outcomes_dict] = \
                self.get_possible_combos_for_transition(None, self.model, self.model)
            self.combo['free_from_states'] = free_from_states
            self.combo['free_from_outcomes_dict'] = free_from_outcomes_dict
    # TODO check why the can happen should not be handed always the LibraryStateModel
    if not (self.model.state.is_root_state or self.model.state.is_root_state_of_library):
        # check for external combos
        for transition_id, transition in model.parent.state.transitions.items():
            # Only parent transitions that touch this state are relevant.
            if transition.from_state == model.state.state_id or transition.to_state == model.state.state_id:
                self.combo['external'][transition_id] = {}
                [from_state_combo, from_outcome_combo, to_state_combo, to_outcome_combo,
                 free_from_states, free_from_outcomes_dict] = \
                    self.get_possible_combos_for_transition(transition, self.model.parent, self.model, True)
                self.combo['external'][transition_id]['from_state'] = from_state_combo
                self.combo['external'][transition_id]['from_outcome'] = from_outcome_combo
                self.combo['external'][transition_id]['to_state'] = to_state_combo
                self.combo['external'][transition_id]['to_outcome'] = to_outcome_combo
                self.combo['free_ext_from_states'] = free_from_states
                self.combo['free_ext_from_outcomes_dict'] = free_from_outcomes_dict
        if not model.parent.state.transitions:
            [x, y, z, v, free_from_states, free_from_outcomes_dict] = \
                self.get_possible_combos_for_transition(None, self.model.parent, self.model, True)
            self.combo['free_ext_from_states'] = free_from_states
            self.combo['free_ext_from_outcomes_dict'] = free_from_outcomes_dict
|
def publish_schedule_length(self, redis_client, port, db):
    """Publish the size of the Redis 'schedule' sorted set.

    :param redis_client: Redis client
    :param port: Redis port
    :param db: Redis database index
    :return: None; the length is forwarded via ``self.__publish``
    """
    length = redis_client.zcard('schedule')
    self.__publish(port, db, 'schedule', length)
|
def add_dplist_permission_for_user_on_portal(self, user_email, portal_id):
    """Adds the 'd_p_list' permission to a user object when provided
    a user_email and portal_id.

    :param user_email: email used to resolve the user id
    :param portal_id: id of the Portal the permission is scoped to
    :return: result of ``self.add_user_permission``
    """
    _id = self.get_user_id_from_email(user_email)
    # Permission payload: a JSON list with a single access grant scoped
    # to the given portal.
    payload = json.dumps([{'access': 'd_p_list', 'oid': {'id': portal_id, 'type': 'Portal'}}])
    # IMPROVED: the previous revision fetched and print()ed the user's
    # permissions before and after this call purely for debugging; those
    # two extra API round-trips have been removed.
    return self.add_user_permission(_id, payload)
|
def log_uniform_prior(value, umin=0, umax=None):
    """Log-uniform prior distribution.

    Returns ``1 / value`` when ``value`` lies in the support
    (``value > 0``, ``value >= umin`` and, if given, ``value <= umax``),
    and ``-numpy.inf`` otherwise.

    NOTE(review): despite the name, in-range values yield the density
    1/value rather than its logarithm, while out-of-range values yield
    -inf (a log-space convention) -- confirm the intended scale against
    callers before changing.
    """
    out_of_support = value <= 0 or value < umin
    if not out_of_support and umax is not None:
        out_of_support = value > umax
    if out_of_support:
        return -np.inf
    return 1 / value
|
def address_to_coords(self, address):
    """Convert an address to coordinates via the Waze geocoder.

    :param address: free-form address string
    :return: dict with 'lat', 'lon' and 'bounds' keys
    :raises WRCError: when no result containing a city is returned
    """
    region_coords = self.BASE_COORDS[self.region]
    endpoint = self.COORD_SERVERS[self.region]
    params = {
        "q": address,
        "lang": "eng",
        "origin": "livemap",
        "lat": region_coords["lat"],
        "lon": region_coords["lon"],
    }
    response = requests.get(self.WAZE_URL + endpoint, params=params, headers=self.HEADERS)
    for entry in response.json():
        if not entry.get('city'):
            continue
        bounds = entry['bounds']
        if bounds is not None:
            # sometimes the coords don't match up: normalise the box so
            # that top >= bottom and left <= right.
            top, bottom = bounds['top'], bounds['bottom']
            bounds['top'], bounds['bottom'] = max(top, bottom), min(top, bottom)
            left, right = bounds['left'], bounds['right']
            bounds['left'], bounds['right'] = min(left, right), max(left, right)
        else:
            bounds = {}
        return {"lat": entry['location']['lat'], "lon": entry['location']['lon'], "bounds": bounds}
    raise WRCError("Cannot get coords for %s" % address)
|
def get(self, query_path=None, return_type=list, preceding_depth=None, throw_null_return_error=False):
    """Traverses the list of query paths to find the data requested.

    :param query_path: (list(str), str), list of query path branches or query string
        Default behavior: returns list(str) of possible config headers
    :param return_type: (list, str, dict, OrderedDict), desired return type for the data
    :param preceding_depth: int, returns a dictionary containing the data that traces back up the path for x depth
        -1: for the full traversal back up the path
        None: is default for no traversal
    :param throw_null_return_error: bool, whether or not to throw an error if we get an empty result but no error
    :return: (list, str, dict, OrderedDict), the type specified from return_type
    :raises: exceptions.ResourceNotFoundError: if the query path is invalid
    """
    # Dispatch on the query path's concrete type to the matching lookup.
    function_type_lookup = {str: self._get_path_entry_from_string, list: self._get_path_entry_from_list}
    if query_path is None:
        # No path given: hand back the default configuration view.
        return self._default_config(return_type)
    try:
        # NOTE(review): the lookup's fallback is the builtin ``str`` -- a
        # query_path that is neither str nor list (e.g. a tuple) is
        # silently stringified here instead of being resolved.  The
        # intended default was presumably self._get_path_entry_from_string;
        # confirm before changing.
        config_entry = function_type_lookup.get(type(query_path), str)(query_path)
        query_result = self.config_entry_handler.format_query_result(config_entry, query_path, return_type=return_type, preceding_depth=preceding_depth)
        return query_result
    except IndexError:
        # Invalid index during traversal: return an empty instance of the
        # requested type rather than raising.
        # NOTE(review): throw_null_return_error is accepted but never
        # consulted in this body -- verify against callers.
        return return_type()
|
def level_is_between(level, min_level_value, max_level_value):
    """Returns True if level is between the specified min or max, inclusive."""
    level_value = get_level_value(level)
    if level_value is None:
        # unknown level value
        return False
    return min_level_value <= level_value <= max_level_value
|
def remove(self, key, glob=False):
    """Remove key value pair in a local or global namespace."""
    space = self.namespace(key, glob)
    try:
        self.keyring.delete_password(space, key)
    except PasswordDeleteError:
        # OSX and gnome keyrings have no delete method; blank the stored
        # value instead.
        self.set(key, '', glob)
|
def make_hash(o):
    r"""Makes a hash from a dictionary, list, tuple or set to any level, that
    contains only other hashable types (including any lists, tuples, sets, and
    dictionaries). In the case where other kinds of objects (like classes) need
    to be hashed, pass in a collection of object attributes that are pertinent.
    For example, a class can be hashed in this fashion:
        make_hash([cls.__dict__, cls.__name__])
    A function can be hashed like so:
        make_hash([fn.__dict__, fn.__code__])
    References:
        http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
    """
    if type(o) == DictProxyType:
        # Class __dict__ proxies: drop dunder entries before hashing.
        o = {k: v for k, v in o.items() if not k.startswith("__")}
    if isinstance(o, (set, tuple, list)):
        return tuple(make_hash(e) for e in o)
    if not isinstance(o, dict):
        return hash(o)
    # IMPROVED: the previous revision deep-copied the whole dict just to
    # overwrite every value.  Hashing the values directly gives the same
    # result (equal values hash equally) without requiring the values to
    # be deep-copyable and without the copy cost.
    hashed_items = {k: make_hash(v) for k, v in o.items()}
    return hash(tuple(frozenset(sorted(hashed_items.items()))))
|
def Parse(self, stat, file_object, knowledge_base):
    """Parse the passwd file, yielding one user entry per parsed line."""
    del stat, knowledge_base  # unused, kept for the parser interface
    contents = utils.ReadFileBytesAsUnicode(file_object)
    for index, line in enumerate(contents.splitlines()):
        user = self.ParseLine(index, line.strip())
        if user is not None:
            yield user
|
def rotate_scale(im, angle, scale, borderValue=0, interp=cv2.INTER_CUBIC):
    """Rotate and scale the image about its centre.

    Parameters
    im : 2d array
        The image
    angle : number
        The angle, in radians, to rotate
    scale : positive number
        The scale factor
    borderValue : number, default 0
        The value for the pixels outside the border
    interp : int, default cv2.INTER_CUBIC
        OpenCV interpolation flag

    Returns
    im : 2d array
        the rotated and scaled image

    Notes
    The output image has the same size as the input.
    Therefore the image may be cropped in the process.
    """
    image = np.asarray(im, dtype=np.float32)
    rows, cols = image.shape
    centre = (cols / 2, rows / 2)
    # OpenCV expects degrees (and the opposite sign) plus an inverse scale.
    matrix = cv2.getRotationMatrix2D(centre, -angle * 180 / np.pi, 1 / scale)
    # REPLICATE
    return cv2.warpAffine(image, matrix, (cols, rows), borderMode=cv2.BORDER_CONSTANT, flags=interp, borderValue=borderValue)
|
def load_topology(path):
    """Open a topology file, patch it for last GNS3 release and return it.

    :param path: path of the ``.gns3`` topology file
    :return: the (possibly converted) topology dict
    :raises aiohttp.web.HTTPConflict: when the file cannot be read or
        parsed, targets a newer file-format revision than this release,
        fails schema validation, or when the backup/rewrite of a
        converted file fails
    """
    log.debug("Read topology %s", path)
    try:
        with open(path, encoding="utf-8") as f:
            topo = json.load(f)
    except (OSError, UnicodeDecodeError, ValueError) as e:
        raise aiohttp.web.HTTPConflict(text="Could not load topology {}: {}".format(path, str(e)))
    # Refuse files written by a newer GNS3 than this one understands.
    if topo.get("revision", 0) > GNS3_FILE_FORMAT_REVISION:
        raise aiohttp.web.HTTPConflict(text="This project is designed for a more recent version of GNS3 please update GNS3 to version {} or later".format(topo["version"]))
    changed = False
    if "revision" not in topo or topo["revision"] < GNS3_FILE_FORMAT_REVISION:
        # If it's an old GNS3 file we need to convert it
        # first we backup the file
        try:
            shutil.copy(path, path + ".backup{}".format(topo.get("revision", 0)))
        except (OSError) as e:
            raise aiohttp.web.HTTPConflict(text="Can't write backup of the topology {}: {}" .format(path, str(e)))
        changed = True
        # Chain the per-revision converters so any old file is stepped up
        # to the current format one revision at a time.
        if "revision" not in topo or topo["revision"] < 5:
            topo = _convert_1_3_later(topo, path)
        # Version before GNS3 2.0 alpha 4
        if topo["revision"] < 6:
            topo = _convert_2_0_0_alpha(topo, path)
        # Version before GNS3 2.0 beta 3
        if topo["revision"] < 7:
            topo = _convert_2_0_0_beta_2(topo, path)
        # Version before GNS3 2.1
        if topo["revision"] < 8:
            topo = _convert_2_0_0(topo, path)
    try:
        _check_topology_schema(topo)
    except aiohttp.web.HTTPConflict as e:
        log.error("Can't load the topology %s", path)
        raise e
    if changed:
        # Persist the converted topology back to disk.
        try:
            with open(path, "w+", encoding="utf-8") as f:
                json.dump(topo, f, indent=4, sort_keys=True)
        except (OSError) as e:
            raise aiohttp.web.HTTPConflict(text="Can't write the topology {}: {}".format(path, str(e)))
    return topo
|
def get_members(self, offset=None, limit=None):
    """Fetch team members for current team.

    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :return: Collection object.
    """
    extra = {'resource': self.__class__.__name__, 'query': {'id': self.id}}
    logger.info('Get team members', extra=extra)
    response = self._api.get(
        url=self._URL['members_query'].format(id=self.id),
        params={'offset': offset, 'limit': limit},
    )
    data = response.json()
    member_items = [TeamMember(api=self._api, **member) for member in data['items']]
    link_items = [Link(**link) for link in data['links']]
    return Collection(
        resource=TeamMember,
        href=data['href'],
        total=response.headers['x-total-matching-query'],
        items=member_items,
        links=link_items,
        api=self._api,
    )
|
def parse_subdomain_record(domain_name, rec, block_height, parent_zonefile_hash, parent_zonefile_index, zonefile_offset, txid, domain_zonefiles_missing, resolver=None):
    """Parse a subdomain record, and verify its signature.

    @domain_name: the stem name
    @rec: the parsed zone file, with 'txt' records

    Returns a Subdomain object on success
    Raises an exception on parse error
    """
    # sanity check: need 'txt' record list
    txt_entry = rec['txt']
    if not isinstance(txt_entry, list):
        raise ParseError("Tried to parse a TXT record with only a single <character-string>")
    entries = {}
    # parts of the subdomain record
    for item in txt_entry:
        # coerce string (NOTE: `unicode` -- this is Python 2 code)
        if isinstance(item, unicode):
            item = str(item)
        # each TXT part is a key=value pair; only the first '=' splits.
        key, value = item.split('=', 1)
        value = value.replace('\\=', '=')
        # escape '='
        if key in entries:
            raise ParseError("Duplicate TXT entry '{}'".format(key))
        entries[key] = value
    pubkey = entries[SUBDOMAIN_PUBKEY]
    n = entries[SUBDOMAIN_N]
    # signature is optional in the record
    if SUBDOMAIN_SIG in entries:
        sig = entries[SUBDOMAIN_SIG]
    else:
        sig = None
    try:
        zonefile_parts = int(entries[SUBDOMAIN_ZF_PARTS])
    except ValueError:
        raise ParseError("Not an int (SUBDOMAIN_ZF_PARTS)")
    try:
        n = int(n)
    except ValueError:
        raise ParseError("Not an int (SUBDOMAIN_N)")
    # reassemble the base64 zonefile from its numbered pieces
    b64_zonefile = "".join([entries[SUBDOMAIN_ZF_PIECE % zf_index] for zf_index in range(zonefile_parts)])
    is_subdomain, _, _ = is_address_subdomain(rec['name'])
    subd_name = None
    if not is_subdomain:
        # not a fully-qualified subdomain, which means it ends with this domain name
        try:
            assert is_name_valid(str(domain_name)), domain_name
            subd_name = str(rec['name'] + '.' + domain_name)
            assert is_address_subdomain(subd_name)[0], subd_name
        except AssertionError as ae:
            if BLOCKSTACK_DEBUG:
                log.exception(ae)
            raise ParseError("Invalid names: {}".format(ae))
    else:
        # already fully-qualified
        subd_name = rec['name']
    return Subdomain(str(subd_name), str(domain_name), str(pubkey), int(n), base64.b64decode(b64_zonefile), str(sig), block_height, parent_zonefile_hash, parent_zonefile_index, zonefile_offset, txid, domain_zonefiles_missing=domain_zonefiles_missing, resolver=resolver)
|
def to_unicode_from_fs(string):
    """Return a unicode version of string decoded using the file system encoding."""
    if not is_string(string):
        # string is a QString: convert via its UTF-8 representation.
        return to_text_string(string.toUtf8(), 'utf-8')
    if is_binary_string(string):
        try:
            return string.decode(FS_ENCODING)
        except (UnicodeError, TypeError):
            # Decoding failed: fall through and return the input as-is.
            pass
    return string
|
def size(self):
    """The size of the element, as a dict with 'height' and 'width' keys."""
    command = Command.GET_ELEMENT_RECT if self._w3c else Command.GET_ELEMENT_SIZE
    raw = self._execute(command)['value']
    return {"height": raw["height"], "width": raw["width"]}
|
def add(name, default_for_unspecified, total_resource_slots, max_concurrent_sessions, max_containers_per_session, max_vfolder_count, max_vfolder_size, idle_timeout, allowed_vfolder_hosts):
    '''Add a new keypair resource policy.
    NAME: NAME of a new keypair resource policy.'''
    with Session() as session:
        try:
            data = session.ResourcePolicy.create(
                name,
                default_for_unspecified=default_for_unspecified,
                total_resource_slots=total_resource_slots,
                max_concurrent_sessions=max_concurrent_sessions,
                max_containers_per_session=max_containers_per_session,
                max_vfolder_count=max_vfolder_count,
                max_vfolder_size=max_vfolder_size,
                idle_timeout=idle_timeout,
                allowed_vfolder_hosts=allowed_vfolder_hosts,
            )
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
        if not data['ok']:
            print_fail('KeyPair Resource Policy creation has failed: {0}'.format(data['msg']))
            sys.exit(1)
        policy = data['resource_policy']
        print('Keypair resource policy ' + policy['name'] + ' is created.')
|
def get_subdomain(url):
    """Get the subdomain of the given URL.

    Args:
        url (str): The URL to get the subdomain from.
    Returns:
        str: The subdomain(s)
    """
    cache = URLHelper.__cache
    if url not in cache:
        cache[url] = urlparse(url)
    host_parts = cache[url].netloc.split(".")
    # Everything except the last two labels (domain + TLD) is subdomain.
    return ".".join(host_parts[:-2])
|
def is_valid_vpnv6_prefix(prefix):
    """Returns True if given prefix is a string represent vpnv6 prefix.

    A vpnv6 prefix has the form RD:IPv6, where RD is a route
    distinguisher and the IPv6 part is a valid colon-hexadecimal
    notation string.
    """
    if not isinstance(prefix, str):
        return False
    # Split off the two route-distinguisher tokens from the IPv6 part.
    parts = prefix.split(':', 2)
    if len(parts) != 3:
        return False
    route_dist = '%s:%s' % (parts[0], parts[1])
    if not is_valid_route_dist(route_dist):
        return False
    # Validate the remaining IPv6 prefix.
    return is_valid_ipv6_prefix(parts[2])
|
def close_temp_file(self):
    """<Purpose>
      Close the temporary file object.  Mimics the usual file.close(),
      but the temporary file destroys itself when closed.  If compression
      is set, the second temporary file instance 'self._orig_file' is
      closed as well so that no open temporary files are left behind.

    <Arguments>
      None.

    <Exceptions>
      None.

    <Side Effects>
      Closes 'self._orig_file'.

    <Return>
      None.
    """
    self.temporary_file.close()
    if self._orig_file is None:
        return
    # Compression was set: explicitly close the original file object too.
    self._orig_file.close()
|
def serialize_break(ctx, document, elem, root):
    "Serialize break element."
    if elem.break_type == u'textWrapping':
        # A soft line break maps directly to <br>.
        node = etree.SubElement(root, 'br')
    else:
        # Page/column breaks become a span, optionally styled.
        node = etree.SubElement(root, 'span')
        if ctx.options['embed_styles']:
            node.set('style', 'page-break-after: always;')
    fire_hooks(ctx, document, elem, node, ctx.get_hook('page_break'))
    return root
|
def lookup_asset_types(self, sids):
    """Retrieve asset types for a list of sids.

    Parameters
    sids : list[int]

    Returns
    types : dict[sid -> str or None]
        Asset types for the provided sids.
    """
    found = {}
    missing = set()
    for sid in sids:
        if sid in self._asset_type_cache:
            found[sid] = self._asset_type_cache[sid]
        else:
            missing.add(sid)
    if not missing:
        # Everything was cached; skip the database round-trip.
        return found
    router_cols = self.asset_router.c
    for chunk in group_into_chunks(missing):
        query = sa.select((router_cols.sid, router_cols.asset_type)).where(
            self.asset_router.c.sid.in_(map(int, chunk)))
        for sid, asset_type in query.execute().fetchall():
            missing.remove(sid)
            self._asset_type_cache[sid] = asset_type
            found[sid] = asset_type
    # Anything the router doesn't know about is recorded (and cached) as None.
    for sid in missing:
        self._asset_type_cache[sid] = None
        found[sid] = None
    return found
|
def AddActiveConnection(self, devices, connection_device, specific_object, name, state):
    '''Add an active connection to an existing WiFi device.

    You have to a list of the involved WiFi devices, the connection path,
    the access point path, ActiveConnection object name and connection
    state.

    Please note that this does not set any global properties.

    Returns the new object path.
    '''
    # Pull uuid/type out of the referenced connection's settings.
    conn_obj = dbusmock.get_object(connection_device)
    settings = conn_obj.settings
    conn_uuid = settings['connection']['uuid']
    conn_type = settings['connection']['type']
    device_objects = [dbus.ObjectPath(dev) for dev in devices]
    active_connection_path = '/org/freedesktop/NetworkManager/ActiveConnection/' + name
    # Register the ActiveConnection mock object with its D-Bus properties.
    self.AddObject(active_connection_path, ACTIVE_CONNECTION_IFACE, {'Devices': dbus.Array(device_objects, signature='o'), 'Default6': False, 'Default': True, 'Type': conn_type, 'Vpn': (conn_type == 'vpn'), 'Connection': dbus.ObjectPath(connection_device), 'Master': dbus.ObjectPath('/'), 'SpecificObject': dbus.ObjectPath(specific_object), 'Uuid': conn_uuid, 'State': dbus.UInt32(state), }, [])
    # Mark each involved device as carrying this active connection.
    for dev_path in devices:
        self.SetDeviceActive(dev_path, active_connection_path)
    self.object_manager_emit_added(active_connection_path)
    # Append the new path to the manager's ActiveConnections property.
    NM = dbusmock.get_object(MANAGER_OBJ)
    active_connections = NM.Get(MANAGER_IFACE, 'ActiveConnections')
    active_connections.append(dbus.ObjectPath(active_connection_path))
    NM.SetProperty(MANAGER_OBJ, MANAGER_IFACE, 'ActiveConnections', active_connections)
    return active_connection_path
|
def feed_forward_gaussian(config, action_space, observations, unused_length, state=None):
    """Independent feed forward networks for policy and value.

    The policy network outputs the mean action and the standard deviation is
    learned as independent parameter vector.

    Args:
      config: Configuration object.
      action_space: Action space of the environment.
      observations: Sequences of observations.
      unused_length: Batch of sequence lengths.
      state: Unused batch of initial states.

    Raises:
      ValueError: Unexpected action space.

    Returns:
      Attribute dictionary containing the policy, value, and unused state.
    """
    # Only continuous, 1-D action spaces are supported.
    if not isinstance(action_space, gym.spaces.Box):
        raise ValueError('Network expects continuous actions.')
    if not len(action_space.shape) == 1:
        raise ValueError('Network only supports 1D action vectors.')
    action_size = action_space.shape[0]
    init_output_weights = tf.contrib.layers.variance_scaling_initializer(factor=config.init_output_factor)
    # Initialize the pre-softplus parameter so that softplus(param) == config.init_std.
    before_softplus_std_initializer = tf.constant_initializer(np.log(np.exp(config.init_std) - 1))
    # Collapse all trailing observation dimensions into a single feature axis,
    # keeping the leading (batch, time) axes intact.
    flat_observations = tf.reshape(observations, [tf.shape(observations)[0], tf.shape(observations)[1], functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
    with tf.variable_scope('policy'):
        x = flat_observations
        for size in config.policy_layers:
            x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
        mean = tf.contrib.layers.fully_connected(x, action_size, tf.tanh, weights_initializer=init_output_weights)
        # Std is a learned free parameter, independent of the observation.
        std = tf.nn.softplus(tf.get_variable('before_softplus_std', mean.shape[2:], tf.float32, before_softplus_std_initializer))
        # Broadcast the per-action std over the (batch, time) axes of the mean.
        std = tf.tile(std[None, None], [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
    with tf.variable_scope('value'):
        x = flat_observations
        for size in config.value_layers:
            x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
        # Drop the trailing singleton dimension of the value head.
        value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
    # Fail fast if NaN/Inf values appear in the graph outputs.
    mean = tf.check_numerics(mean, 'mean')
    std = tf.check_numerics(std, 'std')
    value = tf.check_numerics(value, 'value')
    policy = CustomKLDiagNormal(mean, std)
    return agents.tools.AttrDict(policy=policy, value=value, state=state)
|
def read_file(self, path, **kwargs):
    """Read file input into memory, returning deserialized objects

    :param path: Path of file to read
    :param \**kwargs:
        * ignore (``list``): List of file patterns to ignore
    """
    # TODO support JSON here
    # TODO sphinx way of reporting errors in logs?
    parser_command = ["godocjson"]
    _ignore = kwargs.get("ignore")
    if _ignore:
        # godocjson takes the exclusion patterns as a single |-joined regex.
        parser_command.extend(["-e", "{0}".format("|".join(_ignore))])
    parser_command.append(path)
    try:
        return json.loads(subprocess.check_output(parser_command))
    except (IOError, TypeError, ValueError):
        # The original had two duplicated handlers with identical bodies;
        # merged here. ValueError added so malformed JSON from the parser is
        # reported the same way instead of crashing the build.
        LOGGER.warning("Error reading file: {0}".format(path))
    return None
|
def compose(composite_property_s, component_properties_s):
    """Sets the components of the given composite property.

    All parameters are <feature>value strings
    """
    from . import property

    component_properties_s = to_seq(component_properties_s)
    composite_property = property.create_from_string(composite_property_s)
    f = composite_property.feature

    # Accept either Property instances or <feature>value strings.
    if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property):
        component_properties = component_properties_s
    else:
        component_properties = [property.create_from_string(p) for p in component_properties_s]

    if not f.composite:
        raise BaseException("'%s' is not a composite feature" % f)
    # Bug fix: the original tested `property` (the module imported above) for
    # membership, which can never be a key, so the duplicate-registration
    # check silently never fired. Test the composite property itself.
    if composite_property in __composite_properties:
        raise BaseException('components of "%s" already set: %s' % (composite_property, str(__composite_properties[composite_property])))
    if composite_property in component_properties:
        raise BaseException('composite property "%s" cannot have itself as a component' % composite_property)

    __composite_properties[composite_property] = component_properties
|
def checkversion(version):
    """Checks foliadocserve version, returns 1 if the document is newer than the library, -1 if it is older, 0 if it is equal"""
    try:
        required = (int(part) for part in REQUIREFOLIADOCSERVE.split('.'))
        received = (int(part) for part in version.split('.'))
        # Compare component-wise, most significant first.
        for want, got in zip(required, received):
            if got > want:
                # response is newer than library
                return 1
            if got < want:
                # response is older than library
                return -1
        # versions are equal
        return 0
    except ValueError:
        raise ValueError("Unable to parse version, invalid syntax")
|
def ordered_members_map(self):
    """Mask to group the persons by entity

    This function only caches the map value, to see what the map is used for,
    see value_nth_person method.
    """
    if self._ordered_members_map is None:
        # Bug fix: compute AND store the sort order. The original returned
        # np.argsort(...) without populating the cache, so despite the
        # docstring it recomputed the map on every call.
        self._ordered_members_map = np.argsort(self.members_entity_id)
    return self._ordered_members_map
|
def rsa_base64_decrypt(self, cipher, b64=True):
    """Base64-decode the payload first (when requested), then RSA-decrypt it."""
    raw = base64.b64decode(cipher) if b64 else cipher
    with open(self.key_file) as key_fp:
        rsa_key = RSA.importKey(key_fp.read())
    decryptor = PKCS1_v1_5.new(rsa_key)
    # The sentinel returned on decryption failure is random bytes.
    plain = decryptor.decrypt(raw, Random.new().read(15 + SHA.digest_size))
    return helper.to_str(plain)
|
def serialize(pca, **kwargs):
    """Serialize an orientation object to a dict suitable
    for JSON"""
    strike, dip, rake = pca.strike_dip_rake()
    hyp_axes = sampling_axes(pca)
    # Angular errors are doubled and converted from radians to degrees.
    errors = [2 * N.degrees(angle) for angle in angular_errors(hyp_axes)]
    return dict(
        **kwargs,
        principal_axes=pca.axes.tolist(),
        hyperbolic_axes=hyp_axes.tolist(),
        n_samples=pca.n,
        strike=strike,
        dip=dip,
        rake=rake,
        angular_errors=errors)
|
def devectorize_utterance(self, utterance):
    """Take in a sequence of indices and transform it back into a tokenized utterance"""
    swapped = self.swap_pad_and_zero(utterance)
    tokens = self.ie.inverse_transform(swapped)
    return tokens.tolist()
|
def _set_defaults ( self ) :
"""Set some default values in the manifest .
This method should be called after loading from disk , but before
checking the integrity of the reference package ."""
|
self . contents . setdefault ( 'log' , [ ] )
self . contents . setdefault ( 'rollback' , None )
self . contents . setdefault ( 'rollforward' , None )
|
def restore_state(self, state):
    """Restore the current state of this emulated device.

    Args:
        state (dict): A previously dumped state produced by dump_state.
    """
    # Addresses were serialized as strings; convert back before lookup.
    for address_str, tile_state in state.get('tile_states', {}).items():
        address = int(address_str)
        tile = self._tiles.get(address)
        if tile is None:
            raise DataError("Invalid dumped state, tile does not exist at address %d" % address, address=address)
        tile.restore_state(tile_state)
|
def _get_object_from_version ( cls , operations , ident ) :
"""Returns a Python object from an Alembic migration module ( script ) .
Args :
operations : instance of ` ` alembic . operations . base . Operations ` `
ident : string of the format ` ` version . objname ` `
Returns :
the object whose name is ` ` objname ` ` within the Alembic migration
script identified by ` ` version ` `"""
|
version , objname = ident . split ( "." )
module_ = operations . get_context ( ) . script . get_revision ( version ) . module
obj = getattr ( module_ , objname )
return obj
|
def type_and_model_to_query(self, request):
    """Return JSON for an individual Model instance

    If the required parameters are wrong, return 400 Bad Request
    If the parameters are correct but there is no data, return empty JSON
    """
    try:
        content_type_id = request.GET["content_type_id"]
        object_id = request.GET["object_id"]
    except KeyError:
        # Required query parameters are missing -> client error.
        return HttpResponseBadRequest()

    try:
        content_type = ContentType.objects.get(pk=content_type_id)
        instance = content_type.model_class().objects.get(pk=object_id)
    except ObjectDoesNotExist:
        # Valid parameters but no matching row -> empty payload.
        payload = ""
    else:
        payload = '%s: "%d: %s"' % (content_type.model, instance.pk, instance)
    return JsonResponse({"query": payload})
|
def GetAnalyzersInformation(cls):
    """Retrieves the analyzers information.

    Returns:
        list[tuple]: containing:
            str: analyzer name.
            str: analyzer description.
    """
    # Analyzers without a DESCRIPTION attribute get an empty description.
    return [
        (analyzer_class.NAME, getattr(analyzer_class, 'DESCRIPTION', ''))
        for _, analyzer_class in cls.GetAnalyzers()]
|
def extend_to_data(self, data, **kwargs):
    """Build transition matrix from new data to the graph

    Creates a transition matrix such that `Y` can be approximated by a
    linear combination of landmarks. Any transformation of the landmarks
    can be trivially applied to `Y` by performing
    `transform_Y = transitions.dot(transform)`

    Parameters
    ----------
    Y : array-like, [n_samples_y, n_features]
        new data for which an affinity matrix is calculated
        to the existing data. `n_features` must match
        either the ambient or PCA dimensions

    Returns
    -------
    transitions : array-like, [n_samples_y, self.data.shape[0]]
        Transition matrix from `Y` to `self.data`
    """
    kernel = self.build_kernel_to_data(data, **kwargs)
    cluster_ids = np.unique(self.clusters)
    if sparse.issparse(kernel):
        # Sum kernel columns per landmark cluster, keeping the result sparse.
        columns = [sparse.csr_matrix(kernel[:, self.clusters == i].sum(axis=1))
                   for i in cluster_ids]
        pnm = sparse.hstack(columns)
    else:
        columns = [np.sum(kernel[:, self.clusters == i], axis=1).T
                   for i in cluster_ids]
        pnm = np.array(columns).transpose()
    # Row-normalize so each row forms a probability distribution.
    return normalize(pnm, norm='l1', axis=1)
|
def create_default_context(purpose=None, **kwargs):
    """Create a new SSL context in the most secure way available on the current
    Python version. See :func:`ssl.create_default_context`."""
    if not hasattr(ssl, 'create_default_context'):
        # Python 2.7.8, Python 3.3: no stdlib helper; build a minimal
        # SSLv23 context by hand, honoring an optional CA file.
        context = SSLContext(ssl.PROTOCOL_SSLv23)
        if kwargs.get('cafile'):
            context.load_verify_locations(kwargs['cafile'])
        return context
    # Python 2.7.9+, Python 3.4+: accept a server_side boolean or None in
    # addition to the ssl.Purpose.XX values, so callers can write code that
    # works on all supported Python versions.
    if purpose is True:
        purpose = ssl.Purpose.CLIENT_AUTH
    elif purpose is None or purpose is False:
        purpose = ssl.Purpose.SERVER_AUTH
    return ssl.create_default_context(purpose, **kwargs)
|
def coverage():
    """Run tests and show test coverage report."""
    try:
        import pytest_cov  # NOQA
    except ImportError:
        # The coverage plugin is optional; bail out with a helpful hint.
        print_failure_message(
            'Install the pytest coverage plugin to use this task, '
            "i.e., `pip install pytest-cov'.")
        raise SystemExit(1)
    import pytest
    args = list(PYTEST_FLAGS)
    args += ['--cov', CODE_DIRECTORY,
             '--cov-report', 'term-missing',
             '--junit-xml', 'test-report.xml',
             TESTS_DIRECTORY]
    pytest.main(args)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.