signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def removeActor(self, a):
    """Remove ``vtkActor`` or actor index from current renderer."""
    if not self.initializedPlotter:
        # The plotter has never been rendered: force a non-interactive
        # show (restoring the interactivity flag afterwards) and bail out.
        previous_interactive = self.interactive
        self.show(interactive=0)
        self.interactive = previous_interactive
        return
    if self.renderer:
        self.renderer.RemoveActor(a)
        if hasattr(a, 'renderedAt'):
            # Forget that this actor was drawn in the current renderer.
            a.renderedAt.discard(self.renderers.index(self.renderer))
    if a in self.actors:
        self.actors.remove(a)
|
def remove_column(self, column_name, inplace=False):
    """Removes the column with the given name from the SFrame.

    If inplace == False (default) this operation does not modify the
    current SFrame, returning a new SFrame.
    If inplace == True, this operation modifies the current SFrame,
    returning self.

    Parameters
    ----------
    column_name : string
        The name of the column to remove.
    inplace : bool, optional. Defaults to False.
        Whether the SFrame is modified in place.
    """
    if column_name not in self.column_names():
        raise KeyError('Cannot find column %s' % column_name)
    if inplace:
        self.__is_dirty__ = True
        try:
            with cython_context():
                if self._is_vertex_frame():
                    # The reserved vertex id column can never be dropped.
                    assert column_name != '__id', 'Cannot remove \"__id\" column'
                    graph_proxy = self.__graph__.__proxy__.delete_vertex_field(column_name)
                    self.__graph__.__proxy__ = graph_proxy
                elif self._is_edge_frame():
                    # The reserved edge endpoint columns can never be dropped.
                    assert column_name != '__src_id', 'Cannot remove \"__src_id\" column'
                    assert column_name != '__dst_id', 'Cannot remove \"__dst_id\" column'
                    graph_proxy = self.__graph__.__proxy__.delete_edge_field(column_name)
                    self.__graph__.__proxy__ = graph_proxy
            return self
        except:
            # Roll back the dirty flag before re-raising the original error.
            self.__is_dirty__ = False
            raise
    else:
        # Out-of-place removal is handled entirely by the parent class.
        return super(GFrame, self).remove_column(column_name, inplace=inplace)
|
def _sanitize_url(url, max_length):
    """Sanitize and shorten url to fit in max_length.

    Function is stable: same input MUST ALWAYS give same result, across
    changes in code as well. Different URLs might give same result.
    As much as possible, the extension should be kept.

    Heuristics are applied to only keep useful info from url.

    1 - Drop generic [sub]domains.
      'www.cs.toronto.edu/...' -> 'cs.toronto.edu/...'
      'storage.googleapis.com/foo/...' -> 'foo/...'
      'drive.google.com/bar/...' -> 'bar/...'
      'github.com/baz/...' -> 'baz/...'

    2 - Remove leading '0's from url components:
      'foo/train-00004-of-00010.tfrecords' -> 'foo/train-4-of-10.tfrecords'

    3 - Truncate each component of url until total size fits or each component
      is left with 4 chars (or total size is <= limit):
      'MoveUnitToBorder_64x64_png/train-4-of-10.tfrecords'
      (here truncate components to 4 chars per component max)
      -> 'Move_64x6_png/trai-4-of-10.tfrecords'

    4 - Truncate result, keeping prefix: 'abc_def_ghi_jkl' -> 'abc_def'

    Args:
      url: string, url to sanitize and shorten.
      max_length: int, max length of result.

    Returns:
      (string, string): sanitized and shorted url, file extension.
    """
    url = urllib.parse.urlparse(url)
    netloc = url.netloc
    # Heuristic 1: strip generic leading subdomains (e.g. 'www.') ...
    for prefix in _NETLOC_COMMON_PREFIXES:
        if netloc.startswith(prefix):
            netloc = netloc[len(prefix):]
    # ... and generic hosting-domain suffixes (e.g. '.googleapis.com').
    for suffix in _NETLOC_COMMON_SUFFIXES:
        if netloc.endswith(suffix):
            netloc = netloc[:-len(suffix)]
    # Flatten the parsed URL back into a single path-like string.
    url = '%s%s%s%s' % (netloc, url.path, url.params, url.query)
    # Get the extension, preferring known (possibly multi-part) extensions
    # over a plain os.path.splitext fallback:
    for ext in _KNOWN_EXTENSIONS:
        if url.endswith(ext):
            extension = ext
            url = url[:-len(extension)]
            break
    else:
        url, extension = os.path.splitext(url)
    # The extension is always kept, so it eats into the length budget.
    max_length -= len(extension)
    # Replace non authorized chars (including '/') by '_':
    url = re.sub(r'[^a-zA-Z0-9\.\-_]+', '_', url)
    # Remove parts with no info:
    for common_part in _URL_COMMON_PARTS:
        url = url.replace(common_part, '_')
    url = url.strip('_')
    # Heuristic 2: remove leading zeros in groups of numbers
    # ('train-00004' -> 'train-4'):
    url = re.sub('(?<![0-9])0+(?=[0-9])', '', url)
    # Heuristic 3: shrink the allowed component size one character at a time
    # (never below 4 chars) until the whole string fits.
    c_size = max(len(c) for c in re.split(r'[\.\-_]', url))
    while c_size > 4 and len(url) > max_length:
        c_size -= 1
        url = re.sub(r'[^\.\-_]{4,}', lambda match: match.group(0)[:c_size], url)
    # Heuristic 4: hard-truncate, keeping the prefix.
    return url[:max_length], extension
|
def find_name(tagtype: str, name: str, language: {str, 'Language', None} = None):
    """Find the subtag of a particular `tagtype` that has the given `name`.

    With the default language, "und", names are matched in any language, so
    looking up "French", "Français", or "francés" all yield the code 'fr'.

    Occasionally names are ambiguous between languages (e.g. 'Malayo' is one
    language in English and a different one — Malay — in Spanish). Passing
    `language` restricts the lookup to names in that language, which also
    guarantees names can be round-tripped.

    When a name resolves to more than one subtag (e.g. 'Brazilian Portuguese'
    is a language plus a region, 'Simplified Chinese' a language plus a
    script), a Language object carrying all of those subtags is returned.

    A small amount of fuzzy matching is supported: if the name can be
    shortened to match a single language name, that language is returned
    (e.g. "Hakka dialect" matches "Hakka").

    :raises LookupError: if no subtag of `tagtype` has the given `name`
    """
    # Whatever form of language we got, normalize it to a bare language
    # subtag string.
    if isinstance(language, Language):
        language = language.language
    elif isinstance(language, str):
        language = get(language).language
    language = 'und' if language is None else language

    code = name_to_code(tagtype, name, language)
    if code is None:
        raise LookupError("Can't find any %s named %r" % (tagtype, name))
    # Multi-subtag results come back as a full tag; single codes are wrapped
    # into the requested tagtype.
    if '-' in code:
        return Language.get(code)
    return Language.make(**{tagtype: code})
|
def main():
    "Send some test strings"
    # Scripted key sequence: open the Run dialog (Win key + 'r'), launch
    # Notepad, type a greeting, then close it with Alt+F4 and answer 'n'
    # to the save prompt.
    actions = """
{LWIN}
{PAUSE .25}
r
{PAUSE .25}
Notepad.exe{ENTER}
{PAUSE 1}
Hello{SPACE}World!
{PAUSE 1}
%{F4}
{PAUSE .25}
n
"""
    SendKeys(actions, pause=.1)
    # Replay the same action string one parsed key at a time.
    keys = parse_keys(actions)
    for k in keys:
        print(k)
        k.Run()
        time.sleep(.1)
    # Exercise the parser's escape/grouping syntax (braces, %, ^, +, groups),
    # keeping newlines as real keystrokes.
    test_strings = [
        "\n"
        "(aa)some text\n",
        "(a)some{ }text\n",
        "(b)some{{}text\n",
        "(c)some{+}text\n",
        "(d)so%me{ab 4}text",
        "(e)so%me{LEFT 4}text",
        "(f)so%me{ENTER 4}text",
        "(g)so%me{^aa 4}text",
        "(h)some +(asdf)text",
        "(i)some %^+(asdf)text",
        "(j)some %^+a text+",
        "(k)some %^+a tex+{&}",
        "(l)some %^+a tex+(dsf)",
        "",
    ]
    for s in test_strings:
        print(repr(s))
        keys = parse_keys(s, with_newlines=True)
        print(keys)
        for k in keys:
            k.Run()
            time.sleep(.1)
        print()
|
def disconnect(self, receipt=None, headers=None, **keyword_headers):
    """Disconnect at the protocol level, then stop the transport.

    :param str receipt:
    :param dict headers:
    :param keyword_headers:
    """
    # The protocol-level disconnect must be issued before the transport is
    # torn down.
    Protocol12.disconnect(self, receipt, headers, **keyword_headers)
    self.transport.stop()
|
def resolve_model(self, model):
    '''Resolve a model given a name or dict with `class` entry.

    :raises ValueError: model specification is wrong or does not exists'''
    if not model:
        raise ValueError('Unsupported model specifications')
    # Accept either a bare class name or a dict carrying one under 'class'.
    if isinstance(model, basestring):
        classname = model
    elif isinstance(model, dict) and 'class' in model:
        classname = model['class']
    else:
        raise ValueError('Unsupported model specifications')
    try:
        return get_document(classname)
    except self.NotRegistered:
        raise ValueError('Model "{0}" does not exist'.format(classname))
|
def _serialize_to_jvm(self, data, serializer, reader_func, createRDDServer):
    """Using py4j to send a large dataset to the jvm is really slow, so we use either a file
    or a socket if we have encryption enabled.

    :param data: the dataset to ship to the JVM
    :param serializer: serializer used to dump ``data`` to a stream
    :param reader_func: A function which takes a filename and reads in the data in the jvm and
        returns a JavaRDD. Only used when encryption is disabled.
    :param createRDDServer: A function which creates a PythonRDDServer in the jvm to
        accept the serialized data, for use when encryption is enabled.
    :return: the JVM-side result (a JavaRDD when encryption is disabled)
    """
    if self._encryption_enabled:
        # with encryption, we open a server in java and send the data directly
        server = createRDDServer()
        (sock_file, _) = local_connect_and_auth(server.port(), server.secret())
        chunked_out = ChunkedStream(sock_file, 8192)
        serializer.dump_stream(data, chunked_out)
        chunked_out.close()
        # this call will block until the server has read all the data and
        # processed it (or throws an exception)
        r = server.getResult()
        return r
    else:
        # without encryption, we serialize to a file, and we read the file
        # in java and parallelize from there.
        tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
        try:
            try:
                serializer.dump_stream(data, tempFile)
            finally:
                # Ensure the bytes are flushed to disk before java reads it.
                tempFile.close()
            return reader_func(tempFile.name)
        finally:
            # we eagerly read the file so we can delete it right after
            os.unlink(tempFile.name)
|
def install(which=None, mirror_url=None, destination=None, skip_top_level=False, resources_yaml='resources.yaml'):
    """Install one or more resources.

    The resource(s) will be fetched, if necessary, and different resource
    types are handled appropriately (e.g., PyPI resources are installed
    with ``pip``, archive file resources are extracted, non-archive file
    resources are copied, etc).

    For PyPI resources, this is roughly equivalent to the following::

        pip install `juju-resources resource_spec $resource` -i $mirror_url

    :param list which: A name, or a list of one or more resource names, to
        fetch. If omitted, all non-optional resources are installed.
    :param str mirror_url: Fetch resources from the given mirror.
    :param str destination: Destination to which to extract or copy file resources.
    :param bool skip_top_level: When extracting archive file resources, skip
        all members that are at the top level of the archive and instead
        extract all nested members directly into ``destination``. E.g., an
        archive containing ``foo/bar.txt`` and ``foo/qux/baz.txt`` will be
        extracted as ``destination/bar.txt`` and ``destination/qux/baz.txt``.
    :param str resources_yaml: Location of the yaml file containing the
        resource descriptions (default: ``resources.yaml``).
        Can be a local file name or a remote URL.
    :returns: True if all resources were successfully installed.
    """
    # Load the resource definitions, then delegate the actual fetch/install
    # work to the private helper.
    resources = _load(resources_yaml, None)
    return _install(resources, which, mirror_url, destination, skip_top_level)
|
def _load_raw_data(self, resource_name):
    """Extract raw data from resource.

    :param resource_name: name of the resource whose raw bytes are wanted
    """
    # Instantiating the resource again as a simple `Resource` ensures that
    # ``data`` will be returned as bytes.
    upcast_resource = datapackage.Resource(
        self.__resources[resource_name].descriptor,
        default_base_path=self.__base_path)
    return upcast_resource.data
|
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3, probe_rad=0.1):
    """Computes the volume and surface area of isolated void using Zeo++.

    Useful to compute the volume and surface area of vacant site.

    Args:
        structure: pymatgen Structure containing vacancy
        rad_dict (optional): Dictionary with short name of elements and their
            radii.
        chan_rad (optional): Minimum channel Radius.
        probe_rad (optional): Probe radius for Monte Carlo sampling.

    Returns:
        (volume, surface_area) of the void. Both values are -1.0 when the
        structure does not contain exactly one pocket.

    Raises:
        ValueError: if the zeo++ output could not be parsed.
    """
    with ScratchDir('.'):
        name = "temp_zeo"
        zeo_inp_filename = name + ".cssr"
        ZeoCssr(structure).write_file(zeo_inp_filename)
        rad_file = None
        if rad_dict:
            rad_file = name + ".rad"
            with open(rad_file, 'w') as fp:
                for el in rad_dict.keys():
                    # BUG FIX: entries were written without a separator,
                    # concatenating all element/radius pairs into one token.
                    fp.write("{0} {1}\n".format(el, rad_dict[el]))
        atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file)
        # BUG FIX: the chan_rad parameter was previously ignored (a
        # hard-coded 0.3 was passed to both zeo++ calls).
        vol_str = volume(atmnet, chan_rad, probe_rad, 10000)
        sa_str = surface_area(atmnet, chan_rad, probe_rad, 10000)
        vol = None
        sa = None
        for line in vol_str.split("\n"):
            if "Number_of_pockets" in line:
                fields = line.split()
                if float(fields[1]) != 1:
                    # Zero or multiple pockets: the void is not an isolated
                    # single void, signal with -1.0.
                    vol = -1.0
                    break
                vol = float(fields[3])
        for line in sa_str.split("\n"):
            if "Number_of_pockets" in line:
                fields = line.split()
                if float(fields[1]) != 1:
                    sa = -1.0
                    break
                sa = float(fields[3])
        # BUG FIX: 'not vol' treated a legitimate 0.0 result as a parse
        # failure; only a missing value (None) indicates bad output.
        if vol is None or sa is None:
            raise ValueError("Error in zeo++ output stream")
        return vol, sa
|
def get_mixed_type_key(obj):
    """Return a key suitable for sorting between networks and addresses.

    Address and Network objects are not sortable by default; they're
    fundamentally different, so e.g.

        IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')

    doesn't make any sense. If you nevertheless need a combined ordering,
    pass this function as the key= argument to sorted().

    Args:
        obj: either a Network or Address object.
    Returns:
        appropriate key, or NotImplemented for any other type.
    """
    if isinstance(obj, _BaseNetwork):
        return obj._get_networks_key()
    if isinstance(obj, _BaseAddress):
        return obj._get_address_key()
    # Anything else cannot be keyed.
    return NotImplemented
|
def rpc(self, address, rpc_id, *args, **kwargs):
    """Immediately dispatch an RPC inside this EmulatedDevice.

    Meant for testing purposes and for tiles inside a complex
    EmulatedDevice subclass that need to communicate with each other.
    It should only be called from the main virtual device thread where
    start() was called from.

    **Background workers may not call this method since it may cause
    them to deadlock.**

    Args:
        address (int): The address of the tile that has the RPC.
        rpc_id (int): The 16-bit id of the rpc we want to call; may also be
            an RPCDeclaration bundling the id with both format strings.
        *args: Any required arguments for the RPC as python objects.
        **kwargs: Only two keyword arguments are supported:
            - arg_format: A format specifier for the argument list
            - resp_format: A format specifier for the result

    Returns:
        list: A list of the decoded response members from the RPC.
    """
    if isinstance(rpc_id, RPCDeclaration):
        # An RPCDeclaration carries the format strings along with the id.
        arg_format = rpc_id.arg_format
        resp_format = rpc_id.resp_format
        rpc_id = rpc_id.rpc_id
    else:
        arg_format = kwargs.get('arg_format')
        resp_format = kwargs.get('resp_format')

    arg_payload = pack_rpc_payload(arg_format, args) if arg_format is not None else b''
    self._logger.debug("Sending rpc to %d:%04X, payload=%s", address, rpc_id, args)
    resp_payload = self.call_rpc(address, rpc_id, arg_payload)
    if resp_format is None:
        return []
    return unpack_rpc_payload(resp_format, resp_payload)
|
def type(self):
    """Returns a ``string`` constant indicating whether the game was played
    during the regular season or in one of the post-season tournaments."""
    # Map the raw type code onto the corresponding constant; unknown codes
    # yield None, matching the original fall-through behavior.
    season_types = {
        'reg': REGULAR_SEASON,
        'ctourn': CONFERENCE_TOURNAMENT,
        'ncaa': NCAA_TOURNAMENT,
        'nit': NIT_TOURNAMENT,
        'cbi': CBI_TOURNAMENT,
        'cit': CIT_TOURNAMENT,
    }
    return season_types.get(self._type.lower())
|
def ancestor(self, index):
    """Return the ``index``-th ancestor.

    The 0-th ancestor is the node itself, the 1-th ancestor is its
    parent node, etc. Walking past the root yields None.

    :param int index: the number of levels to go up
    :rtype: :class:`~aeneas.tree.Tree`
    :raises: TypeError if ``index`` is not an int
    :raises: ValueError if ``index`` is negative
    """
    if not isinstance(index, int):
        self.log_exc(u"index is not an integer", None, True, TypeError)
    if index < 0:
        self.log_exc(u"index cannot be negative", None, True, ValueError)
    node = self
    for _ in range(index):
        if node is None:
            # Walked off the top of the tree.
            break
        node = node.parent
    return node
|
def _make_request_data(self, teststep_dict, entry_json):
    """Parse HAR entry request data and fill in the teststep request data.

    Args:
        teststep_dict (dict): teststep to update; its "request" sub-dict
            receives either a "data" or a "json" entry.
        entry_json (dict): HAR entry with a "request" sub-dict carrying
            "method" and optionally "postData".
    """
    method = entry_json["request"].get("method")
    if method not in ("POST", "PUT", "PATCH"):
        # Only body-carrying methods have request data.
        return

    postData = entry_json["request"].get("postData", {})
    mimeType = postData.get("mimeType")
    # Note that text and params fields are mutually exclusive.
    if "text" in postData:
        post_data = postData.get("text")
    else:
        post_data = utils.convert_list_to_dict(postData.get("params", []))

    request_data_key = "data"
    # TODO: make compatible with more mimeType values.
    if mimeType:
        if mimeType.startswith("application/json"):
            try:
                post_data = json.loads(post_data)
                request_data_key = "json"
            except JSONDecodeError:
                # Keep the raw text under "data" when it isn't valid JSON.
                pass
        elif mimeType.startswith("application/x-www-form-urlencoded"):
            post_data = utils.convert_x_www_form_urlencoded_to_dict(post_data)

    teststep_dict["request"][request_data_key] = post_data
|
def _read_para_transaction_id(self, code, cbit, clen, *, desc, length, version):
    """Read HIP TRANSACTION_ID parameter.

    Structure of HIP TRANSACTION_ID parameter [RFC 6078]:

         0                   1                   2                   3
         0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        |             Type              |            Length             |
        |                          Identifier                           /
        /                                               |    Padding    |

        Octets      Bits        Name                      Description
          0           0     transaction_id.type         Parameter Type
          1          15     transaction_id.critical     Critical Bit
          2          16     transaction_id.length       Length of Contents
          4          32     transaction_id.id           Identifier
    """
    # The identifier occupies the entire content length.
    _tsid = self._read_unpack(clen)
    transaction_id = dict(
        type=desc,
        critical=cbit,
        length=clen,
        id=_tsid,
    )
    # Consume any trailing padding so the stream stays aligned for the
    # next parameter.
    _plen = length - clen
    if _plen:
        self._read_fileng(_plen)
    return transaction_id
|
def local_timezone(value):
    """Add the local timezone to `value` to make it aware."""
    # Only naive datetime-like objects (a tzinfo attribute that is None)
    # get the local zone attached; everything else passes through untouched.
    if getattr(value, "tzinfo", "absent") is None:
        return value.replace(tzinfo=dateutil.tz.tzlocal())
    return value
|
def parse_usearch61_failures(seq_path, failures, output_fasta_fp):
    """Parses seq IDs from failures list, writes to output_fasta_fp.

    seq_path: filepath of original input fasta file.
    failures: list/set of failure seq IDs
    output_fasta_fp: path to write parsed sequences

    Returns output_fasta_fp.
    """
    # FIX: both file handles were previously leaked (the input file was
    # never closed; the output file stayed open on an exception).
    with open(output_fasta_fp, "w") as parsed_out, open(seq_path) as seqs:
        for label, seq in parse_fasta(seqs, "U"):
            # Sequence IDs are the first whitespace-separated token of the
            # fasta label.
            if label.split()[0] in failures:
                parsed_out.write(">%s\n%s\n" % (label, seq))
    return output_fasta_fp
|
def save(self, wf_state):
    """Write wf state to DB through MQ >> Worker >> _zops_sync_wf_cache.

    Args:
        wf_state (dict): wf state
    """
    self.wf_state = wf_state
    # Tag the state with the acting role before caching it.
    self.wf_state['role_id'] = self.current.role_id
    self.set(self.wf_state)
    # Ephemeral workflows are cached only; the DB sync job is not queued
    # for them.
    if self.wf_state['name'] not in settings.EPHEMERAL_WORKFLOWS:
        self.publish(job='_zops_sync_wf_cache', token=self.db_key)
|
def typelogged_func(func):
    """Works like typelogged, but is only applicable to functions,
    methods and properties."""
    if not pytypes.typelogging_enabled:
        # Logging globally disabled: leave the callable untouched.
        return func
    if hasattr(func, 'do_logging'):
        # Already wrapped for logging; just (re-)enable it.
        func.do_logging = True
        return func
    # 'do_typecheck' shouldn't normally be present here, but honor it if
    # it is; otherwise wrap with typechecking off.
    return _typeinspect_func(func, getattr(func, 'do_typecheck', False), True)
|
def search(self, q, resolve=True, result_type=None, account_id=None, offset=None, min_id=None, max_id=None):
    """Fetch matching hashtags, accounts and statuses.

    Performs webfinger lookups if resolve is True. Full-text search is only
    enabled if the instance supports it, and is restricted to statuses the
    logged-in user wrote or was mentioned in.

    `result_type` can be one of "accounts", "hashtags" or "statuses" to
    restrict the search to that type of object. Specify `account_id` to only
    get results from the account with that id. `offset`, `min_id` and
    `max_id` can be used to paginate.

    Returns a `search result dict`_, with tags as `hashtag dicts`_.
    """
    # Plain delegation to the v2 search endpoint implementation.
    return self.search_v2(
        q,
        resolve=resolve,
        result_type=result_type,
        account_id=account_id,
        offset=offset,
        min_id=min_id,
        max_id=max_id,
    )
|
def recursive_pattern_delete(root, file_patterns, directory_patterns, dry_run=False):
    """Recursively deletes files matching a list of patterns. Same for directories.

    :param root: directory tree to walk
    :param file_patterns: fnmatch patterns; matching files are removed
    :param directory_patterns: fnmatch patterns; matching directory trees
        are removed
    :param dry_run: when True, only report what would be removed
    """
    # NOTE: the walk variable is named dirpath so it no longer shadows the
    # root parameter.
    for dirpath, dirs, files in os.walk(root):
        for pattern in file_patterns:
            for file_name in fnmatch.filter(files, pattern):
                file_path = os.path.join(dirpath, file_name)
                if dry_run:
                    print_('Removing {}'.format(file_path))
                    continue
                os.remove(file_path)
        for pattern in directory_patterns:
            for found_dir in fnmatch.filter(dirs, pattern):
                # BUG FIX: os.walk yields directory names relative to
                # dirpath; the bare name was previously resolved against
                # the CWD, so matching directories were silently skipped
                # (or the wrong ones removed) when run from elsewhere.
                dir_path = os.path.join(dirpath, found_dir)
                if os.path.exists(dir_path):
                    if dry_run:
                        print_('Removing directory tree {}'.format(dir_path))
                        continue
                    shutil.rmtree(dir_path)
|
def lineincols(inlist, colsize):
    """Returns a string composed of elements in inlist, with each element
    right-aligned in columns of (fixed) colsize.

    Usage:   lineincols(inlist, colsize)   where colsize is an integer
    """
    pieces = []
    for item in inlist:
        # FIX: use isinstance/str instead of the removed Py2 StringType.
        if not isinstance(item, str):
            item = str(item)
        if len(item) <= colsize:
            pieces.append(item.rjust(colsize))
        else:
            # FIX: was item[0:colsize + 1], which overflowed the fixed
            # column width by one character.
            pieces.append(item[:colsize])
    # Join once instead of quadratic += concatenation.
    return ''.join(pieces)
|
def _to_dot_key(cls, section, key=None):
    """Return the section (and optional key) in dot notation format."""
    def normalize(text):
        # Lowercase, then collapse every non-alphanumeric run to '_'.
        return NON_ALPHA_NUM.sub('_', text.lower())

    if key:
        return (normalize(section), normalize(key))
    return normalize(section)
|
def get_shipping_cost(settings, country_code=None, name=None):
    """Return the shipping cost for a given country code and shipping option
    (shipping rate name).

    A country-specific rate, when found, overrides the default rate.
    Raises InvalidShippingCountry when no default rate is configured and no
    country code is given; raises InvalidShippingRate when the country/name
    pair does not match exactly one configured rate.
    """
    shipping_rate = None
    if settings.default_shipping_enabled:
        # Fallback rate, possibly overridden by a country-specific rate below.
        shipping_rate = {
            "rate": settings.default_shipping_rate,
            "description": "Standard shipping to rest of world",
            "carrier": settings.default_shipping_carrier,
        }
    elif not country_code:
        raise InvalidShippingCountry
    if country_code:
        # Exactly one matching rate is required; zero or several matches are
        # treated as an error.
        qrs = models.ShippingRate.objects.filter(countries__in=[country_code], name=name)
        count = qrs.count()
        if count == 1:
            shipping_rate_qrs = qrs[0]
        else:
            raise InvalidShippingRate()
        shipping_rate = {
            "rate": shipping_rate_qrs.rate,
            "description": shipping_rate_qrs.description,
            "carrier": shipping_rate_qrs.carrier,
        }
    return shipping_rate
|
def disableTemperature(self):
    """Specifies the device should NOT write temperature values to the FIFO;
    not applied until enableFIFO is called.

    :return: None
    """
    logger.debug("Disabling temperature sensor")
    # Clear the temperature bit in the FIFO sensor mask, then recompute the
    # per-sample byte size to match.
    self.fifoSensorMask &= ~self.enableTemperatureMask
    self._setSampleSizeBytes()
|
def _GetPurgeMessage ( most_recent_step , most_recent_wall_time , event_step , event_wall_time , num_expired ) :
"""Return the string message associated with TensorBoard purges ."""
|
return ( 'Detected out of order event.step likely caused by a TensorFlow ' 'restart. Purging {} expired tensor events from Tensorboard display ' 'between the previous step: {} (timestamp: {}) and current step: {} ' '(timestamp: {}).' ) . format ( num_expired , most_recent_step , most_recent_wall_time , event_step , event_wall_time )
|
def get_stream_action_type(stream_arn):
    """Returns the awacs Action for a stream type given an arn.

    Args:
        stream_arn (str): The Arn of the stream.

    Returns:
        :class:`awacs.aws.Action`: The appropriate stream type awacs Action
        class.

    Raises:
        ValueError: If the stream type doesn't match kinesis or dynamodb.
    """
    actions_by_service = {
        "kinesis": awacs.kinesis.Action,
        "dynamodb": awacs.dynamodb.Action,
    }
    # The service name is the third colon-separated ARN field.
    stream_type = stream_arn.split(":")[2]
    if stream_type not in actions_by_service:
        raise ValueError("Invalid stream type '%s' in arn '%s'" % (stream_type, stream_arn))
    return actions_by_service[stream_type]
|
def get_connection(self):
    """Return a connection from the pool using the `ConnectionSelector`
    instance.

    It tries to resurrect eligible connections, forces a resurrection when
    no connections are available and passes the list of live connections to
    the selector instance to choose from.
    """
    self.resurrect()
    live = self.connections[:]
    if not live:
        # No live nodes: resurrect one by force and return it.
        return self.resurrect(True)
    if len(live) == 1:
        # Single candidate — the selector would be pointless.
        return live[0]
    return self.selector.select(live)
|
def get_vmpolicy_macaddr_input_datacenter(self, **kwargs):
    """Auto Generated Code"""
    # Build <get_vmpolicy_macaddr><input><datacenter>...</datacenter>... and
    # hand the tree to the callback ('callback' kwarg overrides the default).
    config = ET.Element("get_vmpolicy_macaddr")
    input_el = ET.SubElement(config, "input")
    datacenter_el = ET.SubElement(input_el, "datacenter")
    datacenter_el.text = kwargs.pop('datacenter')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def send_and_receive_raw(self, target, lun, netfn, raw_bytes):
    """Interface function to send and receive raw message.

    target: IPMI target
    lun: logical unit number
    netfn: network function
    raw_bytes: RAW bytes as bytestring

    Returns the IPMI message response bytestring.
    """
    # First byte of the raw message is the command id; the remainder is
    # the payload.
    cmdid = array('B', raw_bytes)[0]
    payload = raw_bytes[1:]
    return self._send_and_receive(target=target, lun=lun, netfn=netfn,
                                  cmdid=cmdid, payload=payload)
|
def eth_getBlockHeaderByNumber(self, number):
    """Get block header by block number.

    :param number: block number to look up
    :return: the header of that block, as provided by the reader
    """
    # Resolve the hash first; the reader wants both the hash and the
    # canonical block-number form.
    header_hash = self.reader._get_block_hash(number)
    canonical_number = _format_block_number(number)
    return self.reader._get_block_header(header_hash, canonical_number)
|
def construct_xblock_from_class(self, cls, scope_ids, field_data=None, *args, **kwargs):
    """Construct a new xblock of type cls, mixing in the mixins
    defined for this application."""
    # Let the mixologist produce the mixed class, then instantiate it with
    # this runtime plus any extra positional/keyword arguments.
    mixed_cls = self.mixologist.mix(cls)
    return mixed_cls(runtime=self, field_data=field_data,
                     scope_ids=scope_ids, *args, **kwargs)
|
def compile_link_import_strings(codes, build_dir=None, **kwargs):
    """Creates a temporary directory and dumps, compiles and links
    provided source code.

    Parameters
    ----------
    codes : iterable of name/source pair tuples
    build_dir : string (default: None)
        path to cache_dir. None implies use a temporary directory.
    **kwargs :
        keyword arguments passed onto `compile_link_import_py_ext`
    """
    build_dir = build_dir or tempfile.mkdtemp()
    if not os.path.isdir(build_dir):
        raise OSError("Non-existent directory: ", build_dir)
    source_files = []
    if kwargs.get('logger', False) is True:
        # logger=True means "give me a default debug logger".
        import logging
        logging.basicConfig(level=logging.DEBUG)
        kwargs['logger'] = logging.getLogger()
    only_update = kwargs.get('only_update', True)
    for name, code_ in codes:
        dest = os.path.join(build_dir, name)
        differs = True
        md5_in_mem = md5_of_string(code_.encode('utf-8')).hexdigest()
        if only_update and os.path.exists(dest):
            if os.path.exists(dest + '.md5'):
                # FIX: close the digest file (was a leaked open().read()).
                with open(dest + '.md5', 'rt') as fh:
                    md5_on_disk = fh.read()
            else:
                md5_on_disk = md5_of_file(dest).hexdigest()
            differs = md5_on_disk != md5_in_mem
        if not only_update or differs:
            with open(dest, 'wt') as fh:
                fh.write(code_)
            # FIX: close the digest file (was a leaked open().write()).
            with open(dest + '.md5', 'wt') as fh:
                fh.write(md5_in_mem)
        source_files.append(dest)
    return compile_link_import_py_ext(source_files, build_dir=build_dir, **kwargs)
|
def clone_and_update(self, **kwargs):
    """Clones the object and updates the clone with the args.

    @param kwargs: Keyword arguments to set
    @return: The cloned copy with updated values
    """
    # Only the clone is mutated; the original stays untouched.
    fresh_copy = self.clone()
    fresh_copy.update(**kwargs)
    return fresh_copy
|
def is_valid(container, path):
    """Checks if a container exists and is unpacked.

    Args:
        container: container object; its ``filename`` locates the unpacked
            tmp copy whose ``.hash`` sidecar is compared.
        path: The location where the container is expected.

    Returns:
        True if the container is valid, False if the container needs to be
        unpacked or if the path does not exist yet.
    """
    try:
        tmp_hash_path = container.filename + ".hash"
        with open(tmp_hash_path, 'r') as tmp_file:
            tmp_hash = tmp_file.readline()
    except IOError:
        LOG.info("No .hash-file in the tmp-directory.")
        # FIX: previously execution fell through and crashed with a
        # NameError on the undefined tmp_hash; no tmp hash means the
        # container cannot be validated.
        return False
    container_hash_path = local.path(path) / "gentoo.tar.bz2.hash"
    if container_hash_path.exists():
        with open(container_hash_path, 'r') as hash_file:
            return hash_file.readline() == tmp_hash
    return False
|
def _load(module, globals_dict=None, symb_list=None):
    """Loads a Python module to make variables, objects and functions
    available globally.

    The idea is to load the module using importlib, then copy the
    symbols to the global symbol table.

    :param module: dotted module name to import
    :param globals_dict: destination namespace; defaults to builtins, which
        makes the names visible from every module
    :param symb_list: optional list that collects the copied symbol names
    """
    if globals_dict is None:
        globals_dict = six.moves.builtins.__dict__
    try:
        mod = importlib.import_module(module)
        if '__all__' in mod.__dict__:
            # import listed symbols
            for name in mod.__dict__['__all__']:
                if symb_list is not None:
                    symb_list.append(name)
                globals_dict[name] = mod.__dict__[name]
        else:
            # only import non-private symbols
            for name, sym in six.iteritems(mod.__dict__):
                if _validate_local(name):
                    if symb_list is not None:
                        symb_list.append(name)
                    globals_dict[name] = sym
    except Exception:
        # Import failures are reported, not propagated, since this runs in
        # an interactive context.
        log_interactive.error("Loading module %s", module, exc_info=True)
|
def read_10xgenomics(cls, tarball_fpath: str, prefix: str, use_ensembl_ids: bool = False):
    """Read a 10x Genomics compressed tarball containing expression data.

    Note: common prefix patterns:
    - "filtered_gene_bc_matrices/[annotations]/"
    - "filtered_matrices_mex/[annotations]/"
    TODO: docstring
    """
    _LOGGER.info('Reading file: %s', tarball_fpath)
    with tarfile.open(tarball_fpath, mode='r:gz') as tf:
        # Sparse expression matrix (genes x cells) in MatrixMarket format.
        with tf.extractfile(tf.getmember('%smatrix.mtx' % prefix)) as fh:
            mtx = scipy.io.mmread(fh)
        # Gene annotations: column 0 holds Ensembl IDs, column 1 gene symbols.
        gene_col = 0 if use_ensembl_ids else 1
        with tf.extractfile(tf.getmember('%sgenes.tsv' % prefix)) as fh:
            reader = csv.reader(io.TextIOWrapper(fh, encoding='ascii'), delimiter='\t')
            gene_names = [row[gene_col] for row in reader]
        # Cell barcodes, one per matrix column.
        with tf.extractfile(tf.getmember('%sbarcodes.tsv' % prefix)) as fh:
            reader = csv.reader(io.TextIOWrapper(fh, encoding='ascii'), delimiter='\t')
            barcodes = [row[0] for row in reader]
        assert mtx.shape[0] == len(gene_names)
        assert mtx.shape[1] == len(barcodes)
        _LOGGER.info('Matrix dimensions: %s', str(mtx.shape))
        X = mtx.todense()
        matrix = cls(X=X, genes=gene_names, cells=barcodes)
        return matrix
|
def getMultiSeriesRegistrations(self, q_filter=Q(), name_series=False, **kwargs):
    '''Report the series for which this person registered more than once.

    Uses getSeriesRegistered() to pull the full list of series
    registrations and keeps only series appearing multiple times (e.g. for
    keeping track of dance admissions for couples who register under one
    name).
    '''
    registered = self.getSeriesRegistered(q_filter, distinct=False, counter=False, **kwargs)
    duplicates = [pair for pair in Counter(registered).items() if pair[1] > 1]
    if not duplicates:
        return None
    if name_series:
        if 'year' in kwargs or 'month' in kwargs:
            return [str(count) + 'x: ' + series.classDescription.title
                    for series, count in duplicates]
        return [str(count) + 'x: ' + series.__str__()
                for series, count in duplicates]
    return '%sx registration' % max(count for _, count in duplicates)
|
def acp_account():
    """Manage the user account of currently-logged-in users.

    This does NOT accept admin-specific options.
    """
    if request.args.get('status') == 'pwdchange':
        alert = 'You must change your password before proceeding.'
        alert_status = 'danger'
        pwdchange_skip = True
    else:
        alert = ''
        alert_status = ''
        pwdchange_skip = False
    if db is None:
        # No database configured: only the password-hash helper is available.
        form = PwdHashForm()
        return render('coil_account_single.tmpl', context={'title': 'My account', 'form': form, 'alert': alert, 'alert_status': alert_status})
    action = 'edit'
    form = AccountForm()
    if request.method == 'POST':
        if int(current_user.uid) in app.config['COIL_USERS_PREVENT_EDITING']:
            return error("Cannot edit data for this user.", 403)
        if not form.validate():
            return error("Bad Request", 400)
        action = 'save'
        data = request.form
        if data['newpwd1']:
            # Bug fix: pwd_ok must default to False.  Previously it was left
            # unbound (NameError) when check_password() raised ValueError for
            # a hash that is not an old-style '$2a$12' bcrypt hash.
            pwd_ok = False
            try:
                pwd_ok = check_password(current_user.password, data['oldpwd'])
            except ValueError:
                if current_user.password.startswith('$2a$12'):  # old bcrypt hash
                    pwd_ok = check_old_password(current_user.password, data['oldpwd'])
            if data['newpwd1'] == data['newpwd2'] and pwd_ok:
                current_user.password = password_hash(data['newpwd1'])
                current_user.must_change_password = False
                pwdchange_skip = True
            else:
                alert = 'Passwords don’t match.'
                alert_status = 'danger'
                action = 'save_fail'
        current_user.realname = data['realname']
        current_user.email = data['email']
        current_user.wants_all_posts = 'wants_all_posts' in data
        write_user(current_user)
    return render('coil_account.tmpl', context={'title': 'My account', 'action': action, 'alert': alert, 'alert_status': alert_status, 'form': form, 'pwdchange_skip': pwdchange_skip})
|
def retrieve_last_elements(sublists):
    """Return the final element of each sublist in *sublists*.

    Examples:
        >>> retrieve_last_elements([[1, 2, 3], [4, 5], [6, 7, 8, 9]])
        [3, 5, 9]
        >>> retrieve_last_elements([['x', 'y', 'z'], ['m'], ['a', 'b'], ['u', 'v']])
        ['z', 'm', 'b', 'v']
        >>> retrieve_last_elements([[1, 2, 3], [4, 5]])
        [3, 5]

    Args:
        sublists: A list containing sublists.

    Returns:
        A list with the last element from each sublist.
    """
    tails = []
    for inner in sublists:
        tails.append(inner[-1])
    return tails
|
def currentpath(self) -> str:
    """Absolute path of the current working directory.

    >>> from hydpy.core.filetools import FileManager
    >>> filemanager = FileManager()
    >>> filemanager.BASEDIR = 'basename'
    >>> filemanager.projectdir = 'projectname'
    >>> from hydpy import repr_, TestIO
    >>> with TestIO():
    ...     filemanager.currentdir = 'testdir'
    ...     repr_(filemanager.currentpath)    # doctest: +ELLIPSIS
    '...hydpy/tests/iotesting/projectname/basename/testdir'
    """
    base = self.basepath
    return os.path.join(base, self.currentdir)
|
def popitem(self):
    """Remove the oldest key from the dict and return its (key, value) pair.

    Raises:
        KeyError: if the dictionary is empty.
    """
    if not self._keys:
        raise KeyError("popitem() on empty dictionary")
    oldest = self._keys[0]
    value = self[oldest]
    del self[oldest]
    return (oldest, value)
|
def deserialize(doc_xml, pyxb_binding=None):
    """Deserialize DataONE XML types to PyXB.

    Args:
        doc_xml: UTF-8 encoded ``bytes``
        pyxb_binding: PyXB binding object. If not specified, the correct one
            should be selected automatically.

    Returns:
        PyXB object

    See Also:
        ``deserialize_d1_exception()`` for deserializing DataONE Exception
        types.
    """
    binding = pyxb_binding or d1_common.types.dataoneTypes
    try:
        return binding.CreateFromDocument(doc_xml)
    except pyxb.ValidationError as e:
        # Schema validation failures carry structured details.
        raise ValueError(
            'Unable to deserialize XML to PyXB. error="{}" xml="{}"'.format(e.details(), doc_xml))
    except (pyxb.PyXBException, xml.sax.SAXParseException, Exception) as e:
        # Any other parse/library failure is wrapped the same way.
        raise ValueError(
            'Unable to deserialize XML to PyXB. error="{}" xml="{}"'.format(str(e), doc_xml))
|
def url(self, value):
    """Setter for **self.__url** attribute.

    :param value: Attribute value.
    :type value: unicode
    """
    if value is None:
        return
    assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format("url", value)
    self.__url = value
|
def try_it_n_times(operation, expected_error_codes, custom_error='operation failed', n=10):
    """Try a given operation (API call) up to *n* times.

    Raises immediately if the API call fails with an error_code that is
    not in *expected_error_codes*.  Raises ``UpCloudClientError`` with
    *custom_error* if the call has not succeeded within *n* attempts.
    Waits 3 seconds between attempts.

    :param operation: zero-argument callable performing the API call
    :param expected_error_codes: error codes for which the call is retried
    :param custom_error: message for the final failure
    :param n: maximum number of attempts (at least one is always made)
    """
    for attempt in itertools.count():
        try:
            operation()
            return
        except UpCloudAPIError as e:
            if e.error_code not in expected_error_codes:
                # Bug fix: bare `raise` preserves the original traceback
                # (previously `raise e` re-raised from this frame).
                raise
            if attempt >= n - 1:
                # Bug fix: check the attempt budget *before* sleeping so we
                # no longer waste a 3-second sleep after the final failure.
                raise UpCloudClientError(custom_error)
            sleep(3)
|
def dad_status_output_dad_status_entries_index(self, **kwargs):
    """Auto Generated Code"""
    # Build the dad_status/output/dad-status-entries/index element tree.
    config = ET.Element("config")
    dad_status = ET.Element("dad_status")
    config = dad_status
    output = ET.SubElement(dad_status, "output")
    entries = ET.SubElement(output, "dad-status-entries")
    index_elem = ET.SubElement(entries, "index")
    index_elem.text = kwargs.pop('index')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def _check_signal ( self , s ) :
r"""Check if signal is valid ."""
|
s = np . asanyarray ( s )
if s . shape [ 0 ] != self . n_vertices :
raise ValueError ( 'First dimension must be the number of vertices ' 'G.N = {}, got {}.' . format ( self . N , s . shape ) )
return s
|
def underlying_variable_ref(t):
    """Find the underlying variable ref.

    Traverses through Identity, ReadVariableOp, and Enter ops.
    Stops when the op type has Variable or VarHandle in its name.

    Args:
        t: a Tensor

    Returns:
        a Tensor that is a variable ref, or None on error.
    """
    pass_through_ops = ("Identity", "ReadVariableOp", "Enter")
    while t.op.type in pass_through_ops:
        t = t.op.inputs[0]
    if "Variable" in t.op.type or "VarHandle" in t.op.type:
        return t
    return None
|
def page_exists_on_disk(self, slug):
    '''Return True if the post directory and post file both exist.'''
    page_dir = os.path.join(self.dirs['source'], slug)
    page_file = os.path.join(page_dir, slug + '.md')
    # Both the directory and the markdown file inside it must be present.
    return os.path.isdir(page_dir) and os.path.isfile(page_file)
|
def render_attrs(attrs):
    """Render HTML attributes, or return '' if no attributes need rendering.

    ``None`` values are skipped, ``True`` renders a bare attribute name,
    and dict values for ``class``/``style`` are rendered via
    ``render_class``/``render_style``.  Double quotes inside attribute
    values are escaped so the generated markup stays well-formed.
    """
    if attrs is not None:
        def parts():
            for key, value in sorted(attrs.items()):
                if value is None:
                    # None means "omit this attribute entirely".
                    continue
                if value is True:
                    # Boolean attribute: render just the name.
                    yield '%s' % (key,)
                    continue
                if key == 'class' and isinstance(value, dict):
                    if not value:
                        continue
                    value = render_class(value)
                if key == 'style' and isinstance(value, dict):
                    if not value:
                        continue
                    value = render_style(value)
                # Bug fix: double quotes must be escaped as &quot;
                # (previously the quote was "replaced" with itself, a no-op).
                yield '%s="%s"' % (key, ('%s' % value).replace('"', '&quot;'))
        return mark_safe(' %s' % ' '.join(parts()))
    return ''
|
def _task_complete(self, **kwargs):
    """Perform cleanup tasks and notify the parent Job that the Task finished.

    Records the completion timestamp and success flag on this task, then
    forwards all keyword arguments to the parent job's ``_complete_task``.
    All bookkeeping happens under the job's completion lock, presumably so
    that concurrent task completions do not race -- confirm against Job.

    Keyword Args:
        success: whether the task succeeded; stored as ``self.successful``
            (``None`` when not supplied).  All kwargs are passed through to
            the parent job.
    """
    logger.debug('Running _task_complete for task {0}'.format(self.name))
    with self.parent_job.completion_lock:
        self.completed_at = datetime.utcnow()
        self.successful = kwargs.get('success', None)
        self.parent_job._complete_task(self.name, **kwargs)
|
def get_item(self, item_type, id):
    '''Get the item response for the given item_type and id.

    :param item_type str: A valid item-type
    :param id str: The id of the item
    :returns: :py:Class:`planet.api.models.JSON`
    :raises planet.api.exceptions.APIException: On API error.
    '''
    url = 'data/v1/item-types/{}/items/{}'.format(item_type, id)
    response = self._get(url)
    return response.get_body()
|
def _validate_isvalid_history(self, isvalid_history, field, value):
    """Checks that the given time history is properly formatted.

    Args:
        isvalid_history (`bool`): flag from schema indicating units to be checked.
        field (`str`): property associated with history in question.
        value (`dict`): dictionary of values from file associated with this property.

    The rule's arguments are validated against this schema:
        {'isvalid_history': {'type': 'bool'}, 'field': {'type': 'str'},
         'value': {'type': 'dict'}}
    """
    # Normalize the history type so emission/absorption variants share units.
    history_type = value['type']
    for suffix in ('emission', 'absorption'):
        if history_type.endswith(suffix):
            history_type = suffix
            break
    # The quantity must carry units consistent with the history type.
    quantity = 1.0 * (units(value['quantity']['units']))
    try:
        quantity.to(property_units[history_type])
    except pint.DimensionalityError:
        self._error(field, 'incompatible units; should be consistent '
                           'with ' + property_units[history_type])
    # The time column must carry time units.
    time = 1.0 * (units(value['time']['units']))
    try:
        time.to(property_units['time'])
    except pint.DimensionalityError:
        self._error(field, 'incompatible units; should be consistent '
                           'with ' + property_units['time'])
    # The values table must have exactly as many columns as are referenced.
    n_cols = len(value['values'][0])
    max_cols = max(value['time']['column'], value['quantity']['column'],
                   value.get('uncertainty', {}).get('column', 0)) + 1
    if n_cols > max_cols:
        self._error(field, 'too many columns in the values')
    elif n_cols < max_cols:
        self._error(field, 'not enough columns in the values')
|
def get_raw_exception_record_list(self):
    """Traverses the exception record linked list and builds a Python list.

    Nested exception records are received for nested exceptions. This
    happens when an exception is raised in the debugee while trying to
    handle a previous exception.

    @rtype: list( L{win32.EXCEPTION_RECORD} )
    @return:
        List of raw exception record structures as used by the Win32 API.
        There is always at least one exception record, so the list is
        never empty. All other methods of this class read from the first
        exception record only, that is, the most recent exception.
    """
    # The first EXCEPTION_RECORD is contained in EXCEPTION_DEBUG_INFO.
    # The remaining EXCEPTION_RECORD structures are linked by pointers.
    records = []
    current = self.raw.u.Exception
    while True:
        current = current.ExceptionRecord
        if not current:
            break
        records.append(current)
    return records
|
def add_dependent_work_units(self, work_unit, depends_on, hard=True):
    """Add work units, where one prevents execution of the other.

    The two work units may be attached to different work specs,
    but both must be in this task master's namespace.  `work_unit`
    and `depends_on` are both tuples of (work spec name, work unit
    name, work unit dictionary).  The work specs must already
    exist; they may be created with :meth:`update_bundle` with
    an empty work unit dictionary.  If a work unit dictionary is
    provided with either work unit, then this defines that work
    unit, and any existing definition is replaced.  Either or both
    work unit dictionaries may be :const:`None`, in which case the
    work unit is not created if it does not already exist.  In
    this last case, the other work unit will be added if
    specified, but the dependency will not be added, and this
    function will return :const:`False`.  In all other cases, this
    dependency is added in addition to all existing dependencies
    on either or both work units, even if the work unit dictionary
    is replaced.

    `work_unit` will not be executed or reported as available via
    :meth:`get_work` until `depends_on` finishes execution.  If
    the `depends_on` task fails, then the `hard` parameter
    describes what happens: if `hard` is :const:`True` then
    `work_unit` will also fail, but if `hard` is :const:`False`
    then `work_unit` will be able to execute even if `depends_on`
    fails, it just must have completed some execution attempt.

    Calling this function with ``hard=True`` suggests an ordered
    sequence of tasks where the later task depends on the output
    of the earlier tasks.  Calling this function with
    ``hard=False`` suggests a cleanup task that must run after
    this task (and, likely, several others) are done, but doesn't
    specifically depend on its result being available.

    :param work_unit: "Later" work unit to execute
    :paramtype work_unit: tuple of (str, str, dict)
    :param depends_on: "Earlier" work unit to execute
    :paramtype depends_on: tuple of (str, str, dict)
    :param bool hard: if True, then `work_unit` automatically fails
      if `depends_on` fails
    :return: :const:`True`, unless one or both of the work units
      didn't exist and weren't specified, in which case, :const:`False`
    :raise rejester.exceptions.NoSuchWorkSpecError: if a work spec was
      named that doesn't exist
    """
    # There's no good, not-confusing terminology here.
    # I'll call work_unit "later" and depends_on "earlier"
    # consistently, because that at least makes the time flow
    # correct.
    later_spec, later_unit, later_unitdef = work_unit
    earlier_spec, earlier_unit, earlier_unitdef = depends_on
    with self.registry.lock(identifier=self.worker_id) as session:
        # Bail if either work spec doesn't already exist
        if session.get(WORK_SPECS, later_spec) is None:
            raise NoSuchWorkSpecError(later_spec)
        if session.get(WORK_SPECS, earlier_spec) is None:
            raise NoSuchWorkSpecError(earlier_spec)
        # Cause both work units to exist (if possible)
        # Note that if "earlier" is already finished, we may be
        # able to make "later" available immediately
        earlier_done = False
        earlier_successful = False
        if earlier_unitdef is not None:
            # Caller supplied a definition: (re)define "earlier" as pending.
            session.update(WORK_UNITS_ + earlier_spec, {earlier_unit: earlier_unitdef})
        else:
            # No definition supplied: look "earlier" up in each state bucket
            # (pending, blocked, finished, failed) to learn where it stands.
            earlier_unitdef = session.get(WORK_UNITS_ + earlier_spec, earlier_unit)
            if earlier_unitdef is None:
                earlier_unitdef = session.get(WORK_UNITS_ + earlier_spec + _BLOCKED, earlier_unit)
            if earlier_unitdef is None:
                earlier_unitdef = session.get(WORK_UNITS_ + earlier_spec + _FINISHED, earlier_unit)
                if earlier_unitdef is not None:
                    earlier_done = True
                    earlier_successful = True
            if earlier_unitdef is None:
                earlier_unitdef = session.get(WORK_UNITS_ + earlier_spec + _FAILED, earlier_unit)
                if earlier_unitdef is not None:
                    earlier_done = True
        # Decide which state bucket "later" belongs in given "earlier"'s
        # outcome: failed (hard dependency already failed), unblocked
        # (dependency satisfied or nonexistent), or blocked.
        later_failed = earlier_done and hard and not earlier_successful
        later_unblocked = ((earlier_done and not later_failed) or (earlier_unitdef is None))
        if later_failed:
            later_destination = WORK_UNITS_ + later_spec + _FAILED
        elif later_unblocked:
            later_destination = WORK_UNITS_ + later_spec
        else:
            later_destination = WORK_UNITS_ + later_spec + _BLOCKED
        if later_unitdef is not None:
            # Caller supplied a definition: remove "later" from every other
            # state bucket, then (re)define it in its destination bucket.
            for suffix in ['', _FINISHED, _FAILED, _BLOCKED]:
                k = WORK_UNITS_ + later_spec + suffix
                if k != later_destination:
                    session.popmany(k, later_unit)
            session.update(later_destination, {later_unit: later_unitdef})
        elif earlier_unitdef is not None:
            # No definition supplied: if "later" is currently pending, move
            # it to the blocked bucket; otherwise it may already be blocked.
            later_unitdef = session.get(WORK_UNITS_ + later_spec, later_unit)
            if later_unitdef is not None:
                session.move(WORK_UNITS_ + later_spec, WORK_UNITS_ + later_spec + _BLOCKED, {later_unit: later_unitdef})
            else:
                later_unitdef = session.get(WORK_UNITS_ + later_spec + _BLOCKED, later_unit)
        if later_unitdef is None or earlier_unitdef is None:
            # One of the units neither exists nor was defined: the other
            # unit (if any) has been added, but no dependency is recorded.
            return False
        # Now both units exist and are in the right place;
        # record the dependency
        blocks = session.get(WORK_UNITS_ + earlier_spec + _BLOCKS, earlier_unit)
        if blocks is None:
            blocks = []
        blocks.append([later_spec, later_unit, hard])
        session.set(WORK_UNITS_ + earlier_spec + _BLOCKS, earlier_unit, blocks)
        depends = session.get(WORK_UNITS_ + later_spec + _DEPENDS, later_unit)
        if depends is None:
            depends = []
        depends.append([earlier_spec, earlier_unit])
        session.set(WORK_UNITS_ + later_spec + _DEPENDS, later_unit, depends)
        return True
|
def check_lt(self):
    """Check whether the POSTed LoginTicket is valid; if so, invalidate it.

    :return: ``True`` if the LoginTicket is valid, ``False`` otherwise
    :rtype: bool
    """
    # save LT for later check
    valid_tickets = self.request.session.get('lt', [])
    posted_ticket = self.request.POST.get('lt')
    # generate a new LT (by posting the LT has been consumed)
    self.gen_lt()
    # check if send LT is valid
    if posted_ticket not in valid_tickets:
        return False
    self.request.session['lt'].remove(posted_ticket)
    # we need to redo the affectation for django to detect that the list has
    # changed and for its new value to be stored in the session
    self.request.session['lt'] = self.request.session['lt']
    return True
|
def nodes(self):
    """A |Nodes| collection of all required nodes.

    >>> from hydpy import RiverBasinNumbers2Selection
    >>> rbns2s = RiverBasinNumbers2Selection(
    ...     (111, 113, 1129, 11269, 1125, 11261,
    ...      11262, 1123, 1124, 1122, 1121))

    Note that the required outlet node is added:

    >>> rbns2s.nodes
    Nodes("node_1123", "node_1125", "node_11269", "node_1129", "node_113",
          "node_outlet")

    It is both possible to change the prefix names of the nodes and
    the name of the outlet node separately:

    >>> rbns2s.node_prefix = 'b_'
    >>> rbns2s.last_node = 'l_node'
    >>> rbns2s.nodes
    Nodes("b_1123", "b_1125", "b_11269", "b_1129", "b_113", "l_node")
    """
    router_nodes = devicetools.Nodes(
        self.node_prefix + number for number in self._router_numbers)
    # The outlet node is always required in addition to the routers.
    return router_nodes + devicetools.Node(self.last_node)
|
def get_display_label(choices, status):
    """Get a display label for a resource status.

    Used where a resource's status or admin state label needs to be
    assigned before it is sent to the view template.  Falls back to the
    raw *status* when no choice matches (the comparison is
    case-insensitive and treats ``None`` as the empty string).
    """
    needle = (status or '').lower()
    for value, label in choices:
        if value == needle:
            return label
    return status
|
def get_super_assignment(pyname):
    """Find the same-named assignment on a superclass, if any.

    :type pyname: rope.base.pynamesdef.AssignedName
    :rtype: rope.base.pynamesdef.AssignedName
    """
    try:
        pyclass, attr_name = get_class_with_attr_name(pyname)
    except TypeError:
        # get_class_with_attr_name found nothing (not unpackable).
        return None
    # Skip the class itself; scan only real ancestors.
    for super_pyclass in get_mro(pyclass)[1:]:
        if attr_name in super_pyclass:
            return super_pyclass[attr_name]
    return None
|
def _write(self, session, openFile, replaceParamFile=None):
    """ProjectFileEvent Write to File Method"""
    # Serialize the events (ordered by name, then subfolder) as a YAML list.
    ordered_events = self.events.order_by(ProjectFileEvent.name, ProjectFileEvent.subfolder)
    yml_document = yaml.dump([evt.as_yml() for evt in ordered_events])
    openFile.write(text(yml_document))
|
def stream_gzip_decompress_lines(stream):
    """Uncompress a gzip byte stream into lines of text.

    Parameters
        stream: generator of chunks of gzip-compressed text.

    Returns
        Generator of uncompressed lines (without trailing newlines).
    """
    decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)
    pending = ""
    for compressed_chunk in stream:
        text_chunk = decompressor.decompress(compressed_chunk).decode()
        if not text_chunk:
            continue
        pieces = (pending + text_chunk).split("\n")
        # The final piece may be an incomplete line; hold it back until the
        # next chunk (or the end of the stream) completes it.
        pending = pieces.pop()
        for line in pieces:
            yield line
    yield pending
|
def generate_transpose(node_name, in_name, out_name, axes, base_name, func_counter):
    """Generate a Transpose operator to transpose the specified buffer."""
    func = nnabla_pb2.Function()
    func.type = "Transpose"
    set_function_name(func, node_name, base_name, func_counter)
    func.input.extend([in_name])
    func.output.extend([out_name])
    func.transpose_param.axes.extend(axes)
    return func
|
def calculate_rate(country_code, exception_name):
    """Calculate the VAT rate for a customer based on their declared country
    and any declared exception information.

    :param country_code:
        The two-character country code where the user resides
    :param exception_name:
        The name of an exception for the country, as returned from
        vat_moss.declared_residence.options()
    :raises:
        ValueError - if country_code is not two characers, or exception_name is not None or a valid exception from options()
    :return:
        A tuple of (Decimal VAT rate, country_code, exception name [or None])
    """
    if not country_code or not isinstance(country_code, str_cls) or len(country_code) != 2:
        raise ValueError('Invalidly formatted country code')
    if exception_name and not isinstance(exception_name, str_cls):
        raise ValueError('Exception name is not None or a string')
    country_code = country_code.upper()
    if country_code not in rates.BY_COUNTRY:
        # Countries outside the rate table are zero-rated.
        return (Decimal('0.0'), country_code, None)
    country_info = rates.BY_COUNTRY[country_code]
    if not exception_name:
        return (country_info['rate'], country_code, None)
    if exception_name not in country_info['exceptions']:
        raise ValueError('"%s" is not a valid exception for %s' % (exception_name, country_code))
    rate_info = country_info['exceptions'][exception_name]
    if isinstance(rate_info, Decimal):
        return (rate_info, country_code, exception_name)
    # This allows handling the complex case of the UK RAF bases in Cyprus
    # that map to the standard country rate. The country code and exception
    # name need to be changed in addition to gettting a special rate.
    rate, country_code, exception_name = rate_info
    return (rate, country_code, exception_name)
|
def table_lookup(image, table, border_value, iterations=None):
    '''Perform a morphological transform on an image, directed by its neighbors

    image - a binary image
    table - a 512-element table giving the transform of each pixel given
            the values of that pixel and its 8-connected neighbors.
    border_value - the value of pixels beyond the border of the image.
                   This should test as True or False.
    iterations - maximum number of passes; None repeats until the image
                 stops changing.

    The pixels are numbered like this:

    0 1 2
    3 4 5
    6 7 8

    The index at a pixel is the sum of 2**<pixel-number> for pixels
    that evaluate to true.
    '''
    # Test for a table that never transforms a zero into a one:
    # bit 4 is the center pixel, so center_is_zero marks every table index
    # whose center pixel is off.
    center_is_zero = np.array([(x & 2 ** 4) == 0 for x in range(2 ** 9)])
    use_index_trick = False
    if (not np.any(table[center_is_zero]) and (np.issubdtype(image.dtype, bool) or np.issubdtype(image.dtype, int))):
        # Use the index trick: zeros never become ones, so only the
        # currently-set pixels need to be tracked.
        use_index_trick = True
        invert = False
    elif (np.all(table[~center_is_zero]) and np.issubdtype(image.dtype, bool)):
        # All ones stay ones, invert the table and the image and do the trick
        use_index_trick = True
        invert = True
        image = ~image
        # table index 0 -> 511 and the output is reversed
        table = ~table[511 - np.arange(512)]
        border_value = not border_value
    if use_index_trick:
        orig_image = image
        # Delegate the sparse iteration to the index-lookup helpers.
        index_i, index_j, image = prepare_for_index_lookup(image, border_value)
        index_i, index_j = index_lookup(index_i, index_j, image, table, iterations)
        image = extract_from_image_lookup(orig_image, index_i, index_j)
        if invert:
            # Undo the inversion applied before the trick.
            image = ~image
        return image
    counter = 0
    while counter != iterations:
        counter += 1
        # We accumulate into the indexer to get the index into the table
        # at each point in the image
        if image.shape[0] < 3 or image.shape[1] < 3:
            # Image too small for the C helper: build the 9-bit neighbor
            # index with shifted slice additions (bit k = neighbor k).
            image = image.astype(bool)
            indexer = np.zeros(image.shape, int)
            indexer[1:, 1:] += image[:-1, :-1] * 2 ** 0
            indexer[1:, :] += image[:-1, :] * 2 ** 1
            indexer[1:, :-1] += image[:-1, 1:] * 2 ** 2
            indexer[:, 1:] += image[:, :-1] * 2 ** 3
            indexer[:, :] += image[:, :] * 2 ** 4
            indexer[:, :-1] += image[:, 1:] * 2 ** 5
            indexer[:-1, 1:] += image[1:, :-1] * 2 ** 6
            indexer[:-1, :] += image[1:, :] * 2 ** 7
            indexer[:-1, :-1] += image[1:, 1:] * 2 ** 8
        else:
            indexer = table_lookup_index(np.ascontiguousarray(image, np.uint8))
        if border_value:
            # A true border contributes the out-of-image neighbor bits along
            # each edge of the indexer.
            indexer[0, :] |= 2 ** 0 + 2 ** 1 + 2 ** 2
            indexer[-1, :] |= 2 ** 6 + 2 ** 7 + 2 ** 8
            indexer[:, 0] |= 2 ** 0 + 2 ** 3 + 2 ** 6
            indexer[:, -1] |= 2 ** 2 + 2 ** 5 + 2 ** 8
        new_image = table[indexer]
        if np.all(new_image == image):
            # Fixed point reached: further passes cannot change the image.
            break
        image = new_image
    return image
|
def resolve_compound_variable_fields(dbg, thread_id, frame_id, scope, attrs):
    """Resolve a compound variable in debugger scopes by its name and attributes.

    :param thread_id: id of the variable's thread
    :param frame_id: id of the variable's frame
    :param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
    :param attrs: after reaching the proper scope, we have to get the
        attributes until we find the proper location (i.e.: obj\tattr1\tattr2)
    :return: a tuple of (type name, dictionary of the variable's fields),
        or None when evaluation fails (the failure is logged).
    """
    var = getVariable(dbg, thread_id, frame_id, scope, attrs)
    try:
        _type, _typeName, resolver = get_type(var)
        return _typeName, resolver.get_dictionary(var)
    except Exception:
        # Bug fix: catch Exception instead of a bare except so that
        # SystemExit/KeyboardInterrupt are not silently swallowed.
        pydev_log.exception('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s.', thread_id, frame_id, scope, attrs)
|
def Authenticate(self, app_id, challenge_data, print_callback=sys.stderr.write):
    """See base class."""
    # The signing plugin command must be configured via the environment.
    plugin_cmd = os.environ.get(SK_SIGNING_PLUGIN_ENV_VAR)
    if plugin_cmd is None:
        raise errors.PluginError('{} env var is not set'.format(SK_SIGNING_PLUGIN_ENV_VAR))
    # Prepare input to signer
    client_data_map, signing_input = self._BuildPluginRequest(app_id, challenge_data, self.origin)
    # Call plugin
    print_callback('Please insert and touch your security key\n')
    response = self._CallPlugin([plugin_cmd], signing_input)
    # Handle response: recover the client data recorded for this
    # key-handle/challenge pair.
    pair = (response['keyHandle'], response['challengeHash'])
    client_data = client_data_map[pair].encode()
    return self._BuildAuthenticatorResponse(app_id, client_data, response)
|
def by_col(cls, df, events, populations, w=None, inplace=False, pvalue='sim', outvals=None, swapname='', **stat_kws):
    """Function to compute a Moran_Rate statistic on a dataframe.

    Arguments
        df : pandas.DataFrame
            a pandas dataframe with a geometry column
        events : string or list of strings
            one or more names where events are stored
        populations : string or list of strings
            one or more names where the populations corresponding to the
            events are stored. If one population column is provided, it is
            used for all event columns. If more than one population column
            is provided but there is not a population for every event
            column, an exception will be raised.
        w : pysal weights object
            a weights object aligned with the dataframe. If not provided,
            this is searched for in the dataframe's metadata
        inplace : bool
            a boolean denoting whether to operate on the dataframe inplace
            or to return a series containing the results of the
            computation. If operating inplace, the derived columns will be
            named 'column_moran_rate'
        pvalue : string
            a string denoting which pvalue should be returned. Refer to the
            Moran_Rate statistic's documentation for available p-values
        outvals : list of strings
            list of arbitrary attributes to return as columns from the
            Moran_Rate statistic
        **stat_kws : keyword arguments
            options to pass to the underlying statistic. For this, see the
            documentation for the Moran_Rate statistic.

    Returns
        If inplace, None, and operation is conducted on dataframe in
        memory. Otherwise, returns a copy of the dataframe with the
        relevant columns attached.

    See Also
        For further documentation, refer to the Moran_Rate class in
        pysal.esda
    """
    if not inplace:
        # Work on a copy and recurse with inplace=True.
        new = df.copy()
        cls.by_col(new, events, populations, w=w, inplace=True, pvalue=pvalue, outvals=outvals, swapname=swapname, **stat_kws)
        return new
    if isinstance(events, str):
        events = [events]
    if isinstance(populations, str):
        populations = [populations]
    if len(populations) < len(events):
        # A single population column may serve every event column.
        populations = populations * len(events)
    if len(events) != len(populations):
        raise ValueError('There is not a one-to-one matching between events and '
                         'populations!\nEvents: {}\n\nPopulations:'
                         ' {}'.format(events, populations))
    adjusted = stat_kws.pop('adjusted', True)
    if isinstance(adjusted, bool):
        adjusted = [adjusted] * len(events)
    # Bug fix: compare with ==, not `is`. Identity comparison against a
    # string literal is implementation-dependent and raises SyntaxWarning
    # on Python >= 3.8.
    if swapname == '':
        swapname = cls.__name__.lower()
    rates = [assuncao_rate(df[e], df[pop]) if adj else df[e].astype(float) / df[pop]
             for e, pop, adj in zip(events, populations, adjusted)]
    names = ['-'.join((e, p)) for e, p in zip(events, populations)]
    out_df = df.copy()
    rate_df = out_df.from_items(list(zip(names, rates)))
    # trick to avoid importing pandas
    stat_df = _univariate_handler(rate_df, names, w=w, inplace=False, pvalue=pvalue,
                                  outvals=outvals, swapname=swapname, stat=Moran,
                                  # how would this get done w/ super?
                                  **stat_kws)
    for col in stat_df.columns:
        df[col] = stat_df[col]
|
def processLedger(self) -> None:
    """Check the config ledger's transactions and handle POOL_CONFIG ones.

    Iterates over every transaction in the ledger and delegates each
    POOL_CONFIG transaction to ``handleConfigTxn``.

    :return: None
    """
    logger.debug('{} processing config ledger for any POOL_CONFIGs'.format(self), extra={"tags": ["pool-config"]})
    for _, txn in self.ledger.getAllTxn():
        # Only POOL_CONFIG transactions are relevant here; others are skipped.
        if get_type(txn) == POOL_CONFIG:
            self.handleConfigTxn(txn)
|
def env_get(context):
    """Copy $ENV values into the pypyr context.

    context is a dictionary or dictionary-like. context['env']['get'] must
    exist and is itself a dict: keys are the pypyr context items to write,
    values are the names of the $ENVs to read.

    For example, given input context:
        key1: value1
        env:
          get:
            pypyrUser: USER

    the result context gains ``pypyrUser: <<value of $USER>>``.

    :return: True when at least the 'get' mapping existed, else False.
    """
    mapping = context['env'].get('get', None)
    if not mapping:
        return False
    logger.debug("start")
    for ctx_key, env_name in mapping.items():
        logger.debug(f"setting context {ctx_key} to $ENV {env_name}")
        # KeyError here means the named $ENV is not set -- deliberate.
        context[ctx_key] = os.environ[env_name]
    logger.info(f"saved {len(mapping)} $ENVs to context.")
    logger.debug("done")
    return True
|
def build_model_classes(metadata):
    """Render a model class file for every model declared in the given spec module.

    :param metadata: importable module path whose ``models`` attribute lists model specs.
    """
    spec = importlib.import_module(metadata)
    template = get_jinja_env().get_template('model.py.jinja2')
    for model in spec.models:
        rendered = template.render(model_md=model)
        with open(model_path(model.name.lower()), 'w') as out_file:
            out_file.write(rendered)
|
def forward(self, layer_input: torch.Tensor, layer_output: torch.Tensor, layer_index: int = None, total_layers: int = None) -> torch.Tensor:  # pylint: disable=arguments-differ
    """Apply (possibly depth-decayed) layer dropout for this whole mini-batch.

    When both ``layer_index`` and ``total_layers`` are given, the effective
    dropout probability is ``undecayed_dropout_prob * layer_index / total_layers``;
    otherwise ``undecayed_dropout_prob`` is used directly.

    Parameters
    ----------
    layer_input : ``torch.FloatTensor``, required
        The input tensor of this layer.
    layer_output : ``torch.FloatTensor``, required
        The output tensor of this layer, with the same shape as ``layer_input``.
    layer_index : ``int``
        The layer index, starting from 1, used to scale the dropout prob.
    total_layers : ``int``
        The total number of layers.

    Returns
    -------
    ``torch.FloatTensor`` with the same shape as the inputs.
    """
    if layer_index is None or total_layers is None:
        dropout_prob = 1.0 * self.undecayed_dropout_prob
    else:
        dropout_prob = 1.0 * self.undecayed_dropout_prob * layer_index / total_layers
    # Evaluation: deterministic expected value of the stochastic train-time rule.
    if not self.training:
        return (1 - dropout_prob) * layer_output + layer_input
    # Training: drop the whole layer (keep only the residual) with prob dropout_prob.
    if torch.rand(1) < dropout_prob:
        return layer_input
    return layer_output + layer_input
|
def clone(self):
    """Return a new node of the same class, built from this node's name and demand.

    Returns
    -------
    type
        A fresh instance cloned from self.
    """
    # NOTE(review): only name and demand are copied; docstring originally
    # claimed a deep copy of allocation too -- confirm against callers.
    return self.__class__(self._name, self._demand)
|
def enable(app_id, enabled=True):
    '''Enable or disable an existing assistive access application.

    app_id
        The bundle ID or command to set assistive access status.

    enabled
        Sets enabled or disabled status. Default is ``True``.

    CLI Example:

    .. code-block:: bash

        salt '*' assistive.enable /usr/bin/osascript
        salt '*' assistive.enable com.smileonmymac.textexpander enabled=False
    '''
    flag = '1' if enabled else '0'
    for entry in _get_assistive_access():
        if entry[0] != app_id:
            continue
        cmd = ('sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" '
               '"UPDATE access SET allowed=\'{0}\' WHERE client=\'{1}\'"'.format(flag, app_id))
        result = __salt__['cmd.run_all'](cmd, output_loglevel='debug', python_shell=False)
        if result['retcode'] != 0:
            # Collect whatever diagnostics the command produced.
            comment = result.get('stderr', '') + result.get('stdout', '')
            raise CommandExecutionError('Error enabling app: {0}'.format(comment))
        return True
    # app_id not present in the TCC database at all.
    return False
|
def get_serializer_in(self, *args, **kwargs):
    """Build and return the input serializer instance.

    Used for validating and deserializing input and for serializing output;
    the view's serializer context is injected into ``kwargs``.
    """
    kwargs['context'] = self.get_serializer_context()
    cls = self.get_serializer_class_in()
    return cls(*args, **kwargs)
|
def get_password_data(name=None, kwargs=None, instance_id=None, call=None, ):
    '''Return password data for a Windows instance.

    By default only the encrypted password data will be returned. However, if a
    key_file is passed in, then a decrypted password will also be returned.

    Note that the key_file references the private key that was used to generate
    the keypair associated with this instance. This private key will _not_ be
    transmitted to Amazon; it is only used internally inside of Salt Cloud to
    decrypt data _after_ it has been received from Amazon.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a get_password_data mymachine
        salt-cloud -a get_password_data mymachine key_file=/root/ec2key.pem

    Note: PKCS1_v1_5 was added in PyCrypto 2.5
    '''
    # Only valid as a per-instance action (salt-cloud -a / --action).
    if call != 'action':
        raise SaltCloudSystemExit('The get_password_data action must be called with ' '-a or --action.')
    if not instance_id:
        # Resolve the EC2 instance id from the VM name when not supplied.
        instance_id = _get_node(name)['instanceId']
    if kwargs is None:
        kwargs = {}
    if instance_id is None:
        # Fall back to an instance_id passed via kwargs, consuming it so it is
        # not forwarded anywhere else.
        if 'instance_id' in kwargs:
            instance_id = kwargs['instance_id']
            del kwargs['instance_id']
    params = {'Action': 'GetPasswordData', 'InstanceId': instance_id}
    ret = {}
    data = aws.query(params, return_root=True, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4')
    # The API returns a list of single-key dicts; flatten them into one dict.
    for item in data:
        ret[next(six.iterkeys(item))] = next(six.itervalues(item))
    # Without a crypto library (M2Crypto or PyCrypto) only the encrypted blob
    # can be returned.
    if not HAS_M2 and not HAS_PYCRYPTO:
        return ret
    if 'key' not in kwargs:
        if 'key_file' in kwargs:
            # Read the private key material from disk; it never leaves this host.
            with salt.utils.files.fopen(kwargs['key_file'], 'r') as kf_:
                kwargs['key'] = salt.utils.stringutils.to_unicode(kf_.read())
    if 'key' in kwargs:
        pwdata = ret.get('passwordData', None)
        if pwdata is not None:
            rsa_key = kwargs['key']
            pwdata = base64.b64decode(pwdata)
            if HAS_M2:
                # M2Crypto path: PKCS#1 v1.5 decryption of the password blob.
                key = RSA.load_key_string(rsa_key.encode('ascii'))
                password = key.private_decrypt(pwdata, RSA.pkcs1_padding)
            else:
                # PyCrypto path: PKCS1_v1_5.decrypt returns the random sentinel
                # on padding failure instead of raising.
                dsize = Crypto.Hash.SHA.digest_size
                sentinel = Crypto.Random.new().read(15 + dsize)
                key_obj = Crypto.PublicKey.RSA.importKey(rsa_key)
                key_obj = PKCS1_v1_5.new(key_obj)
                password = key_obj.decrypt(pwdata, sentinel)
            ret['password'] = salt.utils.stringutils.to_unicode(password)
    return ret
|
def add_values_to_run_set_xml(self, runSet, cputime, walltime, energy):
    """Attach the aggregated result values (cputime, walltime, energy) to the
    XML representation of a runSet."""
    xml = runSet.xml
    self.add_column_to_xml(xml, 'cputime', cputime)
    self.add_column_to_xml(xml, 'walltime', walltime)
    # Energy results are formatted into one column per measurement key.
    for energy_key, energy_value in intel_cpu_energy.format_energy_results(energy).items():
        self.add_column_to_xml(xml, energy_key, energy_value)
|
def register_bool_option(cls, name, description=None):
    """Register a Boolean switch as a state option.

    Shorthand for ``cls.register_option(name, {bool}, default=False,
    description=description)``.

    :param str name: Name of the state option.
    :param str description: The description of this state option.
    :return: None
    """
    option_types = {bool}
    cls.register_option(name, option_types, default=False, description=description)
|
def value_nth_person(self, n, array, default=0):
    """Get the value of array for the person whose position in the entity is n.

    Note that this position is arbitrary, and that members are not sorted.
    If the nth person does not exist, return ``default`` instead.
    The result is a vector whose dimension is the number of entities.
    """
    # Validates that `array` is a person-level vector for this entity.
    self.members.check_array_compatible_with_entity(array)
    positions = self.members_position
    nb_persons_per_entity = self.nb_persons()
    members_map = self.ordered_members_map
    # Start from a vector of `default` with one cell per entity; dtype follows
    # the input array so the assignment below does not coerce values.
    result = self.filled_array(default, dtype=array.dtype)
    # For households that have at least n persons, set the result as the value of criteria for the person for which the position is n.
    # The map is needed b/c the order of the nth persons of each household in the persons vector is not necessarily the same than the household order.
    result[nb_persons_per_entity > n] = array[members_map][positions[members_map] == n]
    return result
|
def _find_elements(self, result, elements):
    """Extract selected element values from an XML response.

    Parses incrementally and stops as soon as every requested tag has been
    seen, so tags located near the start of the document are cheap to find.
    NOTE: mutates the ``elements`` set in place (found tags are removed).

    Args:
        result: response XML string.
        elements: a set of interesting element tags.

    Returns:
        A dict mapping element tag to element text.
    """
    found = {}
    stream = StringIO.StringIO(result)
    for _, node in ET.iterparse(stream, events=('end',)):
        # All requested tags located -- no need to parse further.
        if not elements:
            break
        if node.tag in elements:
            found[node.tag] = node.text
            elements.remove(node.tag)
    return found
|
def on(self):
    """Send the ON command (level 0xff) to the device."""
    self._send_method(
        StandardSend(self._address, COMMAND_LIGHT_ON_0X11_NONE, 0xff),
        self._on_message_received)
|
def get_child_ids(self, parent_alias):
    """Return the child IDs of the given parent category.

    :param str parent_alias: Parent category alias
    :rtype: list
    :return: a list of child IDs (empty list when the parent is unknown)
    """
    self._cache_init()
    no_children = []
    return self._cache_get_entry(self.CACHE_NAME_PARENTS, parent_alias, no_children)
|
def do_get(self, params):
    """
\x1b[1mNAME\x1b[0m
            get - Gets the znode's value

\x1b[1mSYNOPSIS\x1b[0m
            get <path> [watch]

\x1b[1mOPTIONS\x1b[0m
            * watch: set a (data) watch on the path (default: false)

\x1b[1mEXAMPLES\x1b[0m
            > get /foo
            bar

            # sets a watch
            > get /foo true
            bar

            # trigger the watch
            > set /foo 'notbar'
            WatchedEvent(type='CHANGED', state='CONNECTED', path=u'/foo')
    """
    watcher = lambda evt: self.show_output(str(evt))
    kwargs = {"watch": watcher} if params.watch else {}
    value, _ = self._zk.get(params.path, **kwargs)
    # The stored payload may be zlib-compressed; try to inflate it.
    if value is not None:
        try:
            value = zlib.decompress(value)
        except zlib.error:
            # Not compressed (or corrupt) -- show the raw value as-is.
            # (Previously a bare `except:` that hid every error, including
            # programming mistakes; narrowed to the decompression failure.)
            pass
    self.show_output(value)
|
def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
    """Retrieve a Component instance for a specific Entity.

    In some cases it may be necessary to access a specific Component
    instance directly -- for example, modifying a Component in response to
    user input.

    Raises a KeyError if the given Entity or Component does not exist.

    :param entity: The Entity ID to retrieve the Component for.
    :param component_type: The Component type you wish to retrieve.
    :return: The Component instance requested for the given Entity ID.
    """
    components_by_type = self._entities[entity]
    return components_by_type[component_type]
|
def seq_dup_levels_plot(self):
    """Create the HTML for the Sequence Duplication Levels plot.

    Collects per-sample duplication-level percentages, restricts them to the
    canonical category order in ``self.dup_keys`` and renders a line graph
    section. Returns None when no sample has duplication data.
    """
    data = dict()
    max_dupval = 0
    for s_name in self.fastqc_data:
        try:
            thisdata = {}
            for d in self.fastqc_data[s_name]['sequence_duplication_levels']:
                thisdata[d['duplication_level']] = d['percentage_of_total']
                max_dupval = max(max_dupval, d['percentage_of_total'])
            data[s_name] = OrderedDict()
            # Keep only known duplication-level categories, in canonical order.
            for k in self.dup_keys:
                try:
                    data[s_name][k] = thisdata[k]
                except KeyError:
                    pass
        except KeyError:
            # Sample report has no sequence_duplication_levels section - skip.
            pass
    if len(data) == 0:
        # Fixed copy-paste bug: message previously said 'sequence_length_distribution'.
        log.debug('sequence_duplication_levels not found in FastQC reports')
        return None
    pconfig = {'id': 'fastqc_sequence_duplication_levels_plot', 'title': 'FastQC: Sequence Duplication Levels', 'categories': True, 'ylab': '% of Library', 'xlab': 'Sequence Duplication Level', 'ymax': 100 if max_dupval <= 100.0 else None, 'ymin': 0, 'yMinTickInterval': 0.1, 'colors': self.get_status_cols('sequence_duplication_levels'), 'tt_label': '<b>{point.x}</b>: {point.y:.1f}%', }
    self.add_section(name='Sequence Duplication Levels', anchor='fastqc_sequence_duplication_levels', description='The relative level of duplication found for every sequence.', helptext='''
    From the [FastQC Help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/8%20Duplicate%20Sequences.html):

    _In a diverse library most sequences will occur only once in the final set.
    A low level of duplication may indicate a very high level of coverage of the
    target sequence, but a high level of duplication is more likely to indicate
    some kind of enrichment bias (eg PCR over amplification). This graph shows
    the degree of duplication for every sequence in a library: the relative
    number of sequences with different degrees of duplication._

    _Only sequences which first appear in the first 100,000 sequences
    in each file are analysed. This should be enough to get a good impression
    for the duplication levels in the whole file. Each sequence is tracked to
    the end of the file to give a representative count of the overall duplication level._

    _The duplication detection requires an exact sequence match over the whole length of
    the sequence. Any reads over 75bp in length are truncated to 50bp for this analysis._

    _In a properly diverse library most sequences should fall into the far left of the
    plot in both the red and blue lines. A general level of enrichment, indicating broad
    oversequencing in the library will tend to flatten the lines, lowering the low end
    and generally raising other categories. More specific enrichments of subsets, or
    the presence of low complexity contaminants will tend to produce spikes towards the
    right of the plot._
    ''', plot=linegraph.plot(data, pconfig))
|
def edit_dataset_metadata(request, dataset_id=None):
    """Render the upload/edit template for a Dataset.

    POST requests are delegated to ``add_dataset`` (which does the heavy
    lifting); GET requests render a blank form (upload) or a form bound to
    the existing Dataset (edit).
    """
    if request.method == 'POST':
        return add_dataset(request, dataset_id)
    if request.method == 'GET':
        if dataset_id:
            # Edit an existing dataset.
            instance = get_object_or_404(Dataset, pk=dataset_id)
            metadata_form = DatasetUploadForm(instance=instance)
        else:
            # Fresh upload.
            metadata_form = DatasetUploadForm()
        return render(request, 'datafreezer/upload.html', {'fileUploadForm': metadata_form, })
|
def user_segment(self):
    """| Comment: The user segment to which this section belongs, or None when
    either the API client or the segment id is missing."""
    if not (self.api and self.user_segment_id):
        return None
    return self.api._get_user_segment(self.user_segment_id)
|
def decorator_handle(tokens):
    """Process decorator tokens into decorator source lines.

    'simple' tokens become plain ``@name`` lines; 'test' tokens are assigned
    to a generated intermediate variable first, then applied by reference.
    """
    assignments = []
    decorators = []
    for index, tok in enumerate(tokens):
        if "simple" in tok and len(tok) == 1:
            decorators.append("@" + tok[0])
        elif "test" in tok and len(tok) == 1:
            # Bind the decorator expression to a unique variable, then apply it.
            temp_name = decorator_var + "_" + str(index)
            assignments.append(temp_name + " = " + tok[0])
            decorators.append("@" + temp_name)
        else:
            raise CoconutInternalException("invalid decorator tokens", tok)
    return "\n".join(assignments + decorators) + "\n"
|
def terminal(self, out=None, border=None):
    """Serialize every QR Code in the sequence as ANSI escape codes.

    See :py:meth:`QRCode.terminal()` for details.
    """
    for code in self:
        code.terminal(out=out, border=border)
|
def is_topic_head(self):
    """Return ``True`` if this post is the first post of its topic."""
    first = self.topic.first_post
    if not first:
        return False
    return first.id == self.id
|
def set_timer(self, duration):
    """Arm the next SIGALRM to fire after ``duration`` seconds.

    :param int duration: How long to sleep
    """
    # Do not arm a new timer while the application is shutting down.
    if self.is_shutting_down:
        LOGGER.debug('Not sleeping, application is trying to shutdown')
        return
    # One-shot real-time timer (interval of 0 means it does not repeat).
    signal.setitimer(signal.ITIMER_REAL, duration, 0)
|
def purge_stale_services(argv=None):
    """Command-line utility to periodically purge stale entries from the
    "services" table. Designed to be run from cron.

    :return: 0 on success (process exit code).
    """
    argv = argv or sys.argv
    parser = argparse.ArgumentParser(prog=os.path.basename(argv[0]), description=('doublethink-purge-stale-services: utility to periodically ' 'purge stale entries from the "services" table.'))
    parser.add_argument("-d", "--rethinkdb-db", required=True, dest="database", help="A RethinkDB database containing a 'services' table")
    parser.add_argument("-s", "--rethinkdb-servers", metavar="SERVERS", dest="servers", default='localhost', help="rethinkdb servers, e.g. db0.foo.org,db0.foo.org:38015,db1.foo.org")
    parser.add_argument('-v', '--verbose', dest='log_level', action='store_const', default=logging.INFO, const=logging.DEBUG, help=('verbose logging'))
    options = parser.parse_args(argv[1:])
    logging.basicConfig(stream=sys.stdout, level=options.log_level, format=('%(asctime)s %(process)d %(levelname)s %(threadName)s ' '%(name)s.%(funcName)s(%(filename)s:%(lineno)d) %(message)s'))
    # Normalize the comma-separated server list into clean hostnames.
    options.servers = [srv.strip() for srv in options.servers.split(",")]
    rethinker = doublethink.Rethinker(servers=options.servers, db=options.database)
    doublethink.services.ServiceRegistry(rethinker).purge_stale_services()
    return 0
|
def fillna(self, value):
    """Return an Index with missing values replaced by ``value``.

    Parameters
    ----------
    value : {int, float, bytes, bool}
        Scalar value to replace missing values with.

    Returns
    -------
    Index
        With missing values replaced.
    """
    if not is_scalar(value):
        raise TypeError('Value to replace with is not a valid scalar')
    replaced = weld_replace(self.weld_expr, self.weld_type, default_missing_data_literal(self.weld_type), value)
    return Index(replaced, self.dtype, self.name)
|
def value_or_default(self, value):
    '''Return the given value unless it is None, in which case return this
    field's default (calling it first if the default is callable).'''
    if value is not None:
        return value
    default = self.default
    return default() if callable(default) else default
|
def _create_html_tasklist(self, taskpaperDocPath):
    """*create an html version of the single taskpaper index task list*

    **Key Arguments:**
        - ``taskpaperDocPath`` -- path to the task index taskpaper doc

    **Return:**
        - ``htmlFilePath`` -- the path to the output HTML file

    NOTE(review): this block uses Python 2 syntax (``except IOError, e``).
    """
    self.log.info('starting the ``_create_html_tasklist`` method')
    # In editorial mode the HTML list is not generated at all.
    if self.editorialRootPath:
        return
    title = self.workspaceName
    content = "<h1>%(title)s tasks</h1><ul>\n" % locals()
    # OPEN TASKPAPER FILE
    doc = document(taskpaperDocPath)
    docTasks = doc.tasks
    for task in docTasks:
        # Build two tag renderings: one as CSS classes, one as visible spans.
        tagString = " ".join(task.tags)
        tagString2 = ""
        for t in task.tags:
            # Strip any "(value)" suffix so the CSS class is just the tag name.
            t1 = t.split("(")[0]
            tagString2 += """ <span class="%(t1)s tag">@%(t)s</span>""" % locals()
        notes = task.notes
        # First note line encodes "filepath > parent" provenance for the task.
        filepath = notes[0].title.split(" > ")[0]
        basename = os.path.basename(filepath).replace(".taskpaper", "").replace("-", " ")
        filepath = "dryx-open://" + filepath
        taskTitle = u"""<a href="%(filepath)s"><span class="bullet %(tagString)s">◉</span> </a>""" % locals() + task.title[2:] + tagString2
        if len(notes[0].title.split(" > ")) > 1:
            parent = notes[0].title.split(" > ")[1]
            parent = """<span class="parent">%(basename)s > %(parent)s</span></br>\n""" % locals()
        else:
            parent = """<span class="parent">%(basename)s</span></br>\n""" % locals()
        # Remaining note lines become the task body, one span per line.
        taskContent = """</span>\n\t\t</br><span class="notes">""".join(task.to_string(title=False, indentLevel=0).split("\n")[1:])
        if len(taskContent):
            taskContent = """\n\t<br><span class="notes">""" + taskContent + """\n\t</span>"""
        else:
            taskContent = ""
        htmlTask = """<li class="XXX">%(parent)s%(taskTitle)s%(taskContent)s</li>\n""" % locals()
        content += htmlTask
    content += "</ul>"
    # Write the HTML next to the source taskpaper document.
    htmlFilePath = taskpaperDocPath.replace(".taskpaper", ".html")
    try:
        self.log.debug("attempting to open the file %s" % (htmlFilePath,))
        writeFile = codecs.open(htmlFilePath, encoding='utf-8', mode='w')
    except IOError, e:
        message = 'could not open the file %s' % (htmlFilePath,)
        self.log.critical(message)
        raise IOError(message)
    writeFile.write(content)
    writeFile.close()
    self.log.info('completed the ``_create_html_tasklist`` method')
    return htmlFilePath
|
def get_value(self, variable=None):
    """Return the value of the given environment variable.

    :param variable: Variable to retrieve value for.
    :type variable: unicode
    :return: Variable value.
    :rtype: unicode
    :note: When **variable** is not given, the first stored variable's value
        is returned instead.
    """
    if variable:
        # Refresh the cache for this variable before reading it.
        self.get_values(variable)
        return self.__variables[variable]
    self.get_values()
    return foundations.common.get_first_item(self.__variables.values())
|
def pause_point(self, msg='SHUTIT PAUSE POINT', print_input=True, resize=True, color='32', default_msg=None, interact=False, wait=-1):
    """Inserts a pause in the build session, which allows the user to try
    things out before continuing. Ignored if we are not in an interactive
    mode.
    Designed to help debug the build, or drop to on failure so the
    situation can be debugged.

    @param msg:          Message to display to user on pause point.
    @param print_input:  Whether to take input at this point (i.e. interact), or
                         simply pause pending any input.
                         Default: True
    @param resize:       If True, try to resize terminal.
                         Default: False
    @param color:        Color to print message (typically 31 for red, 32 for green)
    @param default_msg:  Whether to print the standard blurb
    @param interact:     Interact without mediation, and set up environment.
    @param wait:         Wait a few seconds rather than for input (for video mode)

    @type msg:           string
    @type print_input:   boolean
    @type resize:        boolean
    @type wait:          decimal

    @return:             True if pause point handled ok, else false
    """
    shutit = self.shutit
    # Try and stop user being 'clever' if we are in an exam and not in debug
    if shutit.build['exam'] and shutit.loglevel not in ('DEBUG',):
        self.send(ShutItSendSpec(self, send=' command alias exit=/bin/true && command alias logout=/bin/true && command alias kill=/bin/true && command alias alias=/bin/true', echo=False, record_command=False, ignore_background=True))
    # Flush history before we 'exit' the current session.
    # THIS CAUSES BUGS IF WE ARE NOT IN A SHELL... COMMENTING OUT
    # IF THE DEFAULT PEXPECT == THE CURRENT EXPECTED, THEN OK, gnuplot in shutit-scripts with walkthrough=True is a good test
    # Errors seen when check_exit=True
    if self.in_shell:
        self.send(ShutItSendSpec(self, send=' set +m && { : $(history -a) & } 2>/dev/null', check_exit=False, echo=False, record_command=False, ignore_background=True))
    if print_input:
        # Do not resize if we are in video mode (ie wait > 0)
        if resize and wait < 0:
            # It is possible we do not have distro set yet, so wrap in try/catch
            try:
                assert not self.sendline(ShutItSendSpec(self, send='', echo=False, ignore_background=True)), shutit_util.print_debug()
            except Exception:
                pass
        if default_msg is None:
            if not shutit.build['video'] and not shutit.build['training'] and not shutit.build['exam'] and not shutit.build['walkthrough'] and self.shutit.loglevel not in ('DEBUG',):
                pp_msg = '\r\nYou now have a standard shell.'
                if not interact:
                    pp_msg += '\r\nHit CTRL and then ] at the same time to continue ShutIt run, CTRL-q to quit.'
                if shutit.build['delivery'] == 'docker':
                    pp_msg += '\r\nHit CTRL and u to save the state to a docker image'
                shutit.log(shutit_util.colorise(color, '\r\n' + 80 * '=' + '\r\n' + msg + '\r\n' + 80 * '=' + '\r\n' + pp_msg), transient=True, level=logging.CRITICAL)
            else:
                # BUGFIX: was level=logging.critical (the *function*), which is
                # not a valid logging level constant; use logging.CRITICAL.
                shutit.log('\r\n' + (shutit_util.colorise(color, msg)), transient=True, level=logging.CRITICAL)
        else:
            shutit.log(shutit_util.colorise(color, msg) + '\r\n' + default_msg + '\r\n', transient=True, level=logging.CRITICAL)
        # Suspend pexpect logging while the user has control of the terminal.
        oldlog = self.pexpect_child.logfile
        self.pexpect_child.logfile = None
        if wait > 0:
            time.sleep(wait)
        else:
            # Re-set the window size to match the original window.
            # TODO: sigwinch. Line assumes no change.
            self.pexpect_child.setwinsize(shutit_global.shutit_global_object.root_window_size[0], shutit_global.shutit_global_object.root_window_size[1])
            # TODO: handle exams better?
            self.expect('.*')
            if not shutit.build['exam'] and self.shutit.loglevel not in ('DEBUG',):
                if self.in_shell:
                    # Give them a 'normal' shell.
                    assert not self.sendline(ShutItSendSpec(self, send=' bash', echo=False, ignore_background=True)), shutit_util.print_debug()
                    self.expect('.*')
                else:
                    shutit.log('Cannot create subshell, as not in a shell.', level=logging.DEBUG)
            if interact:
                self.pexpect_child.interact()
            try:
                # shutit_global.shutit_global_object.shutit_print('pre interact')
                if shutit_global.shutit_global_object.ispy3:
                    # For some reason interact barfs when we use _pause_input_filter, so drop it for PY3: https://github.com/pexpect/pexpect/blob/master/pexpect/pty_spawn.py#L819
                    self.pexpect_child.interact()
                else:
                    self.pexpect_child.interact(input_filter=self._pause_input_filter)
                # shutit_global.shutit_global_object.shutit_print('post interact')
                self.handle_pause_point_signals()
                # shutit_global.shutit_global_object.shutit_print('post handle_pause_point_signals')
            except Exception as e:
                shutit.fail('Terminating ShutIt within pause point.\r\n' + str(e))  # pragma: no cover
            if not shutit.build['exam'] and self.shutit.loglevel not in ('DEBUG',):
                if self.in_shell:
                    assert not self.send(ShutItSendSpec(self, send=' exit', check_exit=False, echo=False, ignore_background=True)), shutit_util.print_debug()
                else:
                    shutit.log('Cannot exit as not in shell', level=logging.DEBUG)
        self.pexpect_child.logfile = oldlog
    else:
        pass
    shutit.build['ctrlc_stop'] = False
    return True
|
def _set_collector(self, v, load=False):
    """Setter method for collector, mapped from YANG variable /telemetry/collector (list)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_collector is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_collector() directly.
    """
    # Coerce the incoming value through its declared union type, when present.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG list type; this raises when the
        # value is not compatible with the schema.
        t = YANGDynClass(v, base=YANGListType("collector_name", collector.collector, yang_name="collector", rest_name="collector", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TelemetryCollector', u'info': u'Telemetry collector Configuration'}}), is_container='list', yang_name="collector", rest_name="collector", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TelemetryCollector', u'info': u'Telemetry collector Configuration'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the structured error payload pyangbind expects.
        raise ValueError({'error-string': """collector must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("collector_name",collector.collector, yang_name="collector", rest_name="collector", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='collector-name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TelemetryCollector', u'info': u'Telemetry collector Configuration'}}), is_container='list', yang_name="collector", rest_name="collector", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TelemetryCollector', u'info': u'Telemetry collector Configuration'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""", })
    self.__collector = t
    # Notify the parent object of the change, when supported.
    if hasattr(self, '_set'):
        self._set()
|
def setup_step_out(self, frame):
    """Configure the debugger to perform a "stepOut".

    Arms a pending stop that triggers when execution returns to the caller
    of ``frame``.
    """
    # Clear any previous step targets, then arm the stop on the caller frame.
    self.frame_calling = None
    self.frame_stop = None
    self.frame_suspend = False
    self.frame_return = frame.f_back
    self.pending_stop = True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.