signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def mutate(self, row):
    """Add a row to the batch; flush synchronously if a size limit is met.

    For example:

    .. literalinclude:: snippets.py
        :start-after: [START bigtable_batcher_mutate]
        :end-before: [END bigtable_batcher_mutate]

    :type row: class:`~google.cloud.bigtable.row.DirectRow`
    :param row: the row whose pending mutations should be batched.

    :raises: One of the following:
        * :exc:`~.table._BigtableRetryableError` if any row returned a
          transient error.
        * :exc:`RuntimeError` if the number of responses doesn't match
          the number of rows that were retried.
        * :exc:`.batcher.MaxMutationsError` if any row exceeds max
          mutations count.
    """
    num_mutations = len(row._get_mutations())
    # A single row may never carry more mutations than the hard cap.
    if num_mutations > MAX_MUTATIONS:
        raise MaxMutationsError(
            "The row key {} exceeds the number of mutations {}.".format(
                row.row_key, num_mutations
            )
        )
    # Flush first if accepting this row would reach the mutation-count cap.
    if (self.total_mutation_count + num_mutations) >= MAX_MUTATIONS:
        self.flush()
    self.rows.append(row)
    self.total_mutation_count += num_mutations
    self.total_size += row.get_mutations_size()
    # Flush when either the byte-size or the row-count threshold is met.
    if (
        self.total_size >= self.max_row_bytes
        or len(self.rows) >= self.flush_count
    ):
        self.flush()
|
def _raise_or_append_exception(self):
    """Handle a presumed-dead connection by raising or recording an error.

    Builds an :exc:`AMQPConnectionError` describing the missed heartbeat.
    If an exception list is available, append the error there and let the
    connection handle it; otherwise raise it immediately.

    :return:
    """
    message = (
        'Connection dead, no heartbeat or data received in >= '
        '%ds' % (self._interval * 2)
    )
    error = AMQPConnectionError(message)
    if self._exceptions is None:
        raise error
    self._exceptions.append(error)
|
def register(self, classes=None):
    """Registers new plugins.

    The registration only creates a new entry for a plugin inside the
    _classes dictionary.  It does not activate or even initialise the
    plugin.  A plugin must be a class, which inherits directly or
    indirectly from GwBasePattern.

    :param classes: List of plugin classes (defaults to an empty list)
    :type classes: list
    :raises AttributeError: if ``classes`` is not a list
    """
    # Bug fix: the original used a mutable default argument (classes=[]),
    # which is shared between calls; use None as the sentinel instead.
    if classes is None:
        classes = []
    if not isinstance(classes, list):
        raise AttributeError("plugins must be a list, not %s." % type(classes))
    plugin_registered = []
    for plugin_class in classes:
        plugin_name = plugin_class.__name__
        self.register_class(plugin_class, plugin_name)
        self._log.debug("Plugin %s registered" % plugin_name)
        plugin_registered.append(plugin_name)
    self._log.info("Plugins registered: %s" % ", ".join(plugin_registered))
|
def delete_posix_account(
    self,
    name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Deletes a POSIX account.

    Example:
        >>> from google.cloud import oslogin_v1
        >>> client = oslogin_v1.OsLoginServiceClient()
        >>> name = client.project_path('[USER]', '[PROJECT]')
        >>> client.delete_posix_account(name)

    Args:
        name (str): A reference to the POSIX account to update. POSIX
            accounts are identified by the project ID they are associated
            with. A reference to the POSIX account is in format
            ``users/{user}/projects/{project}``.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily build (and cache) the transport call wrapped with the
    # configured retry/timeout behavior.
    if "delete_posix_account" not in self._inner_api_calls:
        method_config = self._method_configs["DeletePosixAccount"]
        self._inner_api_calls["delete_posix_account"] = (
            google.api_core.gapic_v1.method.wrap_method(
                self.transport.delete_posix_account,
                default_retry=method_config.retry,
                default_timeout=method_config.timeout,
                client_info=self._client_info,
            )
        )
    request = oslogin_pb2.DeletePosixAccountRequest(name=name)
    self._inner_api_calls["delete_posix_account"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
|
def get_models(self, columns=None):
    """Get the hydrated models without eager loading.

    :param columns: The columns to get
    :type columns: list

    :return: A list of models
    :rtype: list
    """
    raw_results = self._query.get(columns)
    connection_name = self._model.get_connection_name()
    hydrated = self._model.hydrate(raw_results, connection_name)
    return hydrated.all()
|
def opened(self, block_identifier: BlockSpecification) -> bool:
    """Return True if the channel is opened at the given block."""
    is_open = self.token_network.channel_is_opened(
        participant1=self.participant1,
        participant2=self.participant2,
        block_identifier=block_identifier,
        channel_identifier=self.channel_identifier,
    )
    return is_open
|
def clear_dead_threads(self):
    """Remove Thread objects from the snapshot that refer to threads
    which are no longer running.
    """
    for thread_id in self.get_thread_ids():
        thread = self.get_thread(thread_id)
        if thread.is_alive():
            continue
        self._del_thread(thread)
|
def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS):
    """Decode BSON data to multiple documents as a generator.

    Works like :func:`decode_all`, but yields one document at a time.

    `data` must be a string of concatenated, valid, BSON-encoded documents.

    :Parameters:
      - `data`: BSON data
      - `codec_options` (optional): An instance of
        :class:`~bson.codec_options.CodecOptions`.

    .. versionchanged:: 3.0
       Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with
       `codec_options`.
    .. versionadded:: 2.8
    """
    if not isinstance(codec_options, CodecOptions):
        raise _CODEC_OPTIONS_TYPE_ERROR
    end = len(data) - 1
    position = 0
    while position < end:
        # The first 4 bytes of every BSON document encode its total size.
        obj_size = _UNPACK_INT(data[position:position + 4])[0]
        document = data[position:position + obj_size]
        position += obj_size
        yield _bson_to_dict(document, codec_options)
|
def process_checkpoint(self, msg: Checkpoint, sender: str) -> bool:
    """Process a checkpoint message.

    :return: whether processed (True) or stashed (False)
    """
    self.logger.info('{} processing checkpoint {} from {}'.format(self, msg, sender))
    result, reason = self.validator.validate_checkpoint_msg(msg)
    if result == DISCARD:
        self.discard(
            msg,
            "{} discard message {} from {} "
            "with the reason: {}".format(self, msg, sender, reason),
            self.logger.trace,
        )
        return True
    if result == PROCESS:
        self._do_process_checkpoint(msg, sender)
        return True
    # Neither discard nor process: stash the message for later.
    self.logger.debug(
        "{} stashing checkpoint message {} with "
        "the reason: {}".format(self, msg, reason)
    )
    self.stasher.stash((msg, sender), result)
    return False
|
def import_teamocil(sconf):
    """Return tmuxp config from a `teamocil`_ yaml config.

    .. _teamocil: https://github.com/remiprev/teamocil

    Parameters
    ----------
    sconf : dict
        python dict for session configuration.  NOTE: this dict is mutated
        in place (``pop`` is used on several keys).

    Returns
    -------
    dict
        tmuxp-style session configuration.

    Notes
    -----
    Todos:

    - change 'root' to a cd or start_directory
    - width in pane -> main-pain-width
    - with_env_var
    - clear
    - cmd_separator
    """
    tmuxp_config = {}
    if 'session' in sconf:
        sconf = sconf['session']
    # session name is optional in teamocil
    tmuxp_config['session_name'] = sconf.get('name')
    if 'root' in sconf:
        tmuxp_config['start_directory'] = sconf.pop('root')
    tmuxp_config['windows'] = []
    for w in sconf['windows']:
        windowdict = {'window_name': w['name']}
        if 'clear' in w:
            windowdict['clear'] = w['clear']
        # Bug fix (clarity): the original looped over every filter entry
        # and re-assigned the entire list on each iteration.  Assigning
        # once when the list is non-empty is equivalent: an empty list
        # sets no key, matching the old behavior.
        filters = w.get('filters', {})
        if filters.get('before'):
            windowdict['shell_command_before'] = filters['before']
        if filters.get('after'):
            windowdict['shell_command_after'] = filters['after']
        if 'root' in w:
            windowdict['start_directory'] = w.pop('root')
        if 'splits' in w:
            # teamocil calls panes "splits"
            w['panes'] = w.pop('splits')
        if 'panes' in w:
            for p in w['panes']:
                if 'cmd' in p:
                    p['shell_command'] = p.pop('cmd')
                if 'width' in p:
                    # todo: support for height/width
                    p.pop('width')
            windowdict['panes'] = w['panes']
        if 'layout' in w:
            windowdict['layout'] = w['layout']
        tmuxp_config['windows'].append(windowdict)
    return tmuxp_config
|
def calculate_eclipses(M1s, M2s, R1s, R2s, mag1s, mag2s,
                       u11s=0.394, u21s=0.296, u12s=0.394, u22s=0.296,
                       Ps=None, period=None, logperkde=RAGHAVAN_LOGPERKDE,
                       incs=None, eccs=None,
                       mininc=None, calc_mininc=True,
                       maxecc=0.97, ecc_fn=draw_eccs,
                       band='Kepler',
                       return_probability_only=False, return_indices=True,
                       MAfn=None):
    """Returns random eclipse parameters for provided inputs.

    :param M1s, M2s, R1s, R2s, mag1s, mag2s: (array-like)
        Primary and secondary properties (mass, radius, magnitude)

    :param u11s, u21s, u12s, u22s: (optional)
        Limb darkening parameters (u11 = u1 for star 1, u21 = u2 for
        star 1, etc.)

    :param Ps: (array-like, optional)
        Orbital periods; same size as ``M1s``, etc.
        If only a single period is desired, use ``period``.

    :param period: (optional)
        Orbital period; use this keyword if only a single period is
        desired.

    :param logperkde: (optional)
        If neither ``Ps`` nor ``period`` is provided, then periods will be
        randomly generated according to this log-period distribution.
        Default is taken from the Raghavan (2010) period distribution.

    :param incs, eccs: (optional)
        Inclinations and eccentricities.  If not passed, they will be
        generated.  Eccentricities will be generated according to
        ``ecc_fn``; inclinations will be randomly generated out to
        ``mininc``.

    :param mininc: (optional)
        Minimum inclination to generate.  Useful if you want to enhance
        efficiency by only generating mostly eclipsing, instead of mostly
        non-eclipsing systems.  If not provided and ``calc_mininc`` is
        ``True``, then this will be calculated based on inputs.

    :param calc_mininc: (optional)
        Whether to calculate ``mininc`` based on inputs.  If truly
        isotropic inclinations are desired, set this to ``False``.

    :param maxecc: (optional)
        Maximum eccentricity to generate.

    :param ecc_fn: (callable, optional)
        Orbital eccentricity generating function.  Must return ``n``
        orbital eccentricities generated according to provided period(s)::

            eccs = ecc_fn(n, Ps)

        Defaults to :func:`stars.utils.draw_eccs`.

    :param band: (optional)
        Photometric bandpass in which eclipse is observed.

    :param return_probability_only: (optional)
        If ``True``, then will return only the average eclipse probability
        of the population.

    :param return_indices: (optional)
        If ``True``, returns the indices of the original input arrays that
        the output ``DataFrame`` corresponds to.  **This behavior
        will/should be changed to just return a ``DataFrame`` of the same
        length as inputs...**

    :param MAfn: (optional)
        :class:`transit_basic.MAInterpolationFunction` object.
        If not passed, then one with default parameters will be created.

    :return:
        * ``wany``: indices describing which of the original input arrays
          the output ``DataFrame`` corresponds to (only returned if
          ``return_indices`` is ``True``).
        * ``df``: ``DataFrame`` with the following columns:
          ``[{band}_mag_tot, P, ecc, inc, w, dpri, dsec, T14_pri, T23_pri,
          T14_sec, T23_sec, b_pri, b_sec, {band}_mag_1, {band}_mag_2,
          fluxfrac_1, fluxfrac_2, switched, u1_1, u2_1, u1_2, u2_2]``.
          **N.B. that this will be shorter than your input arrays, because
          not everything will eclipse; this behavior will likely be
          changed in the future because it's confusing.**
        * ``(prob, dprob)``: eclipse probability with Poisson uncertainty.
    """
    if MAfn is None:
        logging.warning('MAInterpolationFunction not passed, so generating one...')
        MAfn = MAInterpolationFunction(nzs=200, nps=400, pmin=0.007, pmax=1 / 0.007)
    # Coerce scalar inputs to 1-d arrays.
    M1s = np.atleast_1d(M1s)
    M2s = np.atleast_1d(M2s)
    R1s = np.atleast_1d(R1s)
    R2s = np.atleast_1d(R2s)
    nbad = (np.isnan(M1s) | np.isnan(M2s) | np.isnan(R1s) | np.isnan(R2s)).sum()
    if nbad > 0:
        logging.warning('{} M1s are nan'.format(np.isnan(M1s).sum()))
        logging.warning('{} M2s are nan'.format(np.isnan(M2s).sum()))
        logging.warning('{} R1s are nan'.format(np.isnan(R1s).sum()))
        logging.warning('{} R2s are nan'.format(np.isnan(R2s).sum()))
    # Broadcast magnitudes and limb-darkening coefficients to the shape
    # of M1s (works for scalars and same-shaped arrays alike).
    mag1s = mag1s * np.ones_like(M1s)
    mag2s = mag2s * np.ones_like(M1s)
    u11s = u11s * np.ones_like(M1s)
    u21s = u21s * np.ones_like(M1s)
    u12s = u12s * np.ones_like(M1s)
    u22s = u22s * np.ones_like(M1s)
    n = np.size(M1s)
    # a bit clunky here, but works.
    simPs = False
    if period:
        Ps = np.ones(n) * period
    else:
        if Ps is None:
            # Draw periods from the provided log-period distribution.
            Ps = 10 ** (logperkde.rvs(n))
            simPs = True
    simeccs = False
    if eccs is None:
        if not simPs and period is not None:
            eccs = ecc_fn(n, period, maxecc=maxecc)
        else:
            eccs = ecc_fn(n, Ps, maxecc=maxecc)
        simeccs = True
    bad_Ps = np.isnan(Ps)
    if bad_Ps.sum() > 0:
        logging.warning('{} nan periods. why?'.format(bad_Ps.sum()))
    bad_eccs = np.isnan(eccs)
    if bad_eccs.sum() > 0:
        logging.warning('{} nan eccentricities. why?'.format(bad_eccs.sum()))
    semimajors = semimajor(Ps, M1s + M2s) * AU  # in AU
    # check to see if there are simulated instances that are
    # too close; i.e. periastron sends secondary within roche
    # lobe of primary
    tooclose = withinroche(semimajors * (1 - eccs) / AU, M1s, R1s, M2s, R2s)
    ntooclose = tooclose.sum()
    tries = 0
    maxtries = 5
    if simPs:
        # Periods were simulated: re-draw period (and eccentricity, if it
        # was simulated) for too-close systems until none remain.
        while ntooclose > 0:
            lastntooclose = ntooclose
            Ps[tooclose] = 10 ** (logperkde.rvs(ntooclose))
            if simeccs:
                eccs[tooclose] = draw_eccs(ntooclose, Ps[tooclose])
            semimajors[tooclose] = semimajor(Ps[tooclose], M1s[tooclose] + M2s[tooclose]) * AU
            tooclose = withinroche(semimajors * (1 - eccs) / AU, M1s, R1s, M2s, R2s)
            ntooclose = tooclose.sum()
            if ntooclose == lastntooclose:  # prevent infinite loop
                tries += 1
                if tries > maxtries:
                    logging.info('{} binaries are "too close"; gave up trying to fix.'.format(ntooclose))
                    break
    else:
        # Periods are fixed: only eccentricities may be re-drawn.
        while ntooclose > 0:
            lastntooclose = ntooclose
            if simeccs:
                eccs[tooclose] = draw_eccs(ntooclose, Ps[tooclose])
            semimajors[tooclose] = semimajor(Ps[tooclose], M1s[tooclose] + M2s[tooclose]) * AU
            # wtooclose = where(semimajors*(1-eccs) < 2*(R1s+R2s)*RSUN)
            tooclose = withinroche(semimajors * (1 - eccs) / AU, M1s, R1s, M2s, R2s)
            ntooclose = tooclose.sum()
            if ntooclose == lastntooclose:  # prevent infinite loop
                tries += 1
                if tries > maxtries:
                    logging.info('{} binaries are "too close"; gave up trying to fix.'.format(ntooclose))
                    break
    # randomize inclinations, either full range, or within restricted range
    if mininc is None and calc_mininc:
        mininc = minimum_inclination(Ps, M1s, M2s, R1s, R2s)
    if incs is None:
        if mininc is None:
            incs = np.arccos(np.random.random(n))  # random inclinations in radians
        else:
            incs = np.arccos(np.random.random(n) * np.cos(mininc * np.pi / 180))
    # Base probability accounts for restricting inclinations to >= mininc.
    if mininc:
        prob = np.cos(mininc * np.pi / 180)
    else:
        prob = 1
    logging.debug('initial probability given mininc starting at {}'.format(prob))
    # Random arguments of periastron.
    ws = np.random.random(n) * 2 * np.pi
    # Treat the larger star as the "primary" for impact parameters.
    switched = (R2s > R1s)
    R_large = switched * R2s + ~switched * R1s
    R_small = switched * R1s + ~switched * R2s
    # Impact parameters at transit (tra) and occultation (occ).
    b_tras = semimajors * np.cos(incs) / (R_large * RSUN) * (1 - eccs ** 2) / (1 + eccs * np.sin(ws))
    b_occs = semimajors * np.cos(incs) / (R_large * RSUN) * (1 - eccs ** 2) / (1 - eccs * np.sin(ws))
    # Too-close systems can never count as eclipsing.
    b_tras[tooclose] = np.inf
    b_occs[tooclose] = np.inf
    ks = R_small / R_large
    Rtots = (R_small + R_large) / R_large
    tra = (b_tras < Rtots)
    occ = (b_occs < Rtots)
    nany = (tra | occ).sum()
    peb = nany / float(n)
    prob *= peb
    if return_probability_only:
        return prob, prob * np.sqrt(nany) / n
    # Keep only the systems that eclipse (transit or occultation).
    i = (tra | occ)
    wany = np.where(i)
    P, M1, M2, R1, R2, mag1, mag2, inc, ecc, w = (
        Ps[i], M1s[i], M2s[i], R1s[i], R2s[i], mag1s[i], mag2s[i],
        incs[i] * 180 / np.pi, eccs[i], ws[i] * 180 / np.pi)
    a = semimajors[i]  # in cm already
    b_tra = b_tras[i]
    b_occ = b_occs[i]
    u11 = u11s[i]
    u21 = u21s[i]
    u12 = u12s[i]
    u22 = u22s[i]
    switched = (R2 > R1)
    R_large = switched * R2 + ~switched * R1
    R_small = switched * R1 + ~switched * R2
    k = R_small / R_large
    # calculate durations
    T14_tra = P / np.pi * np.arcsin(R_large * RSUN / a * np.sqrt((1 + k) ** 2 - b_tra ** 2) / np.sin(inc * np.pi / 180)) * np.sqrt(1 - ecc ** 2) / (1 + ecc * np.sin(w * np.pi / 180))  # *24*60
    T23_tra = P / np.pi * np.arcsin(R_large * RSUN / a * np.sqrt((1 - k) ** 2 - b_tra ** 2) / np.sin(inc * np.pi / 180)) * np.sqrt(1 - ecc ** 2) / (1 + ecc * np.sin(w * np.pi / 180))  # *24*60
    T14_occ = P / np.pi * np.arcsin(R_large * RSUN / a * np.sqrt((1 + k) ** 2 - b_occ ** 2) / np.sin(inc * np.pi / 180)) * np.sqrt(1 - ecc ** 2) / (1 - ecc * np.sin(w * np.pi / 180))  # *24*60
    T23_occ = P / np.pi * np.arcsin(R_large * RSUN / a * np.sqrt((1 - k) ** 2 - b_occ ** 2) / np.sin(inc * np.pi / 180)) * np.sqrt(1 - ecc ** 2) / (1 - ecc * np.sin(w * np.pi / 180))  # *24*60
    bad = (np.isnan(T14_tra) & np.isnan(T14_occ))
    if bad.sum() > 0:
        logging.error('Something snuck through with no eclipses!')
        logging.error('k: {}'.format(k[bad]))
        logging.error('b_tra: {}'.format(b_tra[bad]))
        logging.error('b_occ: {}'.format(b_occ[bad]))
        logging.error('T14_tra: {}'.format(T14_tra[bad]))
        logging.error('T14_occ: {}'.format(T14_occ[bad]))
        logging.error('under sqrt (tra): {}'.format((1 + k[bad]) ** 2 - b_tra[bad] ** 2))
        logging.error('under sqrt (occ): {}'.format((1 + k[bad]) ** 2 - b_occ[bad] ** 2))
        logging.error('eccsq: {}'.format(ecc[bad] ** 2))
        logging.error('a in Rsun: {}'.format(a[bad] / RSUN))
        logging.error('R_large: {}'.format(R_large[bad]))
        logging.error('R_small: {}'.format(R_small[bad]))
        logging.error('P: {}'.format(P[bad]))
        logging.error('total M: {}'.format(M1[bad] + M2[bad]))
    # NaN durations arise when a configuration grazes/misses one eclipse;
    # zero them out so downstream arithmetic stays finite.
    T14_tra[(np.isnan(T14_tra))] = 0
    T23_tra[(np.isnan(T23_tra))] = 0
    T14_occ[(np.isnan(T14_occ))] = 0
    T23_occ[(np.isnan(T23_occ))] = 0
    # calling mandel-agol
    ftra = MAfn(k, b_tra, u11, u21)
    focc = MAfn(1 / k, b_occ / k, u12, u22)
    # fix those with k or 1/k out of range of MAFN....or do it in MAfn eventually?
    wtrabad = np.where((k < MAfn.pmin) | (k > MAfn.pmax))
    woccbad = np.where((1 / k < MAfn.pmin) | (1 / k > MAfn.pmax))
    for ind in wtrabad[0]:
        ftra[ind] = occultquad(b_tra[ind], u11[ind], u21[ind], k[ind])
    for ind in woccbad[0]:
        focc[ind] = occultquad(b_occ[ind] / k[ind], u12[ind], u22[ind], 1 / k[ind])
    # Fluxes from magnitudes; the "switched" term folds in the companion
    # when the roles of primary/secondary were swapped above.
    F1 = 10 ** (-0.4 * mag1) + switched * 10 ** (-0.4 * mag2)
    F2 = 10 ** (-0.4 * mag2) + switched * 10 ** (-0.4 * mag1)
    # Eclipse depths at transit (dtra) and occultation (docc).
    dtra = 1 - (F2 + F1 * ftra) / (F1 + F2)
    docc = 1 - (F1 + F2 * focc) / (F1 + F2)
    totmag = -2.5 * np.log10(F1 + F2)
    # wswitched = where(switched)
    # Swap transit/occultation quantities back where star roles were
    # switched, so "pri" always refers to star 1.
    dtra[switched], docc[switched] = (docc[switched], dtra[switched])
    T14_tra[switched], T14_occ[switched] = (T14_occ[switched], T14_tra[switched])
    T23_tra[switched], T23_occ[switched] = (T23_occ[switched], T23_tra[switched])
    b_tra[switched], b_occ[switched] = (b_occ[switched], b_tra[switched])
    # mag1[wswitched], mag2[wswitched] = (mag2[wswitched], mag1[wswitched])
    F1[switched], F2[switched] = (F2[switched], F1[switched])
    u11[switched], u12[switched] = (u12[switched], u11[switched])
    u21[switched], u22[switched] = (u22[switched], u21[switched])
    dtra[(np.isnan(dtra))] = 0
    docc[(np.isnan(docc))] = 0
    if np.any(np.isnan(ecc)):
        logging.warning('{} nans in eccentricity. why?'.format(np.isnan(ecc).sum()))
    df = pd.DataFrame({'{}_mag_tot'.format(band): totmag,
                       'P': P, 'ecc': ecc, 'inc': inc, 'w': w,
                       'dpri': dtra, 'dsec': docc,
                       'T14_pri': T14_tra, 'T23_pri': T23_tra,
                       'T14_sec': T14_occ, 'T23_sec': T23_occ,
                       'b_pri': b_tra, 'b_sec': b_occ,
                       '{}_mag_1'.format(band): mag1,
                       '{}_mag_2'.format(band): mag2,
                       'fluxfrac_1': F1 / (F1 + F2),
                       'fluxfrac_2': F2 / (F1 + F2),
                       'switched': switched,
                       'u1_1': u11, 'u2_1': u21,
                       'u1_2': u12, 'u2_2': u22})
    df.reset_index(inplace=True)
    logging.debug('final prob: {}'.format(prob))
    if return_indices:
        return wany, df, (prob, prob * np.sqrt(nany) / n)
    else:
        return df, (prob, prob * np.sqrt(nany) / n)
|
def all(self):
    """Returns list with vids of all indexed partitions."""
    query = text("""
SELECT dataset_vid, vid
FROM partition_index;""")
    connection = self.backend.library.database.connection
    found = []
    for dataset_vid, vid in connection.execute(query):
        found.append(
            PartitionSearchResult(dataset_vid=dataset_vid, vid=vid, score=1))
    return found
|
def recordWidget(xparent, widget):
    """Records the inputed widget to the parent profile.

    :param      xparent | <xml.etree.Element>
                widget  | <QWidget>
    """
    if isinstance(widget, XSplitter):
        # splitters: store orientation + saved state, then recurse
        xsplit = ElementTree.SubElement(xparent, 'split')
        if widget.orientation() == Qt.Horizontal:
            orient = 'horizontal'
        else:
            orient = 'vertical'
        xsplit.set('orient', orient)
        xsplit.set('state', nativestring(widget.saveState().toBase64()))
        for index in range(widget.count()):
            XViewProfile.recordWidget(xsplit, widget.widget(index))
    elif isinstance(widget, XViewPanel):
        # panels: store tab info, then recurse into each page
        xpanel = ElementTree.SubElement(xparent, 'panel')
        xpanel.set('current', nativestring(widget.currentIndex()))
        xpanel.set('hideTabs', nativestring(widget.hideTabsWhenLocked()))
        for index in range(widget.count()):
            XViewProfile.recordWidget(xpanel, widget.widget(index))
    elif widget is not None:
        # leaf view: persist identifying info plus any custom XML state
        xview = ElementTree.SubElement(xparent, 'view')
        xview.set('name', nativestring(widget.objectName()))
        xview.set('title', nativestring(widget.windowTitle()))
        xview.set('type', nativestring(widget.viewTypeName()))
        xview.set('group', nativestring(widget.viewingGroup()))
        # flag the currently active view
        if widget.isCurrent():
            xview.set('current', 'True')
        widget.saveXml(xview)
|
def construct_time_based_gas_price_strategy(max_wait_seconds,
                                            sample_size=120,
                                            probability=98):
    """Build a gas pricing strategy from recently mined block data.

    The returned strategy derives a gas price for which a transaction is
    likely to be mined within X seconds with probability P.

    :param max_wait_seconds: The desired maximum number of seconds the
        transaction should take to mine.
    :param sample_size: The number of recent blocks to sample.
    :param probability: An integer representation of the desired
        probability that the transaction will be mined within
        ``max_wait_seconds``.  0 means 0% and 100 means 100%.
    """
    def time_based_gas_price_strategy(web3, transaction_params):
        # Convert the wall-clock budget into a number of blocks using the
        # observed average block time.
        block_time = _get_avg_block_time(web3, sample_size=sample_size)
        wait_blocks = int(math.ceil(max_wait_seconds / block_time))
        miner_data = _aggregate_miner_data(
            _get_raw_miner_data(web3, sample_size=sample_size))
        probabilities = _compute_probabilities(
            miner_data,
            wait_blocks=wait_blocks,
            sample_size=sample_size,
        )
        return _compute_gas_price(probabilities, probability / 100)
    return time_based_gas_price_strategy
|
def tensor_markov(*args):
    """Computes the product of independent markov chains.

    Each chain is a tuple ``(nodes, transitions)`` where ``nodes`` is a 2d
    array (one row per state) and ``transitions`` is the square transition
    matrix.  Any number (>= 2) of chains may be passed; they are combined
    left to right.

    :param args: two or more ``(nodes, transition_matrix)`` tuples
    :return: ``[nodes, transition_matrix]`` of the product chain
    :raises Exception: if fewer than 2 chains are supplied
    """
    if len(args) > 2:
        # Fold pairwise: combine the first two chains, then recurse with
        # the remaining ones.  Bug fix: the tail must be unpacked with *,
        # otherwise the remaining chains were passed as a single tuple
        # argument and the recursion crashed for 3+ chains.
        prod = tensor_markov(args[0], args[1])
        return tensor_markov(prod, *args[2:])
    elif len(args) == 2:
        m1, m2 = args
        n1, t1 = m1
        n2, t2 = m2
        n1 = np.array(n1, dtype=float)
        n2 = np.array(n2, dtype=float)
        t1 = np.array(t1, dtype=float)
        t2 = np.array(t2, dtype=float)
        # transition matrices must be square and match the node counts
        assert (n1.shape[0] == t1.shape[0] == t1.shape[1])
        assert (n2.shape[0] == t2.shape[0] == t2.shape[1])
        # product transition matrix is the Kronecker product
        t = np.kron(t1, t2)
        p = t1.shape[0]
        q = t2.shape[0]
        # product nodes: every combination of a state from chain 1 with a
        # state from chain 2, ordered consistently with np.kron
        n = np.column_stack([
            np.repeat(n1, q, axis=0),
            np.tile(n2, (p, 1)),
        ])
        return [n, t]
    else:
        raise Exception(
            "Incorrect number of arguments. Expected at least 2. Found {}.".format(len(args)))
|
def reshape_1d(df):
    """If parameter is a 1D row vector then convert it into a 2D matrix;
    anything already 2D (or higher) is returned unchanged."""
    if len(df.shape) == 1:
        return df.reshape(df.shape[0], 1)
    return df
|
def get_segment_efforts(self, segment_id, athlete_id=None,
                        start_date_local=None, end_date_local=None,
                        limit=None):
    """Gets all efforts on a particular segment sorted by start_date_local.

    Returns an array of segment effort summary representations sorted by
    start_date_local ascending, or by elapsed_time if an athlete_id is
    provided.

    If no filtering parameters are provided, all efforts for the segment
    will be returned.

    Date range filtering is accomplished using an inclusive start and end
    time, thus start_date_local and end_date_local must be sent together.
    For open ended ranges pick dates significantly in the past or future.
    The filtering is done over local time for the segment, so there is no
    need for timezone conversion.  For example, all efforts on
    Jan. 1st, 2014 for a segment in San Francisco, CA can be fetched using
    2014-01-01T00:00:00Z and 2014-01-01T23:59:59Z.

    http://strava.github.io/api/v3/segments/#all_efforts

    :param segment_id: ID of the segment.
    :type segment_id: int

    :param athlete_id: (optional) ID of athlete.
    :type athlete_id: int

    :param start_date_local: (optional) efforts before this date will be
        excluded.  Either as ISO8601 or datetime object.
    :type start_date_local: datetime.datetime or str

    :param end_date_local: (optional) efforts after this date will be
        excluded.  Either as ISO8601 or datetime object.
    :type end_date_local: datetime.datetime or str

    :param limit: (optional), limit number of efforts.
    :type limit: int

    :return: An iterator of :class:`stravalib.model.SegmentEffort` efforts
        on a segment.
    :rtype: :class:`BatchedResultsIterator`
    """
    def _format_date(value):
        # Accept either an ISO8601 string or a datetime object.
        if isinstance(value, six.string_types):
            value = arrow.get(value).naive
        return value.strftime("%Y-%m-%dT%H:%M:%SZ")

    params = {"segment_id": segment_id}
    if athlete_id is not None:
        params['athlete_id'] = athlete_id
    if start_date_local:
        params["start_date_local"] = _format_date(start_date_local)
    if end_date_local:
        params["end_date_local"] = _format_date(end_date_local)
    if limit is not None:
        params["limit"] = limit
    result_fetcher = functools.partial(
        self.protocol.get, '/segments/{segment_id}/all_efforts', **params)
    return BatchedResultsIterator(
        entity=model.BaseEffort,
        bind_client=self,
        result_fetcher=result_fetcher,
        limit=limit)
|
def get_parsed_args(self, comp_words):
    """Gets the parsed args from a patched parser."""
    active_parsers = self._patch_argument_parser()
    parsed_args = argparse.Namespace()
    self.completing = True
    words = comp_words
    if USING_PYTHON2:
        # Python 2 argparse only properly works with byte strings.
        words = [ensure_bytes(word) for word in words]
    try:
        active_parsers[0].parse_known_args(words, namespace=parsed_args)
    except BaseException:  # pylint: disable=broad-except
        # Completion must never blow up on partial/invalid input; return
        # whatever was parsed before the failure.
        pass
    self.completing = False
    return parsed_args
|
def nodeInLanguageStem(_: Context, n: Node, s: ShExJ.LanguageStem) -> bool:
    """http://shex.io/shex-semantics/#values

    **nodeIn**: asserts that an RDF node n is equal to an RDF term s or is
    in a set defined by a :py:class:`ShExJ.IriStem`,
    :py:class:`LiteralStem` or :py:class:`LanguageStem`.

    The expression `nodeInLanguageStem(n, s)` is satisfied iff:

    #) `s` is a :py:class:`ShExJ.WildCard` or
    #) `n` is a language-tagged string and fn:starts-with(`n.language`, `s`)
    """
    if isinstance(s, ShExJ.Wildcard):
        return True
    if not isinstance(n, Literal) or n.language is None:
        return False
    return str(n.language).startswith(str(s))
|
def create_new_account(data_dir, password, **geth_kwargs):
    """Creates a new Ethereum account on geth.

    This is useful for testing when you want to stress interaction
    (transfers) between Ethereum accounts.

    This command communicates with ``geth`` command over terminal
    interaction.  It creates keystore folder and new account there.

    This function only works against offline geth processes, because geth
    builds an account cache when starting up.  If geth process is already
    running you can create new accounts using
    `web3.personal.newAccount()
    <https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console#personalnewaccount>`_
    RPC API.

    Example py.test fixture for tests:

    .. code-block:: python

        import os

        from geth.wrapper import DEFAULT_PASSWORD_PATH
        from geth.accounts import create_new_account


        @pytest.fixture
        def target_account() -> str:
            '''Create a new Ethereum account on a running Geth node.

            The account can be used as a withdrawal target for tests.

            :return: 0x address of the account
            '''
            # We store keystore files in the current working directory
            # of the test run
            data_dir = os.getcwd()

            # Use the default password "this-is-not-a-secure-password"
            # as supplied in geth/default_blockchain_password file.
            # The supplied password must be bytes, not string, as we only
            # want ASCII characters and do not want to deal with encoding
            # problems with passwords.
            account = create_new_account(data_dir, DEFAULT_PASSWORD_PATH)
            return account

    :param data_dir: Geth data dir path - where to keep "keystore" folder
    :param password: Path to a file containing the password for newly
        created account, or the password itself as bytes
    :param geth_kwargs: Extra command line arguments passed to geth
    :return: Account as 0x prefixed hex string
    """
    # If ``password`` points at an existing file, geth reads it from there;
    # otherwise it is treated as the literal password supplied on stdin.
    password_is_file = os.path.exists(password)
    if password_is_file:
        geth_kwargs['password'] = password
    command, proc = spawn_geth(dict(
        data_dir=data_dir,
        suffix_args=['account', 'new'],
        **geth_kwargs))
    if password_is_file:
        stdoutdata, stderrdata = proc.communicate()
    else:
        # geth prompts twice: password entry and confirmation
        stdoutdata, stderrdata = proc.communicate(b"\n".join((password, password)))
    if proc.returncode:
        raise ValueError(format_error_message(
            "Error trying to create a new account",
            command,
            proc.returncode,
            stdoutdata,
            stderrdata,
        ))
    match = account_regex.search(stdoutdata)
    if not match:
        raise ValueError(format_error_message(
            "Did not find an address in process output",
            command,
            proc.returncode,
            stdoutdata,
            stderrdata,
        ))
    return b'0x' + match.groups()[0]
|
def parse_parameter(self, tup_tree):
    """<!ELEMENT PARAMETER (QUALIFIER*)>
    <!ATTLIST PARAMETER
        %CIMName;
        %CIMType; #REQUIRED>
    """
    # NAME and TYPE are required attributes; only QUALIFIER children allowed.
    self.check_node(tup_tree, 'PARAMETER', ('NAME', 'TYPE'), (), ('QUALIFIER',))
    attributes = attrs(tup_tree)
    quals = self.list_of_matching(tup_tree, ('QUALIFIER',))
    return CIMParameter(
        attributes['NAME'],
        type=attributes['TYPE'],
        is_array=False,
        qualifiers=quals,
        embedded_object=False,
    )
|
def filter_N_top(self, inst_rc, N_top, rank_type='sum'):
    '''Filter the matrix rows or columns based on sum/variance, and only keep the top'''
    frame = self.dat_to_df()
    filtered = run_filter.filter_N_top(inst_rc, frame, N_top, rank_type)
    self.df_to_dat(filtered)
|
def clear(self):
    """Brutely clears the key value store for keys with THUMBNAIL_KEY_PREFIX
    prefix. Use this in emergency situations. Normally you would probably
    want to use the ``cleanup`` method instead.
    """
    keys = self._find_keys_raw(settings.THUMBNAIL_KEY_PREFIX)
    if not keys:
        return
    self._delete_raw(*keys)
|
def total_ordering(cls):  # pragma: no cover
    """Class decorator that fills in missing ordering methods"""
    # For each possible "root" comparison, the derived comparisons and how
    # to express them in terms of the root plus __eq__.
    derived = {
        '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
                   ('__le__', lambda self, other: self < other or self == other),
                   ('__ge__', lambda self, other: not self < other)],
        '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
                   ('__lt__', lambda self, other: self <= other and not self == other),
                   ('__gt__', lambda self, other: not self <= other)],
        '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
                   ('__ge__', lambda self, other: self > other or self == other),
                   ('__le__', lambda self, other: not self > other)],
        '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
                   ('__gt__', lambda self, other: self >= other and not self == other),
                   ('__lt__', lambda self, other: not self >= other)],
    }
    defined = set(dir(cls)) & set(derived)
    if not defined:
        raise ValueError('must define at least one ordering operation: < > <= >=')
    # prefer __lt__ to __le__ to __gt__ to __ge__ (lexicographic max)
    root = max(defined)
    for opname, opfunc in derived[root]:
        if opname not in defined:
            opfunc.__name__ = opname
            opfunc.__doc__ = getattr(int, opname).__doc__
            setattr(cls, opname, opfunc)
    return cls
|
def save_current_nb_as_html(info=False):
    """Save the current notebook as html file in the same directory"""
    assert in_ipynb()

    nb_path = get_notebook_name()
    target_dir, nb_file = os.path.split(nb_path)

    # nbconvert writes next to the notebook, so run it from that directory
    # and restore the previous working directory afterwards.
    prev_cwd = os.getcwd()
    os.chdir(target_dir)
    command = 'jupyter nbconvert --to html "{}"'.format(nb_file)
    os.system(command)
    os.chdir(prev_cwd)

    if info:
        print("target dir: ", target_dir)
        print("cmd: ", command)
        print("working dir: ", prev_cwd)
|
def get_root_folder():
    """returns the home folder and program root depending on OS"""
    cwd = os.getcwd()
    locations = {
        'linux': {'hme': '/home/duncan/', 'core_folder': '/home/duncan/dev/src/python/AIKIF'},
        'win32': {'hme': 'T:\\user\\', 'core_folder': 'T:\\user\\dev\\src\\python\\AIKIF'},
        'cygwin': {'hme': cwd + os.sep, 'core_folder': cwd},
        'darwin': {'hme': cwd + os.sep, 'core_folder': cwd},
    }
    entry = locations[sys.platform]
    hme = entry['hme']
    core_folder = entry['core_folder']
    # Fall back to the current directory when the hard-coded developer
    # paths do not exist (e.g. on a CI machine).
    if not os.path.exists(core_folder):
        hme = os.getcwd()
        core_folder = os.getcwd()
        print('config.py : running on CI build (or you need to modify the paths in config.py)')
    return hme, core_folder
|
def _default_ising_beta_range ( h , J ) :
"""Determine the starting and ending beta from h J
Args :
h ( dict )
J ( dict )
Assume each variable in J is also in h .
We use the minimum bias to give a lower bound on the minimum energy gap , such at the
final sweeps we are highly likely to settle into the current valley ."""
|
# Get nonzero , absolute biases
abs_h = [ abs ( hh ) for hh in h . values ( ) if hh != 0 ]
abs_J = [ abs ( jj ) for jj in J . values ( ) if jj != 0 ]
abs_biases = abs_h + abs_J
if not abs_biases :
return [ 0.1 , 1.0 ]
# Rough approximation of min change in energy when flipping a qubit
min_delta_energy = min ( abs_biases )
# Combine absolute biases by variable
abs_bias_dict = { k : abs ( v ) for k , v in h . items ( ) }
for ( k1 , k2 ) , v in J . items ( ) :
abs_bias_dict [ k1 ] += abs ( v )
abs_bias_dict [ k2 ] += abs ( v )
# Find max change in energy when flipping a single qubit
max_delta_energy = max ( abs_bias_dict . values ( ) )
# Selecting betas based on probability of flipping a qubit
# Hot temp : We want to scale hot _ beta so that for the most unlikely qubit flip , we get at least
# 50 % chance of flipping . ( This means all other qubits will have > 50 % chance of flipping
# initially . ) Most unlikely flip is when we go from a very low energy state to a high energy
# state , thus we calculate hot _ beta based on max _ delta _ energy .
# 0.50 = exp ( - hot _ beta * max _ delta _ energy )
# Cold temp : Towards the end of the annealing schedule , we want to minimize the chance of
# flipping . Don ' t want to be stuck between small energy tweaks . Hence , set cold _ beta so that
# at minimum energy change , the chance of flipping is set to 1 % .
# 0.01 = exp ( - cold _ beta * min _ delta _ energy )
hot_beta = np . log ( 2 ) / max_delta_energy
cold_beta = np . log ( 100 ) / min_delta_energy
return [ hot_beta , cold_beta ]
|
def describe(self, language=DEFAULT_LANGUAGE, min_score: int = 75) -> dict:
    """Return a dictionary that describes a given language tag in a specified
    natural language.

    See `language_name` and related methods for more specific versions of this.

    The desired `language` will in fact be matched against the available
    options using the matching technique that this module provides.

    >>> from pprint import pprint
    >>> shaw = Language.make(script='Shaw').maximize()
    >>> pprint(shaw.describe('en'))
    {'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'}
    >>> pprint(shaw.describe('fr'))
    {'language': 'anglais', 'region': 'Royaume-Uni', 'script': 'shavien'}

    When we don't have a localization for the language, we fall back on
    'und', which just shows the language codes.

    >>> pprint(shaw.describe('lol'))
    {'language': 'en', 'region': 'GB', 'script': 'Shaw'}
    """
    # (key, attribute value, naming function) triples; only truthy
    # attributes get an entry in the result.
    parts = (
        ('language', self.language, self.language_name),
        ('script', self.script, self.script_name),
        ('region', self.region, self.region_name),
        ('variants', self.variants, self.variant_names),
    )
    described = {}
    for key, attr_value, name_func in parts:
        if attr_value:
            described[key] = name_func(language, min_score)
    return described
|
def get_methods_by_name(self, name):
    """generator of methods matching name. This will include any bridges
    present.
    """
    return (meth for meth in self.methods if meth.get_name() == name)
|
def list_protocols(profile, **libcloud_kwargs):
    '''Return a list of supported protocols.

    :param profile: The profile key
    :type profile: ``str``

    :param libcloud_kwargs: Extra arguments for the driver's list_protocols method
    :type libcloud_kwargs: ``dict``

    :return: a list of supported protocols
    :rtype: ``list`` of ``str``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.list_protocols profile1
    '''
    driver = _get_driver(profile=profile)
    cleaned_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    return driver.list_protocols(**cleaned_kwargs)
|
def firmware_drivers(self):
    """Gets the FirmwareDrivers API client.

    Returns:
        FirmwareDrivers:
    """
    # Lazily create and memoize the client on first access.
    client = self.__firmware_drivers
    if not client:
        client = FirmwareDrivers(self.__connection)
        self.__firmware_drivers = client
    return client
|
def classorder(self, classes):
    """Return a list of class IDs in order for presentational purposes: order is
    determined first and foremost by explicit ordering, else alphabetically by
    label or as a last resort by class ID"""
    with_seq = [(cid, item) for cid, item in classes.items() if 'seqnr' in item]
    without_seq = [(cid, item) for cid, item in classes.items() if 'seqnr' not in item]
    # Explicitly ordered classes first, by their sequence number.
    with_seq.sort(key=lambda pair: pair[1]['seqnr'])
    # Then the rest, alphabetically by label, falling back to the class ID.
    without_seq.sort(key=lambda pair: pair[1]['label'] if 'label' in pair[1] else pair[1]['id'])
    return [cid for cid, _ in with_seq] + [cid for cid, _ in without_seq]
|
def problem_serializing(value, e=None):
    """THROW ERROR ABOUT SERIALIZING"""
    from mo_logs import Log

    # Best-effort: even computing the type name or repr may blow up on
    # pathological objects, so guard both.
    try:
        typename = type(value).__name__
    except Exception:
        typename = "<error getting name>"

    try:
        rep = text_type(repr(value))
    except Exception as _:
        rep = None

    if rep is not None:
        Log.error("Problem turning value ({{value}}) of type {{type}} to json", value=rep, type=typename, cause=e)
    else:
        Log.error("Problem turning value of type {{type}} to json", type=typename, cause=e)
|
def logit(self, msg, pid, user, cname, priority=None):
    """Function for formatting content and logging to syslog"""
    # A configured stream takes precedence over syslog.
    if self.stream:
        print(msg, file=self.stream)
        return

    if priority == logging.WARNING:
        emit = self.logger.warning
        text = "{0}[pid:{1}] user:{2}: WARNING - {3}".format(cname, pid, user, msg)
    elif priority == logging.ERROR:
        emit = self.logger.error
        text = "{0}[pid:{1}] user:{2}: ERROR - {3}".format(cname, pid, user, msg)
    else:
        emit = self.logger.info
        text = "{0}[pid:{1}] user:{2}: INFO - {3}".format(cname, pid, user, msg)
    emit(text)
|
def ReplaceStoredProcedure(self, sproc_link, sproc, options=None):
    """Replaces a stored procedure and returns it.

    :param str sproc_link:
        The link to the stored procedure.
    :param dict sproc:
    :param dict options:
        The request options for the request.

    :return:
        The replaced Stored Procedure.
    :rtype:
        dict
    """
    if options is None:
        options = {}

    CosmosClient.__ValidateResource(sproc)
    # Work on a copy so the caller's dict is not mutated; normalize the
    # script body to a string under the 'body' key.
    payload = sproc.copy()
    if payload.get('serverScript'):
        payload['body'] = str(payload['serverScript'])
    elif payload.get('body'):
        payload['body'] = str(payload['body'])

    path = base.GetPathFromLink(sproc_link)
    sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
    return self.Replace(payload, path, 'sprocs', sproc_id, None, options)
|
def get_contributor_sort_value(self, obj):
    """Generate display name for contributor."""
    user = obj.contributor
    # Prefer the real name when any part of it is set; else the username.
    has_real_name = user.first_name or user.last_name
    display = user.get_full_name() if has_real_name else user.username
    return display.strip().lower()
|
def addNotice(self, data):
    """Add custom notice to front-end for this NodeServers

    :param data: String of characters to add as a notification in the front-end.
    """
    LOGGER.info('Sending addnotice to Polyglot: {}'.format(data))
    self.send({'addnotice': data})
|
def search(pattern, sentence, *args, **kwargs):
    """Returns a list of all matches found in the given sentence."""
    # `compile` here is the module-level pattern compiler, not the builtin.
    matcher = compile(pattern, *args, **kwargs)
    return matcher.search(sentence)
|
def init_fftw_plan(self, planning_effort='measure', **kwargs):
    """Initialize the FFTW plan for this transform for later use.

    If the implementation of this operator is not 'pyfftw', this
    method should not be called.

    Parameters
    ----------
    planning_effort : str, optional
        Flag for the amount of effort put into finding an optimal
        FFTW plan. See the `FFTW doc on planner flags
        <http://www.fftw.org/fftw3_doc/Planner-Flags.html>`_.
        Options: {'estimate', 'measure', 'patient', 'exhaustive'}
    planning_timelimit : float or ``None``, optional
        Limit planning time to roughly this many seconds.
        Default: ``None`` (no limit)
    threads : int, optional
        Number of threads to use. Default: 1

    Raises
    ------
    ValueError
        If `impl` is not 'pyfftw'

    Notes
    -----
    To save memory, clear the plan when the transform is no longer
    used (the plan stores 2 arrays).

    See Also
    --------
    clear_fftw_plan
    """
    if self.impl != 'pyfftw':
        raise ValueError('cannot create fftw plan without fftw backend')

    # Using available temporaries if possible.
    # For the inverse transform, the "real" space is the range and the
    # "frequency" space is the domain; for the forward transform it is
    # the other way around.
    inverse = isinstance(self, FourierTransformInverse)
    if inverse:
        rspace = self.range
        fspace = self.domain
    else:
        rspace = self.domain
        fspace = self.range

    if rspace.field == ComplexNumbers():
        # C2C: Use either one of 'r' or 'f' temporary if initialized;
        # input and output arrays may be the same (in-place transform).
        if self._tmp_r is not None:
            arr_in = arr_out = self._tmp_r
        elif self._tmp_f is not None:
            arr_in = arr_out = self._tmp_f
        else:
            arr_in = arr_out = rspace.element().asarray()
    elif self.halfcomplex:
        # R2HC / HC2R: Use 'r' and 'f' temporary distinctly if initialized,
        # since the real and frequency arrays have different shapes/dtypes.
        if self._tmp_r is not None:
            arr_r = self._tmp_r
        else:
            arr_r = rspace.element().asarray()
        if self._tmp_f is not None:
            arr_f = self._tmp_f
        else:
            arr_f = fspace.element().asarray()
        if inverse:
            arr_in, arr_out = arr_f, arr_r
        else:
            arr_in, arr_out = arr_r, arr_f
    else:
        # R2C / C2R: Use 'f' temporary for both sides if initialized.
        if self._tmp_f is not None:
            arr_in = arr_out = self._tmp_f
        else:
            arr_in = arr_out = fspace.element().asarray()

    # NOTE(review): 'planning_timelimit' is popped from kwargs but never
    # forwarded to pyfftw_call, despite being documented above — verify
    # whether this is intentional or a dropped assignment.
    kwargs.pop('planning_timelimit', None)

    direction = 'forward' if self.sign == '-' else 'backward'
    self._fftw_plan = pyfftw_call(arr_in, arr_out, direction=direction, halfcomplex=self.halfcomplex, axes=self.axes, planning_effort=planning_effort, **kwargs)
|
def epoch_cb(self):
    """Callback function after each epoch. Now it records each epoch time
    and append it to epoch dataframe.
    """
    metrics = {'elapsed': self.elapsed()}
    now = datetime.datetime.now()
    # Duration of the epoch that just finished.
    metrics['epoch_time'] = now - self.last_epoch_time
    self.append_metrics(metrics, 'epoch')
    self.last_epoch_time = now
|
def sam_readline(sock, partial=None):
    """read a line from a sam control socket"""
    buf = b''
    caught = None
    while True:
        try:
            ch = sock.recv(1)
        except (BlockingIOError, pysocket.timeout) as exc:
            # In non-blocking mode the caller passes `partial` and gets the
            # exception back instead of having it raised.
            if partial is None:
                raise exc
            caught = exc
            break
        if not ch:
            raise EOFError('SAM connection died. Partial response %r %r' % (partial, buf))
        if ch == b'\n':
            break
        buf += ch
    if partial is None:
        return buf.decode('ascii')
    return (partial + buf.decode('ascii'), caught)
|
def hacking_no_removed_module(logical_line, noqa):
    r"""Check for removed modules in Python 3.

    Examples:
    Okay: from os import path
    Okay: from os import path as p
    Okay: from os import (path as p)
    Okay: import os.path
    H237: import thread
    Okay: import thread #noqa
    H237: import commands
    H237: import md5 as std_md5
    """
    if noqa:
        return
    line = core.import_normalize(logical_line.strip())
    if not line:
        return
    tokens = line.split()
    if tokens[0] != 'import':
        return
    # Only the top-level package name matters for the removed-module check.
    module_name = tokens[1].split('.')[0]
    if module_name in removed_modules:
        yield 0, ("H237: module %s is removed in Python 3" % module_name)
|
def must_open(filename, mode="r", checkexists=False, skipcheck=False, oappend=False):
    """Accepts filename and returns filehandle.

    Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file.

    :param filename: path, list of paths, "-"/"stdin", "stdout", "stderr",
        "tmp", an s3:// URI, or a .gz/.bz2 path
    :param mode: open mode; some special names assert "r" or "w"
    :param checkexists: for plain writes, check before overwriting
    :param skipcheck: with checkexists, only test os existence (no prompt)
    :param oappend: with checkexists, append instead of truncating
    :return: a file-like object, or None when an existing file is skipped
    """
    if isinstance(filename, list):
        assert "r" in mode
        if filename[0].endswith((".gz", ".bz2")):
            # Concatenate paths into one shell command argument string.
            filename = " ".join(filename)  # allow opening multiple gz/bz2 files
        else:
            import fileinput
            return fileinput.input(filename)

    if filename.startswith("s3://"):
        # Download to a local file first, then open that.
        from jcvi.utils.aws import pull_from_s3
        filename = pull_from_s3(filename)

    if filename in ("-", "stdin"):
        assert "r" in mode
        fp = sys.stdin
    elif filename == "stdout":
        assert "w" in mode
        fp = sys.stdout
    elif filename == "stderr":
        assert "w" in mode
        fp = sys.stderr
    elif filename == "tmp" and mode == "w":
        from tempfile import NamedTemporaryFile
        fp = NamedTemporaryFile(delete=False)
    elif filename.endswith(".gz"):
        if 'r' in mode:
            # Decompress through a pipe rather than in-process.
            cmd = "gunzip -c {0}".format(filename)
            fp = popen(cmd, debug=False)
        elif 'w' in mode:
            import gzip
            fp = gzip.open(filename, mode)
    elif filename.endswith(".bz2"):
        if 'r' in mode:
            cmd = "bzcat {0}".format(filename)
            fp = popen(cmd, debug=False)
        elif 'w' in mode:
            import bz2
            fp = bz2.BZ2File(filename, mode)
    else:
        if checkexists:
            assert mode == "w"
            # skipcheck: only look at the filesystem; otherwise defer to
            # check_exists (which may prompt / apply policy).
            overwrite = (not op.exists(filename)) if skipcheck else check_exists(filename, oappend)
            if overwrite:
                if oappend:
                    fp = open(filename, "a")
                else:
                    fp = open(filename, "w")
            else:
                logging.debug("File `{0}` already exists. Skipped.".format(filename))
                return None
        else:
            fp = open(filename, mode)

    return fp
|
def update_values(self):
    """Update form values when detection method is selected."""
    self.method = self.idx_method.currentText()
    sw_det = DetectSlowWave(method=self.method)
    # Map each form index key to the default from the chosen method.
    defaults = {
        'f1': sw_det.det_filt['freq'][0],
        'f2': sw_det.det_filt['freq'][1],
        'min_trough_dur': sw_det.trough_duration[0],
        'max_trough_dur': sw_det.trough_duration[1],
        'max_trough_amp': sw_det.max_trough_amp,
        'min_ptp': sw_det.min_ptp,
        'min_dur': sw_det.min_dur,
        'max_dur': sw_det.max_dur,
    }
    for key, val in defaults.items():
        self.index[key].set_value(val)
|
def value(self):
    """returns the class as a dictionary"""
    result = {}
    # Only whitelisted keys with a non-None backing "_<key>" attribute
    # are included.
    for key in self.__allowed_keys:
        attr = getattr(self, "_" + key)
        if attr is not None:
            result[key] = attr
    return result
|
def obo(self):
    """str: the ontology serialized in obo format."""
    meta_block = self._obo_meta()
    head = [meta_block] if meta_block else []
    sep = "\n\n" if six.PY3 else "\n\n".encode('utf-8')
    try:
        # When a namespace is declared in the metadata, only serialize
        # terms whose id starts with that namespace prefix.
        return sep.join(
            head
            + [r.obo for r in self.typedefs]
            + [t.obo for t in self if t.id.startswith(self.meta['namespace'][0])]
        )
    except KeyError:
        # No 'namespace' in the metadata: serialize every term.
        return sep.join(head + [r.obo for r in self.typedefs] + [t.obo for t in self])
|
def stop(self) -> None:
    """Stops the running simulation once the current event is done executing."""
    if not self.is_running:
        return
    if _logger is not None:
        self._log(INFO, "stop", __now=self.now())
    self._is_running = False
|
def render_template(self, plain, rich=None, **context):
    '''Render the body of the message from a template. The plain
    body will be rendered from a template named ``plain`` or
    ``plain + '.txt'`` (in that order of preference). The rich
    body will be rendered from ``rich`` if given, or else from
    ``plain + '.html'``. If neither exists, then the message will
    have no rich body.
    '''
    # NOTE: the bare name `render_template` below resolves to the
    # module-level template renderer, not this method.
    self.plain = render_template([plain, plain + '.txt'], **context)
    if rich is not None:
        self.rich = render_template(rich, **context)
        return
    try:
        self.rich = render_template(plain + '.html', **context)
    except TemplateNotFound:
        # No html template: message simply has no rich body.
        pass
|
def result(self, r=None, **kwargs):
    '''Validates a result, stores it in self.results and prints it.
    Accepts the same kwargs as the binwalk.core.module.Result class.

    @r - An existing instance of binwalk.core.module.Result.

    Returns an instance of binwalk.core.module.Result.
    '''
    if r is None:
        r = Result(**kwargs)

    # Tag the result with the reporting module's name.
    r.module = self.__class__.__name__

    # Any module that is reporting results, valid or not, should be
    # marked as enabled.
    if not self.enabled:
        self.enabled = True

    self.validate(r)
    self._plugins_result(r)

    # Update the progress status automatically if it is not being done
    # manually by the module.
    if r.offset and r.file and self.AUTO_UPDATE_STATUS:
        self.status.total = r.file.length
        self.status.completed = r.offset
        self.status.fp = r.file

    # Notify any dependency modules that registered a callback.
    for dep in self.dependencies:
        try:
            getattr(self, dep.attribute).callback(r)
        except AttributeError:
            continue

    if r.valid:
        self.results.append(r)

    if r.display:
        args = self._build_display_args(r)
        if args:
            self.config.display.format_strings(self.HEADER_FORMAT, self.RESULT_FORMAT)
            self.config.display.result(*args)

    return r
|
def _wait_for_reader(self):
    """Checks for backpressure by the downstream reader."""
    if self.max_size <= 0:
        # Unlimited queue: never block the writer.
        return
    if self.write_item_offset - self.cached_remote_offset <= self.max_size:
        # Hasn't reached max size according to the cached ack.
        return

    # Poll until the reader publishes its first ack.
    remote_offset = internal_kv._internal_kv_get(self.read_ack_key)
    while remote_offset is None:
        time.sleep(0.01)
        remote_offset = internal_kv._internal_kv_get(self.read_ack_key)
    remote_offset = int(remote_offset)

    # Block until the reader has consumed enough to open up space.
    if self.write_item_offset - remote_offset > self.max_size:
        logger.debug("[writer] Waiting for reader to catch up {} to {} - {}".format(remote_offset, self.write_item_offset, self.max_size))
    while self.write_item_offset - remote_offset > self.max_size:
        time.sleep(0.01)
        remote_offset = int(internal_kv._internal_kv_get(self.read_ack_key))

    self.cached_remote_offset = remote_offset
|
def hexdiff(x, y):
    """Show differences between 2 binary strings.

    Prints a side-by-side colored hexdump of ``x`` and ``y``, aligning
    common regions via an edit-distance (Levenshtein-style) alignment.
    Output goes to stdout; nothing is returned.
    """
    # Work on reversed byte strings; offsets are recovered later.
    x = bytes_encode(x)[::-1]
    y = bytes_encode(y)[::-1]
    SUBST = 1
    INSERT = 1
    # d maps (i, j) -> (cost, predecessor) for the dynamic program.
    d = {(-1, -1): (0, (-1, -1))}
    for j in range(len(y)):
        d[-1, j] = d[-1, j - 1][0] + INSERT, (-1, j - 1)
    for i in range(len(x)):
        d[i, -1] = d[i - 1, -1][0] + INSERT, (i - 1, -1)
    for j in range(len(y)):
        for i in range(len(x)):
            d[i, j] = min((d[i - 1, j - 1][0] + SUBST * (x[i] != y[j]), (i - 1, j - 1)),  # noqa: E501
                          (d[i - 1, j][0] + INSERT, (i - 1, j)),
                          (d[i, j - 1][0] + INSERT, (i, j - 1)))
    # Backtrack through the DP table; each step appends a (possibly empty)
    # one-byte slice from each input, so the two lists stay in lockstep.
    backtrackx = []
    backtracky = []
    i = len(x) - 1
    j = len(y) - 1
    while not (i == j == -1):
        i2, j2 = d[i, j][1]
        backtrackx.append(x[i2 + 1:i + 1])
        backtracky.append(y[j2 + 1:j + 1])
        i, j = i2, j2
    # Reuse x / y as running byte counts, i as the position in the backtrack.
    x = y = i = 0
    colorize = {0: lambda x: x, -1: conf.color_theme.left, 1: conf.color_theme.right}
    # dox / doy control whether the x row, the y row, or both are printed
    # for the current 16-byte line.
    dox = 1
    doy = 0
    btx_len = len(backtrackx)
    while i < btx_len:
        linex = backtrackx[i:i + 16]
        liney = backtracky[i:i + 16]
        xx = sum(len(k) for k in linex)
        yy = sum(len(k) for k in liney)
        if dox and not xx:
            dox = 0
            doy = 1
        if dox and linex == liney:
            doy = 1
        if dox:
            # Offset label for the x side (corrected for leading empties).
            xd = y
            j = 0
            while not linex[j]:
                j += 1
                xd -= 1
            print(colorize[doy - dox]("%04x" % xd), end=' ')
            x += xx
            line = linex
        else:
            print(" ", end=' ')
        if doy:
            # Offset label for the y side.
            yd = y
            j = 0
            while not liney[j]:
                j += 1
                yd -= 1
            print(colorize[doy - dox]("%04x" % yd), end=' ')
            y += yy
            line = liney
        else:
            print(" ", end=' ')
        print(" ", end=' ')
        # cl accumulates the printable-ASCII column for this line.
        cl = ""
        for j in range(16):
            if i + j < btx_len:
                if line[j]:
                    # Differing bytes get the side's color.
                    col = colorize[(linex[j] != liney[j]) * (doy - dox)]
                    print(col("%02X" % orb(line[j])), end=' ')
                    if linex[j] == liney[j]:
                        cl += sane_color(line[j])
                    else:
                        cl += col(sane(line[j]))
                else:
                    print(" ", end=' ')
                    cl += " "
            else:
                print(" ", end=' ')
            if j == 7:
                # Extra gap between the two 8-byte halves.
                print("", end=' ')
        print(" ", cl)
        if doy or not yy:
            # Both rows (or all that exists) printed: advance to next line.
            doy = 0
            dox = 1
            i += 16
        else:
            if yy:
                # x row printed but y differs: print the y row next pass.
                dox = 0
                doy = 1
            else:
                i += 16
|
def setColor(self, color):
    """Convenience method to set the border, fill and highlight colors based
    on the inputed color.

    :param color | <QColor>
    """
    # border uses the color at full opacity
    self.setBorderColor(color)

    # highlight is the same hue with alpha 150
    highlight = QColor(color)
    highlight.setAlpha(150)
    self.setHighlightColor(highlight)

    # fill is the same hue with alpha 80
    fill = QColor(color)
    fill.setAlpha(80)
    self.setFillColor(fill)
|
def _internal_write(out_stream, arr):
    """Writes numpy.ndarray arr to a file-like object (with write() method) in
    IDX format."""
    if arr.size == 0:
        raise FormatError('Cannot encode empty array.')

    dtype_key = str(arr.dtype)
    try:
        type_byte, struct_lib_type = _DATA_TYPES_NUMPY[dtype_key]
    except KeyError:
        raise FormatError('numpy ndarray type not supported by IDX format.')

    if arr.ndim > _MAX_IDX_DIMENSIONS:
        raise FormatError('IDX format cannot encode array with dimensions > 255')
    if max(arr.shape) > _MAX_AXIS_LENGTH:
        raise FormatError('IDX format cannot encode array with more than ' + str(_MAX_AXIS_LENGTH) + ' elements along any axis')

    # Magic number: two zero bytes, the type code, number of dimensions.
    out_stream.write(struct.pack('BBBB', 0, 0, type_byte, arr.ndim))
    # Each axis length as a big-endian unsigned 32-bit int.
    out_stream.write(struct.pack('>' + 'I' * arr.ndim, *arr.shape))

    payload_fmt = '>' + struct_lib_type * arr.size
    if sys.version_info < (2, 7) and dtype_key == 'uint8':
        # Horrible hack to deal with horrible bug when using struct.pack to
        # encode unsigned ints in 2.7 and lower,
        # see http://bugs.python.org/issue2263
        as_ints = [int(i) for i in arr.reshape(-1)]
        out_stream.write(struct.pack(payload_fmt, *as_ints))
    else:
        # Write array contents - note that the limit to number of arguments
        # doesn't apply to unrolled arguments.
        out_stream.write(struct.pack(payload_fmt, *arr.reshape(-1)))
|
def get_object(self, identifier, mask=None):
    """Get a Reserved Capacity Group

    :param int identifier: Id of the SoftLayer_Virtual_ReservedCapacityGroup
    :param string mask: override default object Mask
    """
    if mask is None:
        mask = ("mask[instances[billingItem[item[keyName],category], guest], "
                "backendRouter[datacenter]]")
    return self.client.call(self.rcg_service, 'getObject', id=identifier, mask=mask)
|
def fetch_file_handler(unused_build_context, target, fetch, package_dir, tar):
    """Handle remote downloadable file URI.

    Download the file and cache it under the private builer workspace
    (unless already downloaded), and add it to the package tar.

    TODO(itamar): Support re-downloading if remote changed compared to local.
    """
    # Optional sub-directory named after the fetch entry.
    dl_dir = join(package_dir, fetch.name) if fetch.name else package_dir
    dest = join(dl_dir, basename(urlparse(fetch.uri).path))
    fetch_url(fetch.uri, dest, dl_dir)
    tar.add(package_dir, arcname=split_name(target.name))
|
def merge_upwards_if_smaller_than(self, small_size, a_or_u):
    """After prune_if_smaller_than is run, we may still have excess
    nodes.

    For example, with a small_size of 609710690:

        28815419  /data/*
              32  /data/srv/*
          925746  /data/srv/docker.bak/*
              12  /data/srv/docker.bak/shared/*
       682860348  /data/srv/docker.bak/shared/standalone/*

    This is reduced to:

        31147487  /*
       682860355  /data/srv/docker.bak/shared/standalone/*

    Run this only when done with the scanning.
    """
    # Assert that we're not messing things up: total sizes must be
    # unchanged after merging (sizes only move between nodes).
    prev_app_size = self.app_size()
    prev_use_size = self.use_size()
    small_nodes = self._find_small_nodes(small_size, (), a_or_u)
    for node, parents in small_nodes:
        # Check immediate grandparent for isdir=None and if it
        # exists, move this there. The isdir=None node is always
        # last.
        if len(parents) >= 2:
            tail = parents[-2]._nodes[-1]
            if tail._isdir is None:
                assert tail._app_size is not None, tail
                # Fold this node's sizes into the grandparent's
                # catch-all tail node, then detach it from its parent.
                tail._add_size(node.app_size(), node.use_size())
                parents[-1]._nodes.remove(node)
                assert len(parents[-1]._nodes)
    # The actual assertion.
    assert prev_app_size == self.app_size(), (prev_app_size, self.app_size())
    assert prev_use_size == self.use_size(), (prev_use_size, self.use_size())
|
def add_cron(self, name, command, minute="*", hour="*", mday="*", month="*", wday="*", who="root", env=None):
    """Write a file to /etc/cron.d to schedule a command.

    :param name: used as the name of the file under /etc/cron.d
    :param command: command line to schedule
    :param minute: cron minute field; the special value 'random' picks a
        random minute once, at write time
    :param hour: cron hour field; 'random' picks a random hour at write time
    :param mday: cron day-of-month field
    :param month: cron month field
    :param wday: cron day-of-week field
    :param who: user the command runs as (cron.d files carry a user column)
    :param env: optional dict of environment variables to set in the file
    """
    if minute == 'random':
        minute = str(random.randrange(60))
    if hour == 'random':
        hour = str(random.randrange(24))
    # Bug fix: the file handle was previously opened/closed manually and
    # leaked if a write raised; 'with' guarantees flush + close.
    with open('/etc/cron.d/%s' % name, "w") as fp:
        if env:
            for key, value in env.items():
                fp.write('%s=%s\n' % (key, value))
        fp.write('%s %s %s %s %s %s %s\n' % (minute, hour, mday, month, wday, who, command))
|
def as_set(obj):
    """Convert obj into a set; returns None if obj is None.

    >>> assert as_set(None) is None and as_set(1) == set([1]) and as_set(range(1, 3)) == set([1, 2])
    """
    # Bug fix: collections.Set / collections.Iterable were deprecated
    # aliases and removed in Python 3.10; the ABCs live in collections.abc.
    if obj is None or isinstance(obj, collections.abc.Set):
        # Already a set (or frozenset, etc.) -- return unchanged.
        return obj
    if isinstance(obj, collections.abc.Iterable):
        return set(obj)
    # Scalar: wrap it in a one-element set.
    return {obj}
|
def fire(self, *args, **kwargs):
    """Fire the event and invoke every registered handler.

    Calling the EventHandler object itself, e(*args, **kwargs), is
    equivalent to e.fire(*args, **kwargs).
    """
    for handler in self._getfunctionlist():
        if type(handler) == EventHandler:
            # Nested event handler: propagate the event as-is.
            handler.fire(*args, **kwargs)
        else:
            # Plain callable: the owning object is passed first.
            handler(self.obj, *args, **kwargs)
|
def find_by_organization(self, organization, params={}, **options):
    """Return the compact records for all teams in the organization that are
    visible to the authorized user.

    Parameters
    ----------
    organization : {Id} Globally unique identifier for the workspace or organization.
    [params] : {Object} Parameters for the request
    """
    endpoint = "/organizations/%s/teams" % (organization)
    return self.client.get_collection(endpoint, params, **options)
|
def get_by_username(cls, username):
    """Return the first User whose ``username`` matches.

    Bug fix (docs): the previous docstring said "by email address", but the
    query filters on the ``username`` column.

    :param username: exact username to look up.
    :return: whatever ``.first()`` yields -- presumably a User instance or
        None when no match exists (TODO confirm against the query class).
    """
    return cls.query().filter(cls.username == username).first()
|
def _split_sequences_multitraj(dtrajs, lag):
    """Split the discrete trajectories into conditional sequences by
    starting state.

    Parameters
    ----------
    dtrajs : list of int-iterables
        discrete trajectories
    lag : int
        lag time

    Returns
    -------
    list of lists, one bucket of sequences per discrete state.
    """
    n_states = number_of_states(dtrajs)
    # One (initially empty) bucket per state.
    buckets = [[] for _ in range(n_states)]
    for dtraj in dtrajs:
        states, seqs = _split_sequences_singletraj(dtraj, n_states, lag)
        for state, seq in zip(states, seqs):
            buckets[state].append(seq)
    return buckets
|
def load_config(self, config):
    """Load the outputs section of the configuration file.

    :param config: parsed configuration object (or None); only the
        'outputs' section is consulted here.
    """
    # Limit the number of processes to display in the WebUI
    if config is not None and config.has_section('outputs'):
        logger.debug('Read number of processes to display in the WebUI')
        n = config.get_value('outputs', 'max_processes_display', default=None)
        # NOTE(review): 'n' is only logged here and never stored on self --
        # confirm whether the value should be assigned to an attribute.
        logger.debug('Number of processes to display in the WebUI: {}'.format(n))
|
def trimLeft(self, amount):
    """Trim this fastqSequence in-place by removing <amount> nucleotides
    from the 5' end (left end).

    :param amount: the number of nucleotides to trim from the left-side of
        this sequence.
    """
    if amount != 0:
        # Drop the same prefix from the bases and their quality string so
        # the two stay aligned.
        self.sequenceData = self.sequenceData[amount:]
        self.sequenceQual = self.sequenceQual[amount:]
|
def function_call(self, function, arguments):
    """Generate code for a function call.

    :param function: function index in the symbol table.
    :param arguments: list of argument indexes in the symbol table.
    """
    # Push each argument onto the stack, releasing its register if any.
    for argument in arguments:
        self.push(self.symbol(argument))
        self.free_if_register(argument)
    self.newline_text("CALL\t" + self.symtab.get_name(function), True)
    # Emit stack cleanup when the function takes arguments: bump the stack
    # pointer (%15) by 4 bytes per argument.
    arg_count = self.symtab.get_attribute(function)
    if arg_count > 0:
        cleanup_const = self.symtab.insert_constant("{0}".format(arg_count * 4), SharedData.TYPES.UNSIGNED)
        self.arithmetic("+", "%15", cleanup_const, "%15")
|
def divide(n, iterable):
    """Split an iterable into n groups, per
    https://more-itertools.readthedocs.io/en/latest/api.html#grouping

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up
    :return: a list of new iterables derived from the original iterable
    :rtype: list
    """
    items = tuple(iterable)
    base, remainder = divmod(len(items), n)
    # The first `remainder` groups receive one extra element each.
    groups = []
    offset = 0
    for index in range(n):
        length = base + (1 if index < remainder else 0)
        groups.append(iter(items[offset:offset + length]))
        offset += length
    return groups
|
def from_content(cls, content):
    """Parses a Tibia.com response into a House object.

    Parameters
    ----------
    content: :class:`str`
        HTML content of the page.

    Returns
    -------
    :class:`House`
        The house contained in the page, or None if the house doesn't exist.

    Raises
    ------
    InvalidContent
        If the content is not the house section on Tibia.com
    """
    parsed_content = parse_tibiacom_content(content)
    image_column, desc_column, *_ = parsed_content.find_all('td')
    # Tibia.com renders an "Error" cell when the requested house is unknown.
    if "Error" in image_column.text:
        return None
    image = image_column.find('img')
    # Turn <br> tags into newlines so the description can be split by line.
    for br in desc_column.find_all("br"):
        br.replace_with("\n")
    description = desc_column.text.replace("\u00a0", " ").replace("\n\n", "\n")
    lines = description.splitlines()
    try:
        # Expected line order: name, beds, info, state.
        name, beds, info, state, *_ = lines
    except ValueError:
        raise InvalidContent("content does is not from the house section of Tibia.com")
    house = cls(name.strip())
    house.image_url = image["src"]
    # The numeric house id is only available embedded in the image URL.
    house.id = int(id_regex.search(house.image_url).group(1))
    m = bed_regex.search(beds)
    if m:
        house.type = HouseType.GUILDHALL if m.group("type") in ["guildhall", "clanhall"] else HouseType.HOUSE
        beds_word = m.group("beds")
        if beds_word == "no":
            house.beds = 0
        else:
            # Bed counts are spelled out in words on the page.
            house.beds = parse_number_words(beds_word)
    m = info_regex.search(info)
    if m:
        house.world = m.group("world")
        house.rent = int(m.group("rent"))
        house.size = int(m.group("size"))
    house._parse_status(state)
    return house
|
def reset(self):
    '''Reset the state of the container and its internal fields'''
    super(Container, self).reset()
    # Reset every contained field and rewind the field cursor.
    for contained_field in self._fields:
        contained_field.reset()
    self._field_idx = 0
|
def __create_remote_webdriver_from_config(self, testname=None):
    '''Reads the config value for browser type and creates a remote webdriver.

    :param testname: optional test name folded into the desired capabilities.
    :return: a connected ``webdriver.Remote`` instance.
    '''
    desired_capabilities = self._generate_desired_capabilities(testname)
    remote_url = self._config_reader.get(WebDriverFactory.REMOTE_URL_CONFIG)
    # Instantiate remote webdriver.
    driver = webdriver.Remote(desired_capabilities=desired_capabilities, command_executor=remote_url)
    # Log IP Address of node if configured, so it can be used to
    # troubleshoot issues if they occur.
    log_driver_props = self._config_reader.get(WebDriverFactory.LOG_REMOTEDRIVER_PROPS, default_value=False) in [True, "true", "TRUE", "True"]
    if "wd/hub" in remote_url and log_driver_props:
        try:
            grid_addr = remote_url[:remote_url.index("wd/hub")]
            # Ask the grid hub which node is serving this session.
            info_request_response = urllib2.urlopen(grid_addr + "grid/api/testsession?session=" + driver.session_id, "", 5000)
            node_info = info_request_response.read()
            _wtflog.info(u("RemoteWebdriver using node: ") + u(node_info).strip())
        except:
            # Unable to get IP Address of remote webdriver.
            # This happens with many 3rd party grid providers as they don't want you accessing info on nodes on
            # their internal network.
            pass
    return driver
|
def dbnsfp(self):
    """Run dbNSFP annotation over self.vcf_file, timing the run."""
    tstart = datetime.now()
    # Historical invocations kept for reference:
    # python ../scripts/annotate_vcfs.py -i mm13173_14.ug.target1.vcf -r 1000genomes dbsnp138 clinvar esp6500 -a ../data/1000genomes/ALL.wgs.integrated_phase1_v3.20101123.snps_indels_sv.sites.vcf.gz ../data/dbsnp138/00-All.vcf.gz ../data/dbsnp138/clinvar_00-latest.vcf.gz ../data/ESP6500/ESP6500.vcf.gz
    # command = 'python %s/cadd_dann.py -n %s -i sanity_check/checked.vcf 2> log/cadd_dann.log' % (scripts_dir, cadd_vest_cores)
    # self.shell(command)
    db = dbnsfp.Dbnsfp(self.vcf_file, settings.dbnsfp_cores)
    db.run()
    tend = datetime.now()
    # NOTE(review): execution_time is computed but never logged, stored, or
    # returned -- confirm whether it should be reported somewhere.
    execution_time = tend - tstart
|
def diff_json_files(left_files, right_files):
    '''Compute the difference between two sets of basis set JSON files.

    For each file in `left_files`, writes a companion file (same path with
    `.diff` appended, overwriting any existing one) containing only the
    elements/shells present in that file and absent from every file in
    `right_files`. Subtraction works at the shell level and only removes
    identical shells; ECP potentials are not affected.

    Parameters
    ----------
    left_files : list of str
        Paths to JSON files to use as the base
    right_files : list of str
        Paths to JSON files to subtract from each file of `left_files`

    Returns
    -------
    None
    '''
    left_data = [fileio.read_json_basis(path) for path in left_files]
    right_data = [fileio.read_json_basis(path) for path in right_files]
    # The diff result is positionally aligned with left_files.
    for source_path, diff_bs in zip(left_files, diff_basis_dict(left_data, right_data)):
        fileio.write_json_basis(source_path + '.diff', diff_bs)
|
def confd_state_rest_listen_tcp_port ( self , ** kwargs ) :
"""Auto Generated Code"""
|
config = ET . Element ( "config" )
confd_state = ET . SubElement ( config , "confd-state" , xmlns = "http://tail-f.com/yang/confd-monitoring" )
rest = ET . SubElement ( confd_state , "rest" )
listen = ET . SubElement ( rest , "listen" )
tcp = ET . SubElement ( listen , "tcp" )
port = ET . SubElement ( tcp , "port" )
port . text = kwargs . pop ( 'port' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config )
|
def on_before_transform_template(self, template_dict):
    """Hook method that gets called before the SAM template is processed.

    The template has passed the validation and is guaranteed to contain a
    non-empty "Resources" section.

    :param dict template_dict: Dictionary of the SAM template
    :return: Nothing
    :raises InvalidDocumentException: if any function declares invalid API events.
    """
    template = SamTemplate(template_dict)
    # Temporarily add Serverless::Api resource corresponding to Implicit API to the template.
    # This will allow the processing code to work the same way for both Implicit & Explicit APIs
    # If there are no implicit APIs, we will remove from the template later.
    # If the customer has explicitly defined a resource with the id of "ServerlessRestApi",
    # capture it. If the template ends up not defining any implicit api's, instead of just
    # removing the "ServerlessRestApi" resource, we just restore what the author defined.
    self.existing_implicit_api_resource = copy.deepcopy(template.get(self.implicit_api_logical_id))
    template.set(self.implicit_api_logical_id, ImplicitApiResource().to_dict())
    errors = []
    for logicalId, function in template.iterate(SamResourceType.Function.value):
        api_events = self._get_api_events(function)
        condition = function.condition
        # Functions without API events need no API processing.
        if len(api_events) == 0:
            continue
        try:
            self._process_api_events(function, api_events, template, condition)
        except InvalidEventException as ex:
            # Collect errors rather than failing fast, so every invalid
            # resource is reported together at the end.
            errors.append(InvalidResourceException(logicalId, ex.message))
    self._maybe_add_condition_to_implicit_api(template_dict)
    self._maybe_add_conditions_to_implicit_api_paths(template)
    self._maybe_remove_implicit_api(template)
    if len(errors) > 0:
        raise InvalidDocumentException(errors)
|
def _clean_dic ( self , dic ) :
"""Clean recursively all empty or None values inside a dict ."""
|
aux_dic = dic . copy ( )
for key , value in iter ( dic . items ( ) ) :
if value is None or value == '' :
del aux_dic [ key ]
elif type ( value ) is dict :
cleaned_dict = self . _clean_dic ( value )
if not cleaned_dict :
del aux_dic [ key ]
continue
aux_dic [ key ] = cleaned_dict
return aux_dic
|
def linear_insert(self, item, priority):
    """Insert (item, priority) keeping the deque sorted by descending
    priority. Linear search. Performance is O(n^2).

    :param item: payload to enqueue; unhashable items are counted under
        their repr() in self.items.
    :param priority: sort key; higher priorities end up nearer the front.
    """
    with self.lock:
        # Bind hot attributes to locals for the scan below.
        self_data = self.data
        rotate = self_data.rotate
        maxlen = self._maxlen
        length = len(self_data)
        count = length
        # in practice, this is better than doing a rotate(-1) every
        # loop and getting self.data[0] each time only because deque
        # implements a very efficient iterator in C
        for i in self_data:
            # Entries are (item, priority) pairs; stop at the first entry
            # with lower priority than the one being inserted.
            if priority > i[1]:
                break
            count -= 1
        # Rotate the insertion point to the left end, insert, then rotate
        # back so the rest of the deque keeps its order.
        rotate(-count)
        self_data.appendleft((item, priority))
        rotate(length - count)
        try:
            self.items[item] += 1
        except TypeError:
            # Unhashable item: fall back to counting by its repr().
            self.items[repr(item)] += 1
        # Honor the configured bound by evicting from the tail.
        if maxlen is not None and maxlen < len(self_data):
            self._poplast()
|
def _evaluate_sql_query_subprocess(self, predicted_query: str, sql_query_labels: List[str]) -> int:
    """Check whether the predicted query evaluates to the exact same table
    as any of the gold queries.

    This method is only called by the subprocess, so it exits with status 1
    if the prediction is correct and 0 otherwise (including when the
    predicted query fails to execute at all).

    :param predicted_query: SQL string produced by the model.
    :param sql_query_labels: candidate gold SQL strings.
    """
    postprocessed_predicted_query = self.postprocess_query_sqlite(predicted_query)
    try:
        self._cursor.execute(postprocessed_predicted_query)
        predicted_rows = self._cursor.fetchall()
    except sqlite3.Error as error:
        logger.warning(f'Error executing predicted: {error}')
        exit(0)
    # If predicted table matches any of the reference tables then it is
    # counted as correct.
    target_rows = None
    for sql_query_label in sql_query_labels:
        postprocessed_sql_query_label = self.postprocess_query_sqlite(sql_query_label)
        try:
            self._cursor.execute(postprocessed_sql_query_label)
            target_rows = self._cursor.fetchall()
        except sqlite3.Error as error:
            # Bug fix: this branch executes the *label* query, but the
            # message previously said "predicted" (copy-paste error).
            logger.warning(f'Error executing label: {error}')
        if predicted_rows == target_rows:
            exit(1)
    exit(0)
|
def running_window(iterable, size):
    """Generate a running (sliding) window of length *size*.

    Example::

        >>> for window in running_window([1, 2, 3, 4, 5], size=3):
        ...     print(window)
        [1, 2, 3]
        [2, 3, 4]
        [3, 4, 5]
    """
    if size > len(iterable):
        raise ValueError("size can not be greater than length of iterable.")
    # A bounded deque automatically drops the oldest element as we slide.
    window = collections.deque(maxlen=size)
    for element in iterable:
        window.append(element)
        if len(window) == size:
            yield list(window)
|
def add_domain_to_toctree(app, doctree, docname):
    """Add domain objects to the toctree dynamically.

    This should be attached to the ``doctree-resolved`` event.

    This works by:

    * Finding each domain node (addnodes.desc)
    * Figuring out its parent that will be in the toctree
      (nodes.section, or a previously added addnodes.desc)
    * Finding that parent in the TOC Tree based on its ID
    * Taking that element in the TOC Tree,
      and finding its parent that is a TOC Listing (nodes.bullet_list)
    * Adding the new TOC element for our specific node as a child of that
      nodes.bullet_list. This checks that bullet_list's last child,
      and checks that it is also a nodes.bullet_list,
      effectively nesting it under that element
    """
    toc = app.env.tocs[docname]
    for desc_node in doctree.traverse(addnodes.desc):
        try:
            ref_id = desc_node.children[0].attributes["ids"][0]
        except (KeyError, IndexError) as e:
            # A desc without an anchor id cannot be linked to -- skip it.
            LOGGER.warning("Invalid desc node: %s" % e)
            continue
        try:
            # Python domain object
            ref_text = desc_node[0].attributes["fullname"].split(".")[-1].split("(")[0]
        except (KeyError, IndexError):
            # TODO[eric]: Support other Domains and ways of accessing this data
            # Use `astext` for other types of domain objects
            ref_text = desc_node[0].astext().split(".")[-1].split("(")[0]
        # This is the actual object that will exist in the TOC Tree
        # Sections by default, and other Desc nodes that we've previously placed.
        parent_node = _traverse_parent(node=desc_node, objtypes=(addnodes.desc, nodes.section))
        if parent_node:
            toc_reference = _get_toc_reference(app, parent_node, toc, docname)
            if toc_reference:
                # Get the last child of our parent's bullet list, this is where "we" live.
                toc_insertion_point = _traverse_parent(toc_reference, nodes.bullet_list)[-1]
                # Ensure we're added another bullet list so that we nest inside the parent,
                # not next to it
                if toc_insertion_point and isinstance(toc_insertion_point[0], nodes.bullet_list):
                    new_insert = toc_insertion_point[0]
                    to_add = _build_toc_node(docname, anchor=ref_id, text=ref_text)
                    new_insert.append(to_add)
                else:
                    # No nested list yet: create the entry as its own bullet.
                    to_add = _build_toc_node(docname, anchor=ref_id, text=ref_text, bullet=True)
                    toc_insertion_point.append(to_add)
|
def is_blocked(self, ip):
    """Determine if an IP address should be considered blocked.

    An address is allowed when it appears verbatim in
    ``self.allowed_admin_ips`` or falls inside any network listed in
    ``self.allowed_admin_ip_ranges``; everything else is blocked.

    :param ip: IP address string (IPv4 or IPv6).
    :return: True when the address is blocked, False when allowed.
    """
    if ip in self.allowed_admin_ips:
        return False
    if self.allowed_admin_ip_ranges:
        # Parse once (previously re-parsed per range) and stop scanning at
        # the first matching range instead of iterating them all.
        address = ipaddress.ip_address(ip)
        return not any(
            address in ipaddress.ip_network(allowed_range)
            for allowed_range in self.allowed_admin_ip_ranges
        )
    return True
|
def wait(self, log_file):
    "Wait until the process is ready."
    # Normalize the filtered raw lines, then look for the readiness pattern.
    normalized = (
        self.log_line(raw)
        for raw in self.filter_lines(self.get_lines(log_file))
    )
    return any(std.re.search(self.pattern, line) for line in normalized)
|
def set_config_for_routing_entity(self, routing_entity: Union[web.Resource, web.StaticResource, web.ResourceRoute], config):
    """Record CORS configuration for a resource or one of its routes.

    :param routing_entity: aiohttp ``Resource``, ``StaticResource``, or
        ``ResourceRoute`` the configuration applies to.
    :param config: CORS configuration to attach.
    :raises ValueError: if configuration was already set for the entity, or
        the entity type is unsupported.
    """
    if isinstance(routing_entity, (web.Resource, web.StaticResource)):
        resource = routing_entity
        # Add resource configuration or fail if it's already added.
        if resource in self._resource_config:
            raise ValueError("CORS is already configured for {!r} resource.".format(resource))
        self._resource_config[resource] = _ResourceConfig(default_config=config)
    elif isinstance(routing_entity, web.ResourceRoute):
        route = routing_entity
        # Add resource's route configuration or fail if it's already added.
        if route.resource not in self._resource_config:
            # Register the parent resource first, reusing `config` as its
            # default configuration.
            self.set_config_for_routing_entity(route.resource, config)
        if route.resource not in self._resource_config:
            raise ValueError("Can't setup CORS for {!r} request, " "CORS must be enabled for route's resource first.".format(route))
        resource_config = self._resource_config[route.resource]
        # Each HTTP method may carry at most one CORS configuration.
        if route.method in resource_config.method_config:
            raise ValueError("Can't setup CORS for {!r} route: CORS already " "configured on resource {!r} for {} method".format(route, route.resource, route.method))
        resource_config.method_config[route.method] = config
    else:
        raise ValueError("Resource or ResourceRoute expected, got {!r}".format(routing_entity))
|
def space_labels(document):
    """Ensure space around bold compound labels.

    Inserts a space before and after each <bold> element whose text looks
    like a compound label such as "(1)", "(2a):" or "(L3)".

    :param document: lxml element tree; modified in place.
    :return: the same document.
    """
    for label in document.xpath('.//bold'):
        # TODO: Make this more permissive to match chemical_label in parser
        # Bug fix: the pattern is now a raw string; previously "\(" and
        # "\d" were invalid escape sequences in a plain string literal.
        if not label.text or not re.match(r'^\(L?\d\d?[a-z]?\):?$', label.text, re.I):
            continue
        parent = label.getparent()
        previous = label.getprevious()
        # Ensure a space immediately before the label...
        if previous is None:
            text = parent.text or ''
            if not text.endswith(' '):
                parent.text = text + ' '
        else:
            text = previous.tail or ''
            if not text.endswith(' '):
                previous.tail = text + ' '
        # ...and immediately after it.
        text = label.tail or ''
        if not text.endswith(' '):
            label.tail = text + ' '
    return document
|
def add_case(self, case_obj):
    """Add a case obj with individuals to adapter

    Args:
        case_obj (puzzle.models.Case)
    """
    # Register every individual belonging to the case first.
    for individual in case_obj.individuals:
        self._add_individual(individual)
    logger.debug("Adding case {0} to plugin".format(case_obj.case_id))
    self.case_objs.append(case_obj)
    if case_obj.tabix_index:
        # A tabix index makes range filtering possible.
        logger.debug("Setting filters.can_filter_range to True")
        self.filters.can_filter_range = True
|
def save_object(collection, obj):
    """Save an object ``obj`` to the given ``collection``.

    ``obj.id`` must be unique across all other existing objects in
    the given collection. If ``id`` is not present in the object, a
    *UUID* is assigned as the object's ``id``.

    Indexes already defined on the ``collection`` are updated after
    the object is saved.

    Returns the object.
    """
    if 'id' not in obj:
        obj.id = uuid()
    obj_id = obj.id
    path = object_path(collection, obj_id)
    # Write to a temp file and move it into place so readers never see a
    # partially-written object.
    temp_path = '%s.temp' % path
    with open(temp_path, 'w') as handle:
        handle.write(_serialize(obj))
    shutil.move(temp_path, path)
    if obj_id in _db[collection].cache:
        _db[collection].cache[obj_id] = obj
    _update_indexes_for_mutated_object(collection, obj)
    return obj
|
def update_http_rules(rules, content_type='text/plain'):
    """Adds rules to global http mock.

    It permits to set mock in a more global way than decorators, cf.:
    https://github.com/openstack/requests-mock

    Here we assume urls in the passed dict are regex we recompile before
    adding a rule. Each rule is a dict with 'method', 'url' and any other
    keyword accepted by ``register_uri`` (e.g. 'status_code', 'text' --
    which may be a callback).
    """
    # Work on a deep copy so the caller's rule dicts are never mutated.
    for rule in deepcopy(rules):
        rule['url'] = re.compile(rule['url'])
        # ensure headers dict for at least have a default content type
        headers = dict(rule.get('headers', {}))
        if 'Content-Type' not in headers:
            headers['Content-Type'] = content_type
        rule['headers'] = headers
        method = rule.pop('method')
        url = rule.pop('url')
        http_mock.register_uri(method, url, **rule)
|
def dallinger_package_path():
    """Return the absolute path of the root directory of the installed
    Dallinger package, e.g.::

        >>> utils.dallinger_package_location()
        '/Users/janedoe/projects/Dallinger3/dallinger'
    """
    distribution = get_distribution("dallinger")
    return os.path.join(distribution.location, distribution.project_name)
|
def getmeths(method_type):
    """Return the MagIC method codes available for a given type.

    :param method_type: method-code category; only 'GM' (geochronology
        methods) is currently populated.
    :return: list of method-code strings, in the original order; an empty
        list for any other type.
    """
    # Replaces a 31-line chain of .append() calls with a single literal;
    # code order is preserved exactly.
    if method_type != 'GM':
        return []
    return [
        'GM-PMAG-APWP', 'GM-ARAR', 'GM-ARAR-AP', 'GM-ARAR-II',
        'GM-ARAR-NI', 'GM-ARAR-TF', 'GM-CC-ARCH', 'GM-CC-ARCHMAG',
        'GM-C14', 'GM-FOSSIL', 'GM-FT', 'GM-INT-L', 'GM-INT-S',
        'GM-ISO', 'GM-KAR', 'GM-PMAG-ANOM', 'GM-PMAG-POL', 'GM-PBPB',
        'GM-RATH', 'GM-RBSR', 'GM-RBSR-I', 'GM-RBSR-MA', 'GM-SMND',
        'GM-SMND-I', 'GM-SMND-MA', 'GM-CC-STRAT', 'GM-LUM-TH',
        'GM-UPA', 'GM-UPB', 'GM-UTH', 'GM-UTHHE',
    ]
|
def _setup_appium ( self ) :
"""Setup Appium webdriver
: returns : a new remote Appium driver"""
|
self . config . set ( 'Server' , 'host' , '127.0.0.1' )
self . config . set ( 'Server' , 'port' , '4723' )
return self . _create_remote_driver ( )
|
def rdann(record_name, extension, sampfrom=0, sampto=None, shift_samps=False, pb_dir=None, return_label_elements=['symbol'], summarize_labels=False):
    """Read a WFDB annotation file record_name.extension and return an
    Annotation object.

    Parameters
    ----------
    record_name : str
        The record name of the WFDB annotation file. ie. for file '100.atr',
        record_name='100'.
    extension : str
        The annotatator extension of the annotation file. ie. for file
        '100.atr', extension='atr'.
    sampfrom : int, optional
        The minimum sample number for annotations to be returned.
    sampto : int, optional
        The maximum sample number for annotations to be returned.
    shift_samps : bool, optional
        Specifies whether to return the sample indices relative to `sampfrom`
        (True), or sample 0 (False).
    pb_dir : str, optional
        Option used to stream data from Physiobank. The Physiobank database
        directory from which to find the required annotation file. eg. For
        record '100' in 'http://physionet.org/physiobank/database/mitdb':
        pb_dir='mitdb'.
    return_label_elements : list, optional
        The label elements that are to be returned from reading the annotation
        file. A list with at least one of the following options: 'symbol',
        'label_store', 'description'.
    summarize_labels : bool, optional
        If True, assign a summary table of the set of annotation labels
        contained in the file to the 'contained_labels' attribute of the
        returned object. This table will contain the columns:
        ['label_store', 'symbol', 'description', 'n_occurrences']

    Returns
    -------
    annotation : Annotation
        The Annotation object. Call help(wfdb.Annotation) for the attribute
        descriptions.

    Notes
    -----
    For every annotation sample, the annotation file explictly stores the
    'sample' and 'symbol' fields, but not necessarily the others. When reading
    annotation files using this function, fields which are not stored in the
    file will either take their default values of 0 or None, or will be carried
    over from their previous values if any.

    Examples
    --------
    >>> ann = wfdb.rdann('sample-data/100', 'atr', sampto=300000)

    """
    return_label_elements = check_read_inputs(sampfrom, sampto, return_label_elements)
    # Read the file in byte pairs
    filebytes = load_byte_pairs(record_name, extension, pb_dir)
    # Get wfdb annotation fields from the file bytes
    (sample, label_store, subtype, chan, num, aux_note) = proc_ann_bytes(filebytes, sampto)
    # Get the indices of annotations that hold definition information about
    # the entire annotation file, and other empty annotations to be removed.
    potential_definition_inds, rm_inds = get_special_inds(sample, label_store, aux_note)
    # Try to extract information describing the annotation file
    (fs, custom_labels) = interpret_defintion_annotations(potential_definition_inds, aux_note)
    # Remove annotations that do not store actual sample and label information
    (sample, label_store, subtype, chan, num, aux_note) = rm_empty_indices(rm_inds, sample, label_store, subtype, chan, num, aux_note)
    # Convert lists to numpy arrays dtype='int'
    (sample, label_store, subtype, chan, num) = lists_to_int_arrays(sample, label_store, subtype, chan, num)
    # Try to get fs from the header file if it is not contained in the
    # annotation file
    if fs is None:
        # NOTE(review): the bare except silently swallows *all* failures
        # (missing or malformed header, network errors) -- fs stays None.
        try:
            rec = record.rdheader(record_name, pb_dir)
            fs = rec.fs
        except:
            pass
    # Create the annotation object
    annotation = Annotation(record_name=os.path.split(record_name)[1], extension=extension, sample=sample, label_store=label_store, subtype=subtype, chan=chan, num=num, aux_note=aux_note, fs=fs, custom_labels=custom_labels)
    # Apply the desired index range
    if sampfrom > 0 and sampto is not None:
        annotation.apply_range(sampfrom=sampfrom, sampto=sampto)
    # If specified, obtain annotation samples relative to the starting
    # index
    if shift_samps and len(sample) > 0 and sampfrom:
        annotation.sample = annotation.sample - sampfrom
    # Get the set of unique label definitions contained in this
    # annotation
    if summarize_labels:
        annotation.get_contained_labels(inplace=True)
    # Set/unset the desired label values
    annotation.set_label_elements(return_label_elements)
    return annotation
|
def create(self, image=None):
    """Create content in the context container and return its URL.

    :param image: optional uploaded file object (needs ``.read()`` and
        ``.filename``); stored on the new object as a NamedBlobImage.
    :return: absolute URL of the created content object.
    """
    container = self.context
    new = api.content.create(container=container, type=self.portal_type, title=self.title, safe_id=True, )
    if image:
        namedblobimage = NamedBlobImage(data=image.read(), filename=safe_unicode(image.filename))
        new.image = namedblobimage
    if new:
        new.description = safe_unicode(self.description)
    return new.absolute_url()
|
def server_identity_is_verified(self):
    """GPGAuth stage0: verify the server controls its advertised key.

    A nonce is encrypted to the server's fingerprint; the server must
    decrypt it and echo it back unchanged.

    :return: True when the server identity is verified.
    :raises GPGAuthStage0Exception: on encryption failure, malformed verify
        response, or nonce mismatch.
    """
    # Encrypt a uuid token for the server
    server_verify_token = self.gpg.encrypt(self._nonce0, self.server_fingerprint, always_trust=True)
    if not server_verify_token.ok:
        raise GPGAuthStage0Exception('Encryption of the nonce0 (%s) ' 'to the server fingerprint (%s) failed.' % (self._nonce0, self.server_fingerprint))
    server_verify_response = post_server_verify_token(self, keyid=self.user_fingerprint, server_verify_token=str(server_verify_token))
    if not check_server_verify_response(server_verify_response):
        raise GPGAuthStage0Exception("Verify endpoint wrongly formatted")
    # The server proves ownership of the key by returning the decrypted
    # nonce in a response header.
    if server_verify_response.headers.get('X-GPGAuth-Verify-Response') != self._nonce0:
        raise GPGAuthStage0Exception('The server decrypted something different than what we sent ' '(%s <> %s)' % (server_verify_response.headers.get('X-GPGAuth-Verify-Response'), self._nonce0))
    logger.info('server_identity_is_verified: OK')
    return True
|
def run(self, workflow_input, *args, **kwargs):
    '''
    :param workflow_input: Dictionary of the workflow's input arguments; see below for more details
    :type workflow_input: dict
    :param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
    :type instance_type: string or dict
    :param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
    :type stage_instance_types: dict
    :param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
    :type stage_folders: dict
    :param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
    :type rerun_stages: list of strings
    :param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled
    :type ignore_reuse_stages: list
    :returns: Object handler of the newly created analysis
    :rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`

    Run the associated workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.

    When providing input for the workflow, keys should be of one of the following forms:

    * "N.name" where *N* is the stage number, and *name* is the
      name of the input, e.g. "0.reads" if the first stage takes
      in an input called "reads"
    * "stagename.name" where *stagename* is the stage name, and
      *name* is the name of the input within the stage
    * "stageID.name" where *stageID* is the stage ID, and *name*
      is the name of the input within the stage
    * "name" where *name* is the name of a workflow level input
      (defined in inputs) or the name that has been
      exported for the workflow (this name will appear as a key
      in the "inputSpec" of this workflow's description if it has
      been exported for this purpose)
    '''
    # Pure delegation: all argument processing happens in DXExecutable.run.
    return super(DXWorkflow, self).run(workflow_input, *args, **kwargs)
|
def comment(self, s, **args):
    """Emit *s* as a DOT-language comment line (prefixed with ``// ``)."""
    prefix = u"// "
    self.write(prefix)
    self.writeln(s=s, **args)
|
def blacklistNode(self, nodeName: str, reason: str = None, code: int = None):
    """Add the node specified by `nodeName` to this node's blacklist.

    :param nodeName: name of the node to blacklist
    :param reason: optional human-readable reason, included in the log line
    :param code: optional numeric code, included in the log line
    """
    parts = ["{} blacklisting node {}".format(self, nodeName)]
    if reason:
        parts.append(" for reason {}".format(reason))
    if code:
        parts.append(" for code {}".format(code))
    logger.display("".join(parts))
    self.nodeBlacklister.blacklist(nodeName)
|
def write_file(content, *path):
    """Write *content* to the file at ``os.path.join(*path)``, overwriting
    any existing file, and return the number of characters written."""
    target = os.path.join(*path)
    with open(target, "w") as handle:
        return handle.write(content)
|
def recurse_tree(path, excludes, opts):
    """Look for every file in the directory tree rooted at *path* and create
    the corresponding ReST files.

    :param path: root directory of the package tree to document
    :param excludes: exclusion patterns forwarded to ``is_excluded``
    :param opts: options object; this function reads ``opts.notoc``
    """
    # use absolute path for root, as relative paths like '../../foo' cause
    # 'if "/." in root ...' to filter out *all* modules otherwise
    path = os.path.abspath(path)
    # check if the base directory is itself a package and get its name
    if INIT in os.listdir(path):
        package_name = path.split(os.path.sep)[-1]
    else:
        package_name = None
    toc = []  # accumulated entry names for the modules index
    # bottom-up walk (topdown=False)
    tree = os.walk(path, False)
    for root, subs, files in tree:
        # keep only the Python script files, with the package INIT file first
        py_files = sorted([f for f in files if os.path.splitext(f)[1] == '.py'])
        if INIT in py_files:
            py_files.remove(INIT)
            py_files.insert(0, INIT)
        # remove hidden ('.') and private ('_') directories
        # NOTE(review): with topdown=False this rebinding cannot prune the
        # walk itself; it only filters what is passed on below — confirm intent
        subs = sorted([sub for sub in subs if sub[0] not in ['.', '_']])
        # check if there are valid files to process
        # TODO: could add check for windows hidden files
        if "/." in root or "/_" in root or not py_files or is_excluded(root, excludes):
            continue
        if INIT in py_files:
            # we are in a package ...
            if (  # ... with subpackage(s)
                    subs or
                    # ... with some module(s)
                    len(py_files) > 1 or
                    # ... with a not-to-be-skipped INIT file
                    not shall_skip(os.path.join(root, INIT))):
                # dotted sub-package path relative to the root package
                subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '.')
                create_package_file(root, package_name, subroot, py_files, opts, subs)
                toc.append(makename(package_name, subroot))
        elif root == path:
            # at the root level we don't require the directory to be a package:
            # emit one ReST file per plain module
            for py_file in py_files:
                if not shall_skip(os.path.join(path, py_file)):
                    module = os.path.splitext(py_file)[0]
                    create_module_file(package_name, module, opts)
                    toc.append(makename(package_name, module))
    # create the modules index unless suppressed
    if not opts.notoc:
        create_modules_toc_file(package_name, toc, opts)
|
def rename(self, dn: str, new_rdn: str, new_base_dn: Optional[str] = None) -> None:
    """Rename a dn in the LDAP database; see the ldap module.

    Registers a commit action performing the rename and a rollback action
    reversing it.  NOTE(review): despite the ``-> None`` annotation this
    returns whatever ``self._process`` returns — confirm which is intended.

    :param dn: distinguished name of the entry to rename
    :param new_rdn: new relative distinguished name (must be a single RDN)
    :param new_base_dn: optional new parent DN to move the entry under
    """
    _debug("rename", self, dn, new_rdn, new_base_dn)
    # split up the parameters
    split_dn = tldap.dn.str2dn(dn)
    split_newrdn = tldap.dn.str2dn(new_rdn)
    # new_rdn must be a bare RDN, not a full DN
    assert (len(split_newrdn) == 1)
    # make dn unqualified (just the leading RDN, for the rollback rename)
    rdn = tldap.dn.dn2str(split_dn[0:1])
    # make newrdn a fully qualified dn
    tmplist = [split_newrdn[0]]
    if new_base_dn is not None:
        # moving under a new parent: remember the old parent for rollback
        tmplist.extend(tldap.dn.str2dn(new_base_dn))
        old_base_dn = tldap.dn.dn2str(split_dn[1:])
    else:
        # staying under the same parent
        tmplist.extend(split_dn[1:])
        old_base_dn = None
    newdn = tldap.dn.dn2str(tmplist)
    _debug("--> commit ", self, dn, new_rdn, new_base_dn)
    _debug("--> rollback", self, newdn, rdn, old_base_dn)
    # on commit carry out action; on rollback reverse the rename
    def on_commit(obj):
        obj.modify_dn(dn, new_rdn, new_superior=new_base_dn)
    def on_rollback(obj):
        obj.modify_dn(newdn, rdn, new_superior=old_base_dn)
    return self._process(on_commit, on_rollback)
|
def _pick_unused_port_without_server():  # Protected. pylint: disable=invalid-name
    """Pick an available network port without the help of a port server.

    This code ensures that the port is available on both TCP and UDP.
    This function is an implementation detail of PickUnusedPort(), and
    should not be called by code outside of this module.

    Returns:
      A port number that is unused on both TCP and UDP.

    Raises:
      NoFreePortFoundError: No free port could be found.
    """
    # Phase 1: probe a handful of random ports in [15000, 25000).
    rng = random.Random()
    for _ in range(10):
        candidate = int(rng.randrange(15000, 25000))
        if is_port_free(candidate):
            _random_ports.add(candidate)
            return candidate
    # Phase 2: ask the OS for a port on the first protocol, then verify it is
    # also free on the second.  (Always TCP first: on the 2.6 kernel, Bind()
    # on a UDP socket returns the same port over and over.)
    first_proto, second_proto = _PROTOS
    for _ in range(10):
        candidate = bind(0, first_proto[0], first_proto[1])
        if candidate and bind(candidate, second_proto[0], second_proto[1]):
            _random_ports.add(candidate)
            return candidate
    # Give up.
    raise NoFreePortFoundError()
|
def findPolymorphisms(self, strSeq, strict=False):
    """Compare *strSeq* with ``self.sequence`` and return the positions
    where they differ.

    If not *strict*, positions where the two sequences share at least one
    allele are ignored (e.g. strSeq[i] == 'A' matches self[i] == 'A/G',
    since the encoded values are bit-masks and a common bit means a match).
    If *strict*, every position where the encoded values are not identical
    is returned.

    :param strSeq: sequence string to compare against this sequence
    :param strict: require exact equality of encoded values when True
    :return: list of differing positions (ints), over the overlapping prefix
    """
    arr = self.encode(strSeq)[0]
    # Only the overlapping prefix of the two sequences can be compared.
    # BUGFIX: the original loop ran to len(arr) + len(self) and broke on
    # `i >= len(arr) or i > len(self)` — the `>` (instead of `>=`) let
    # i == len(self) through, indexing past the end of the shorter
    # sequence whenever strSeq was longer than self.
    limit = min(len(arr), len(self))
    if not strict:
        # A zero bitwise AND means the position shares no allele at all.
        return [i for i in range(limit) if arr[i] & self[i] == 0]
    return [i for i in range(limit) if arr[i] != self[i]]
|
def get_editor_nodes(self, editor, node=None):
    """Returns the :class:`umbra.components.factory.script_editor.nodes.EditorNode`
    class Nodes associated with the given editor.

    :param editor: Editor to match against each node's ``editor`` attribute.
    :type editor: Editor
    :param node: Node to start walking from.
    :type node: AbstractNode or AbstractCompositeNode or Object
    :return: EditorNode nodes.
    :rtype: list
    """
    matches = []
    for candidate in self.list_editor_nodes(node):
        if candidate.editor == editor:
            matches.append(candidate)
    return matches
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.