signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def slugify(value, allow_unicode=False):
    """Normalize a string, lowercase it, strip non-alphanumeric characters,
    and convert runs of spaces/hyphens to a single hyphen.

    :param value: string to slugify
    :param allow_unicode: allow utf8 characters (keep accents) instead of
        transliterating to ASCII
    :type allow_unicode: bool
    :return: slugified string
    :rtype: str
    :Example:

    >>> slugify('pekná líščička')
    'pekna-liscicka'
    """
    value = str(value)
    if allow_unicode:
        # NFKC keeps the accented characters as single code points.
        value = unicodedata.normalize('NFKC', value)
    else:
        # NFKD splits accents off so the ASCII encode can drop them.
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    # Drop everything that is not a word character, whitespace or hyphen.
    value = re.sub(r'[^\w\s-]', '', value, flags=re.U).strip().lower()
    # Collapse whitespace/hyphen runs into a single hyphen.
    return re.sub(r'[-\s]+', '-', value, flags=re.U)
def _make_predict_proba(self, func):
    """Wrap ``func`` so it can consume LIME's 2D inputs.

    LIME hands ``predict_proba`` 2D arrays, but the keras-style network
    given via ``explain_instance`` expects 3D input; the returned wrapper
    reshapes each batch back to (n_samples, n_timesteps, n_features)
    before delegating to ``func``.
    """
    def predict_proba(X):
        stacked_shape = (X.shape[0], self.n_features, self.n_timesteps)
        # Restore the per-sample matrix, then swap to time-major layout.
        restored = np.transpose(X.reshape(stacked_shape), axes=(0, 2, 1))
        return func(restored)

    return predict_proba
def add_callback(self, phase, fn):
    """Adds a callback to the context.

    The `phase` determines when and if the callback is executed, and which
    positional arguments are passed in:

    'enter'
      : Called from `rhino.Resource`, after a handler for the current
        request has been resolved, but before the handler is called.
        Arguments: request
    'leave'
      : Called from `rhino.Resource`, after the handler has returned
        successfully.  Arguments: request, response
    'finalize'
      : Called from `Mapper`, before the WSGI response is finalized.
        Arguments: request, response
    'teardown'
      : Called from `Mapper`, before control is passed back to the WSGI
        layer.  Arguments: -
    'close'
      : Called when the WSGI server calls `close()` on the response
        iterator.  Arguments: -

    'teardown' callbacks are guaranteed to run at the end of every request
    and suit cleanup tasks (closing database handles, etc.); an exception
    in one is logged but does not skip the others.  'enter', 'leave' and
    'finalize' callbacks only run when no exception occurred before they
    are reached, including exceptions raised in other callbacks.  Whether
    'close' callbacks run depends on a WSGI response being generated and
    the server calling '.close()' on the iterator as the spec requires;
    if so, all of them run regardless of exceptions, like 'teardown'.
    """
    try:
        phase_callbacks = self.__callbacks[phase]
    except KeyError:
        raise KeyError("Invalid callback phase '%s'. Must be one of %s" % (phase, _callback_phases))
    phase_callbacks.append(fn)
def decode_response(client_message, to_object=None):
    """Decode response from client message.

    Reads, in order, the base (long), increment (long) and batch size
    (int) fields and returns them as a dict.
    """
    # Dict values are evaluated left-to-right, preserving the read order.
    return {
        'base': client_message.read_long(),
        'increment': client_message.read_long(),
        'batch_size': client_message.read_int(),
    }
def p_exprlt(p):
    """expr : expr LT expr"""
    # NOTE: the docstring above is the PLY grammar rule — do not edit it.
    # Non-numeric operands are treated as 0; the result is the string
    # '1' (true) or '0' (false).
    lhs, rhs = p[1], p[3]
    lhs_val = int(lhs) if lhs.isdigit() else 0
    rhs_val = int(rhs) if rhs.isdigit() else 0
    p[0] = '1' if lhs_val < rhs_val else '0'
def avhrr(scans_nb, scan_points, scan_angle=55.37, frequency=1 / 6.0, apply_offset=True):
    """Definition of the avhrr instrument.

    Source: NOAA KLM User's Guide, Appendix J
    http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm

    :param scans_nb: number of scan lines
    :param scan_points: sample indices across the swath (0..1023)
    :param scan_angle: half swath angle in degrees
    :param frequency: time between scan lines (seconds)
    :param apply_offset: offset each scan line's times by the line period
    :return: ScanGeometry of scan angles and sample times
    """
    # np.int was removed in NumPy 1.24; use the builtin int instead.
    scans_nb = int(scans_nb)
    # Build the instrument scan angles: map sample index to
    # [-scan_angle, +scan_angle] (radians), with zero cross-track angle.
    avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle),
                            np.zeros((len(scan_points),))))
    avhrr_inst = np.tile(avhrr_inst[:, np.newaxis, :], [1, scans_nb, 1])
    # Sampling times within one scan line: 25 microseconds per sample.
    times = np.tile(scan_points * 0.000025, [scans_nb, 1])
    if apply_offset:
        # Shift each scan line by its start time.
        offset = np.arange(scans_nb) * frequency
        times += np.expand_dims(offset, 1)
    return ScanGeometry(avhrr_inst, times)
def has_hash_of(self, destpath, code, package):
    """Determine if a file has the hash of the code.

    Returns the compiled file contents when its embedded hash matches the
    hash generated for ``code``, otherwise None.
    """
    # Guard clauses: nothing to compare against without an existing file.
    if destpath is None or not os.path.isfile(destpath):
        return None
    with openfile(destpath, "r") as opened:
        compiled = readfile(opened)
    stored_hash = gethash(compiled)
    if stored_hash is not None and stored_hash == self.comp.genhash(package, code):
        return compiled
    return None
def csd(self, other, fftlength=None, overlap=None, window='hann', **kwargs):
    """Calculate the CSD `FrequencySeries` for two `TimeSeries`.

    Parameters
    ----------
    other : `TimeSeries`
        the second `TimeSeries` in this CSD calculation
    fftlength : `float`
        number of seconds in single FFT, defaults to a single FFT
        covering the full duration
    overlap : `float`, optional
        number of seconds of overlap between FFTs, defaults to the
        recommended overlap for the given window (if given), or 0
    window : `str`, `numpy.ndarray`, optional
        window function to apply to timeseries prior to FFT, see
        :func:`scipy.signal.get_window` for details on acceptable formats

    Returns
    -------
    csd : `~gwpy.frequencyseries.FrequencySeries`
        a data series containing the CSD.
    """
    # Delegate to the shared spectral machinery with the CSD method.
    return spectral.psd(
        (self, other),
        spectral.csd,
        fftlength=fftlength,
        overlap=overlap,
        window=window,
        **kwargs)
def create_file_reader(input_files, topology, featurizer, chunksize=None, **kw):
    r"""Creates a (possibly featured) file reader by a number of input files
    and either a topology file or a featurizer.

    :param input_files: a single input file or a list of input files.
    :param topology: a topology file. If given, the featurizer argument can be None.
    :param featurizer: a featurizer. If given, the topology file can be None.
    :param chunksize: the chunk size with which the corresponding reader gets
        initialized.
    :return: Returns the reader.
    """
    from pyemma.coordinates.data.numpy_filereader import NumPyFileReader
    from pyemma.coordinates.data.py_csv_reader import PyCSVReader
    from pyemma.coordinates.data import FeatureReader
    from pyemma.coordinates.data.fragmented_trajectory_reader import FragmentedTrajectoryReader

    # Fragmented trajectories: a non-empty list that itself contains lists/tuples.
    if (isinstance(input_files, (list, tuple)) and len(input_files) > 0
            and any(isinstance(item, (list, tuple)) for item in input_files)):
        return FragmentedTrajectoryReader(input_files, topology, chunksize, featurizer)

    # Normal trajectories: a string or a (possibly empty) list of strings.
    # NOTE: the original used `len(...) is 0`, an identity comparison with an
    # int literal (SyntaxWarning on CPython >= 3.8); use == instead.
    if not (isinstance(input_files, str)
            or (isinstance(input_files, (list, tuple))
                and (any(isinstance(item, str) for item in input_files)
                     or len(input_files) == 0))):
        raise ValueError('Input "{}" was no string or list of strings.'.format(input_files))

    # Normalize to a list of filenames.
    if isinstance(input_files, str):
        input_list = [input_files]
    elif len(input_files) > 0 and all(isinstance(item, str) for item in input_files):
        input_list = input_files
    elif len(input_files) == 0:
        raise ValueError("The passed input list should not be empty.")
    else:
        raise ValueError("The passed list did not exclusively contain strings or was a list of lists "
                         "(fragmented trajectory).")

    # TODO: this does not handle suffixes like .xyz.gz (rare)
    _, suffix = os.path.splitext(input_list[0])
    suffix = str(suffix)

    # All files must share the same file type.
    if not all(item.endswith(suffix) for item in input_list):
        raise ValueError('Not all elements in the input list were of the type %s!' % suffix)

    # All files must exist; collect every missing one into a single error.
    from six import StringIO
    err_msg = StringIO()
    all_exist = True
    for item in input_list:
        if not os.path.isfile(item):
            err_msg.write('\n' if err_msg.tell() > 0 else "")
            err_msg.write('File %s did not exist or was no file' % item)
            all_exist = False
    if not all_exist:
        raise ValueError('Some of the given input files were directories'
                         ' or did not exist:\n%s' % err_msg.getvalue())

    featurizer_or_top_provided = featurizer is not None or topology is not None
    # Check .h5 first, because of mdtraj's custom (deprecated) HDF5 traj format.
    # Probing the files would be expensive for many files (and re-opens each
    # twice), so we simply require that no featurizer/topology option is given.
    if suffix in ('.h5', '.hdf5') and not featurizer_or_top_provided:
        from pyemma.coordinates.data.h5_reader import H5Reader
        reader = H5Reader(filenames=input_files, chunk_size=chunksize, **kw)
    elif FeatureReader.supports_format(suffix):
        # MD files: either a featurizer or a topology file is mandatory.
        if not featurizer_or_top_provided:
            raise ValueError('The input files were MD files which makes it mandatory to have either a '
                             'Featurizer or a topology file.')
        reader = FeatureReader(input_list, featurizer=featurizer, topologyfile=topology, chunksize=chunksize)
    elif suffix in ('.npy', '.npz'):
        reader = NumPyFileReader(input_list, chunksize=chunksize)
    else:
        # Otherwise assume the given files are ASCII tabulated data.
        reader = PyCSVReader(input_list, chunksize=chunksize, **kw)
    return reader
def export(self, hashVal, hashPath, tags=None, galleries=None):
    """The export function needs to:

    - Move source image to asset folder
    - Rename to guid.ext
    - Save thumbnail, video_thumbnail, and MP4 versions. If the source is
      already h264, then only transcode the thumbnails

    NOTE(review): ``hashVal`` is not used in this body — confirm whether it
    is required by the caller's interface or dead.
    """
    # Store the source path relative to ROOT with forward slashes.
    self.source = hashPath.replace('\\', '/').replace(ROOT, '')
    galleries = galleries or []
    tags = tags or []
    # -- Get info
    videodata = self.info()
    self.width = videodata['width']
    self.height = videodata['height']
    self.framerate = videodata['framerate']
    self.duration = videodata['duration']
    self.generateThumbnail()
    # Attach this video to each requested gallery.
    for gal in galleries:
        g = Gallery.objects.get(pk=int(gal))
        g.videos.add(self)
    self.tagArtist()
    # Create-or-fetch each tag and associate it.
    for tagName in tags:
        tag = Tag.objects.get_or_create(name=tagName)[0]
        self.tags.add(tag)
    if not self.guid:
        self.guid = self.getGuid().guid
    # -- Set the temp video while processing
    queuedvideo = VideoQueue.objects.get_or_create(video=self)[0]
    queuedvideo.save()
    self.save()
    # NOTE(review): a VideoQueue row for this video was just ensured above via
    # get_or_create; this second insert appears redundant and relies on the
    # IntegrityError to no-op — confirm intent.
    try:
        item = VideoQueue()
        item.video = self
        item.save()
    except IntegrityError:  # -- Already queued
        pass
def main(jlink_serial, device):
    """Main function.

    Args:
      jlink_serial (str): the J-Link serial number
      device (str): the target CPU

    Returns:
      ``None``

    Raises:
      JLinkException: on error
    """
    # The Python 2-only ``StringIO`` module was used here; ``io.StringIO``
    # is the portable replacement.
    import io
    # Capture the DLL's log output in memory instead of printing it.
    buf = io.StringIO()
    jlink = pylink.JLink(log=buf.write, detailed_log=buf.write)
    jlink.open(serial_no=jlink_serial)
    jlink.set_tif(pylink.enums.JLinkInterfaces.SWD)
    jlink.connect(device, verbose=True)
    # set_little_endian() returns True when the target was big endian before
    # the call; restore that mode so we only *read* the original endianness.
    big_endian = jlink.set_little_endian()
    if big_endian:
        jlink.set_big_endian()
    print('Target Endian Mode: %s Endian' % ('Big' if big_endian else 'Little'))
def _check_for_mismatches(self, known_keys):
    """Check for bad options from value sources.

    :param known_keys: set of fully qualified option names ('x.y.z') from
        the option definitions.

    Raises NotAnOptionError when admin.strict is set and unknown keys are
    found; otherwise emits a warning listing them.
    """
    for a_value_source in self.values_source_list:
        # A value source may declare that mismatches should always be
        # ignored; absence of the attribute means "don't tolerate them".
        if getattr(a_value_source, 'always_ignore_mismatches', False):
            continue
        # Command-line value sources perform their own mismatch detection,
        # so we never allow unmatched keys from the command line; all other
        # sources are fetched with mismatches tolerated here.
        allow_mismatches = not hasattr(a_value_source, 'command_line_value_source')
        # Collect every key this source provides, as 'x.y.z' strings.
        value_source_mapping = a_value_source.get_values(
            self, allow_mismatches, self.value_source_object_hook)
        value_source_keys_set = set(DotDict(value_source_mapping).keys_breadth_first())
        # Keys that didn't match any of the known keys in the requirements.
        unmatched_keys = value_source_keys_set.difference(known_keys)
        # A key of the form 'y.z' is actually okay if it matches the tail of
        # a known key 'x.y.z' (it was consumed during acquisition).
        # NOTE: any() (unlike the previous reduce over a generator) is also
        # correct when known_keys is empty.
        for key in unmatched_keys.copy():
            if any(known_key.endswith(key) for known_key in known_keys):
                unmatched_keys.remove(key)
        # Anything left is a badly formed key: raise or warn.
        if unmatched_keys:
            if self.option_definitions.admin.strict.default:
                if len(unmatched_keys) > 1:
                    raise NotAnOptionError("%s are not valid Options" % unmatched_keys)
                raise NotAnOptionError("%s is not a valid Option" % unmatched_keys.pop())
            warnings.warn('Invalid options: %s' % ', '.join(sorted(unmatched_keys)))
def _init_map(self, record_types=None, **kwargs):
    """Initialize form map with grade-system defaults."""
    osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
    # Seed the numeric-score defaults in one pass.
    self._my_map.update({
        'numericScoreIncrement': self._numeric_score_increment_default,
        'lowestNumericScore': self._lowest_numeric_score_default,
        'basedOnGrades': self._based_on_grades_default,
        'highestNumericScore': self._highest_numeric_score_default,
    })
    self._my_map['assignedGradebookIds'] = [str(kwargs['gradebook_id'])]
    self._my_map['grades'] = []
def uniqify_once(func):
    """Make sure that a method returns a unique name.

    Decorator: pass the wrapped method's result through the instance's
    ``unique_once`` so the returned name is made unique.
    """
    @six.wraps(func)
    def unique_once(self, *args, **kwargs):
        candidate = func(self, *args, **kwargs)
        return self.unique_once(candidate)

    return unique_once
def log_info(msg, logger="TaskLogger"):
    """Log an INFO message.

    Convenience function to log a message to the default Logger.

    Parameters
    ----------
    msg : str
        Message to be logged
    logger : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    task_logger = get_tasklogger(logger)
    task_logger.info(msg)
    return task_logger
def on_session_start(self, data):  # pylint: disable=W0613
    """XMPP session started: announce presence, then request the roster."""
    # Send initial presence at the configured priority.
    self.send_presence(ppriority=self._initial_priority)
    # Request roster
    self.get_roster()
def inet_pton ( af , addr ) :
"""Convert an IP address from text representation into binary form""" | print ( 'hello' )
if af == socket . AF_INET :
return inet_aton ( addr )
elif af == socket . AF_INET6 : # IPv6 : The use of " : : " indicates one or more groups of 16 bits of zeros .
# We deal with this form of wildcard using a special marker .
JOKER = b"*"
while b"::" in addr :
addr = addr . replace ( b"::" , b":" + JOKER + b":" )
joker_pos = None
# The last part of an IPv6 address can be an IPv4 address
ipv4_addr = None
if b"." in addr :
ipv4_addr = addr . split ( b":" ) [ - 1 ]
result = b""
parts = addr . split ( b":" )
for part in parts :
if part == JOKER : # Wildcard is only allowed once
if joker_pos is None :
joker_pos = len ( result )
else :
raise Exception ( "Illegal syntax for IP address" )
elif part == ipv4_addr : # FIXME : Make sure IPv4 can only be last part
# FIXME : inet _ aton allows IPv4 addresses with less than 4 octets
result += socket . inet_aton ( ipv4_addr )
else : # Each part must be 16bit . Add missing zeroes before decoding .
try :
result += part . rjust ( 4 , b"0" ) . decode ( "hex" )
except TypeError :
raise Exception ( "Illegal syntax for IP address" )
# If there ' s a wildcard , fill up with zeros to reach 128bit ( 16 bytes )
if JOKER in addr :
result = ( result [ : joker_pos ] + b"\x00" * ( 16 - len ( result ) ) + result [ joker_pos : ] )
if len ( result ) != 16 :
raise Exception ( "Illegal syntax for IP address" )
return result
else :
raise Exception ( "Address family not supported" ) |
def urban_adj_factor(self):
    """Return urban adjustment factor (UAF) used to adjust QMED and growth
    curves.

    Methodology source: eqn. 8, Kjeldsen 2010

    :return: urban adjustment factor
    :rtype: float
    """
    urbext = self.catchment.descriptors.urbext(self.year)
    uaf = self._pruaf() ** 2.16 * (1 + urbext) ** 0.37
    # Record intermediate values for the results log.
    self.results_log['urban_extent'] = urbext
    self.results_log['urban_adj_factor'] = uaf
    return uaf
def _compute_acq(self, x):
    """Integrated Expected Improvement: average the Phi quantile over all
    model samples."""
    means, stds = self.model.predict(x)
    fmins = self.model.get_fmin()
    # get_quantiles returns (phi, Phi, u); only Phi contributes here.
    acq_total = sum(
        get_quantiles(self.jitter, fmin, mean, std)[1]
        for mean, std, fmin in zip(means, stds, fmins))
    return acq_total / len(means)
def mod_watch(name, **kwargs):
    '''Install/reinstall a package based on a watch requisite

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.

        Parameters for this function should be set by the state being
        triggered.
    '''
    sfun = kwargs.pop('sfun', None)
    dispatch = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed}
    handler = dispatch.get(sfun)
    if handler is not None:
        return handler(name, **kwargs)
    return {'name': name,
            'changes': {},
            'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun),
            'result': False}
def is_same_channel(self, left, right):
    """Check if given nicknames are equal in the server's case mapping."""
    normalized_left = self.normalize(left)
    normalized_right = self.normalize(right)
    return normalized_left == normalized_right
def apply_with(self, _, val, ctx):
    """Constructor.

    example val:

        # header values used in multipart/form-data according to RFC2388
        'header': {
            'Content-Type': 'text/plain',
            # according to RFC2388, available values are '7bit', '8bit', 'binary'
            'Content-Transfer-Encoding': 'binary'
        },
        filename: 'a.txt',
        data: None (or any file-like object)

    :param val: dict containing file info.
    :raises ValidationError: when neither a filename nor a data object is given.
    """
    self.header = val.get('header', {})
    self.data = val.get('data', None)
    self.filename = val.get('filename', '')
    # Use identity comparison with None (== None is unidiomatic and can be
    # hijacked by __eq__ on file-like objects).
    if self.data is None and self.filename == '':
        raise ValidationError('should have file name or file object, not: {0}, {1}'.format(self.data, self.filename))
def get_cache_key(page):
    """Create the cache key for the current page and language."""
    try:
        site_id = page.node.site_id
    except AttributeError:
        # Older page objects keep site_id directly on the page.
        site_id = page.site_id
    return _get_cache_key('page_sitemap', page, 'default', site_id)
def _SerializeAttributeContainer(self, attribute_container):
    """Serializes an attribute container.

    Args:
      attribute_container (AttributeContainer): attribute container.

    Returns:
      bytes: serialized attribute container.

    Raises:
      IOError: if the attribute container cannot be serialized.
      OSError: if the attribute container cannot be serialized.
    """
    container_type = attribute_container.CONTAINER_TYPE
    if self._serializers_profiler:
        self._serializers_profiler.StartTiming(container_type)
    try:
        serialized_data = self._serializer.WriteSerialized(attribute_container)
        if not serialized_data:
            raise IOError('Unable to serialize attribute container: {0:s}.'.format(container_type))
        serialized_data = serialized_data.encode('utf-8')
    finally:
        # Always stop the profiler timer, even when serialization fails.
        if self._serializers_profiler:
            self._serializers_profiler.StopTiming(container_type)
    return serialized_data
def remove_last_entry(self):
    """Remove the last NoteContainer in the Bar and return the new current
    beat."""
    # Each entry stores its duration at index 1; give its beat span back.
    last_duration = self.bar[-1][1]
    self.current_beat -= 1.0 / last_duration
    self.bar = self.bar[:-1]
    return self.current_beat
def __parse_stream(self, stream):
    """Generic method to parse mailmap streams.

    Yields, for each meaningful line, the list of (name, email) aliases
    parsed from it.  Raises InvalidFormatError on malformed lines.
    """
    for nline, raw_line in enumerate(stream.split('\n'), start=1):
        # Ignore blank lines and comments.
        if re.match(self.LINES_TO_IGNORE_REGEX, raw_line, re.UNICODE):
            continue
        line = raw_line.strip('\n').strip(' ')
        parts = line.split('>')
        if len(parts) == 0:
            raise InvalidFormatError(cause="line %s: invalid format" % str(nline))
        aliases = []
        for part in parts:
            part = part.replace(',', ' ').strip('\n').strip(' ')
            if len(part) == 0:
                continue
            # Every alias fragment must contain an opening '<'.
            if part.find('<') < 0:
                raise InvalidFormatError(cause="line %s: invalid format" % str(nline))
            # Restore the '>' removed by the split before parsing.
            aliases.append(email.utils.parseaddr(part + '>'))
        yield aliases
def add_dir(self, path, compress):
    """Add all files under directory `path` to the MAR file.

    Args:
        path (str): path to directory to add to this MAR file
        compress (str): One of 'xz', 'bz2', or None. Defaults to None.

    Raises:
        ValueError: if `path` is not a directory.
    """
    if not os.path.isdir(path):
        raise ValueError('{} is not a directory'.format(path))
    for root, _dirs, filenames in os.walk(path):
        for filename in filenames:
            self.add_file(os.path.join(root, filename), compress)
def eval(self, expr, inplace=False, **kwargs):
    """Evaluate a string describing operations on DataFrame columns.

    Operates on columns only, not specific rows or elements.  This allows
    `eval` to run arbitrary code, which can make you vulnerable to code
    injection if you pass user input to this function.

    Parameters
    ----------
    expr : str
        The expression string to evaluate.
    inplace : bool, default False
        If the expression contains an assignment, whether to perform the
        operation inplace and mutate the existing DataFrame.  Otherwise,
        a new DataFrame is returned.

        .. versionadded:: 0.18.0.
    kwargs : dict
        See the documentation for :func:`eval` for complete details on
        the keyword arguments accepted by :meth:`~pandas.DataFrame.query`.

    Returns
    -------
    ndarray, scalar, or pandas object
        The result of the evaluation.

    See Also
    --------
    DataFrame.query : Evaluates a boolean expression to query the columns
        of a frame.
    DataFrame.assign : Can evaluate an expression or function to create new
        values for a column.
    eval : Evaluate a Python expression as a string using various backends.

    Examples
    --------
    >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
    >>> df.eval('A + B')
    0    11
    1    10
    2     9
    3     8
    4     7
    dtype: int64

    Assignment is allowed (``df.eval('C = A + B')``); by default the
    original DataFrame is not modified — pass ``inplace=True`` to mutate.
    """
    from pandas.core.computation.eval import eval as _eval

    inplace = validate_bool_kwarg(inplace, 'inplace')
    resolvers = kwargs.pop('resolvers', None)
    # Bump the stack level so name resolution looks in the caller's frame.
    kwargs['level'] = kwargs.pop('level', 0) + 1
    if resolvers is None:
        index_resolvers = self._get_index_resolvers()
        column_resolvers = self._get_space_character_free_column_resolvers()
        resolvers = column_resolvers, index_resolvers
    if 'target' not in kwargs:
        kwargs['target'] = self
    kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
    return _eval(expr, inplace=inplace, **kwargs)
def get_primitive_standard_structure(self, international_monoclinic=True):
    """Gives a structure with a primitive cell according to certain standards.

    The standards are defined in Setyawan, W., & Curtarolo, S. (2010).
    High-throughput electronic band structure calculations:
    Challenges and tools. Computational Materials Science,
    49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010

    Returns:
        The structure in a primitive standardized cell
    """
    conv = self.get_conventional_standard_structure(international_monoclinic=international_monoclinic)
    lattice = self.get_lattice_type()
    # Primitive ("P") space groups and hexagonal lattices: the conventional
    # cell is already primitive.
    if "P" in self.get_space_group_symbol() or lattice == "hexagonal":
        return conv
    transf = self.get_conventional_to_primitive_transformation_matrix(international_monoclinic=international_monoclinic)
    new_sites = []
    latt = Lattice(np.dot(transf, conv.lattice.matrix))
    # Map each conventional site into the primitive cell, dropping
    # duplicates that are periodic images of sites already kept.
    for s in conv:
        new_s = PeriodicSite(s.specie, s.coords, latt, to_unit_cell=True, coords_are_cartesian=True, properties=s.properties)
        if not any(map(new_s.is_periodic_image, new_sites)):
            new_sites.append(new_s)
    if lattice == "rhombohedral":
        prim = Structure.from_sites(new_sites)
        lengths, angles = prim.lattice.lengths_and_angles
        a = lengths[0]
        alpha = math.pi * angles[0] / 180
        # Rebuild the rhombohedral lattice in the standard orientation
        # (presumably the Setyawan–Curtarolo convention — see paper above).
        new_matrix = [
            [a * cos(alpha / 2), -a * sin(alpha / 2), 0],
            [a * cos(alpha / 2), a * sin(alpha / 2), 0],
            [a * cos(alpha) / cos(alpha / 2), 0, a * math.sqrt(1 - (cos(alpha) ** 2 / (cos(alpha / 2) ** 2)))]]
        new_sites = []
        latt = Lattice(new_matrix)
        # Re-express the sites in the reoriented lattice, again dropping
        # periodic-image duplicates.
        for s in prim:
            new_s = PeriodicSite(s.specie, s.frac_coords, latt, to_unit_cell=True, properties=s.properties)
            if not any(map(new_s.is_periodic_image, new_sites)):
                new_sites.append(new_s)
        return Structure.from_sites(new_sites)
    return Structure.from_sites(new_sites)
def GetTaskPendingMerge(self, current_task):
    """Retrieves the first task that is pending merge or has a higher priority.

    This function will check if there is a task with a higher merge priority
    than the current_task being merged.  If so, that task with the higher
    priority is returned.

    Args:
      current_task (Task): current task being merged or None if no such task.

    Returns:
      Task: the next task to merge or None if there is no task pending merge
          or with a higher priority.
    """
    candidate = self._tasks_pending_merge.PeekTask()
    # Nothing queued, or the queued task does not outrank the current one
    # (a lower merge_priority value means a higher priority).
    if not candidate or (current_task and candidate.merge_priority > current_task.merge_priority):
        return None
    with self._lock:
        candidate = self._tasks_pending_merge.PopTask()
    self._tasks_merging[candidate.identifier] = candidate
    return candidate
def patch_custom_resource_definition_status(self, name, body, **kwargs):  # noqa: E501
    """patch_custom_resource_definition_status  # noqa: E501

    Partially update status of the specified CustomResourceDefinition.
    This method makes a synchronous HTTP request by default.  To make an
    asynchronous HTTP request, please pass async_req=True:

    >>> thread = api.patch_custom_resource_definition_status(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the CustomResourceDefinition (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not
        be persisted.
    :return: V1beta1CustomResourceDefinition
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths make the identical call and return its
    # result (the thread object when async_req=True, the data otherwise).
    return self.patch_custom_resource_definition_status_with_http_info(name, body, **kwargs)  # noqa: E501
def libnet(self):
    """Not ready yet. Should give the necessary C code that interfaces with
    libnet to recreate the packet."""
    print("libnet_build_%s(" % self.__class__.name.lower())
    # Re-dissect a serialized copy so every field has a concrete value.
    det = self.__class__(str(self))
    for field in self.fields_desc:
        value = det.getfieldval(field.name)
        if value is None:
            value = 0
        elif type(value) is int:  # exact int only: bools must stay quoted
            value = str(value)
        else:
            value = '"%s"' % str(value)
        print("\t%s, \t\t/* %s */" % (value, field.name))
    print(");")
def unparse_flags(self):
    """Unparses all flags to the point before any FLAGS(argv) was called."""
    for flag in self._flags().values():
        flag.unparse()
    # Log this message before marking flags as unparsed to avoid a problem
    # when the logging library itself causes flags access.
    logging.info('unparse_flags() called; flags access will now raise errors.')
    self.__dict__['__flags_parsed'] = False
    self.__dict__['__unparse_flags_called'] = True
def init(self):
    """Init the connection to the rabbitmq server.

    Returns the channel on success, or None when export is disabled or
    the connection fails.
    """
    if not self.export_enable:
        return None
    try:
        url = 'amqp://' + self.user + ':' + self.password + '@' + self.host + ':' + self.port + '/'
        parameters = pika.URLParameters(url)
        connection = pika.BlockingConnection(parameters)
        return connection.channel()
    except Exception as e:
        # Best-effort: report the failure and let the caller handle None.
        logger.critical("Connection to rabbitMQ failed : %s " % e)
        return None
def bench(client, n):
    """Benchmark n request/reply round trips on the client socket."""
    message = b'x'
    # Time client publish operations.
    started = time.time()
    for _ in range(n):
        client.socket.send(message)
        reply = client.socket.recv()
        assert message == reply
    duration = time.time() - started
    print('Raw REQ client stats:')
    util.print_stats(n, duration)
def variant_context_w_alignment(am, var, margin=20, tx_ac=None):
    """Render a textual alignment of the genomic/transcript context around a
    variant.

    This module is experimental. It requires the uta_align package from pypi.

    :param am: assembly mapper used to resolve accessions and coordinates
    :param var: the variant whose context is rendered
    :param margin: number of bases of genomic context on each side
    :param tx_ac: optional transcript accession passed through to full_house
    """
    from uta_align.align.algorithms import align, cigar_alignment
    fh = full_house(am, var, tx_ac=tx_ac)
    tm = am._fetch_AlignmentMapper(fh['n'].ac, fh['g'].ac, am.alt_aln_method)
    strand = tm.strand
    # Widen the genomic span by `margin` on both sides.
    span_g = _ival_to_span(fh['g'].posedit.pos)
    span_g = (span_g[0] - margin, span_g[1] + margin)
    ival_g = Interval(SimplePosition(span_g[0]), SimplePosition(span_g[1]))
    ival_n = tm.g_to_n(ival_g)
    assert ival_n.start.offset == 0 and ival_n.end.offset == 0, "limited to coding variants"
    span_n = _ival_to_span(ival_n)
    ival_c = tm.g_to_c(ival_g)
    span_c = _ival_to_span(ival_c)
    # Fetch the genomic (top strand), its complement, and transcript sequences.
    seq_gt = am.hdp.get_seq(fh['g'].ac, span_g[0] - 1, span_g[1])
    seq_gb = complement(seq_gt)
    seq_n = am.hdp.get_seq(fh['n'].ac, span_n[0] - 1, span_n[1])
    # Align the transcript against the strand it actually maps to.
    if strand == 1:
        a = align(bytes(seq_gt), bytes(seq_n), b'global', extended_cigar=True)
    else:
        seq_n = ''.join(reversed(seq_n))
        a = align(bytes(seq_gb), bytes(seq_n), b'global', extended_cigar=True)
    aseq_gt, _ = cigar_alignment(seq_gt, a.query, a.cigar, hide_match=False)
    aseq_gb, aseq_n = cigar_alignment(seq_gb, a.query, a.cigar, hide_match=False)
    aln_str = _reformat_aln_str(cigar_alignment(a.ref, a.query, a.cigar, hide_match=True)[1])
    s_dir = '>' if strand == 1 else '<'
    # Assemble the display rows as [group, order, text]; groups are sorted
    # in reverse on the minus strand so the transcript rows stay adjacent.
    lines = [
        [1, 0, seq_line_fmt(var=fh['c'], span=span_c if strand == 1 else list(reversed(span_c)), content='', dir=s_dir), ],
        [2, 0, seq_line_fmt(var=fh['n'], span=span_n if strand == 1 else list(reversed(span_n)), content=aseq_n, dir=s_dir), ],
        [3, 0, _line_fmt.format(pre='', content=aln_str, post=a.cigar.to_string(), comment=''), ],
        [4, 1, seq_line_fmt(var=fh['g'], span=span_g, content=aseq_gt, dir='>'), ],
        [4, 2, seq_line_fmt(var=fh['g'], span=span_g, content=aseq_gb, dir='<'), ],
        [5, 0, pointer_line(var=fh['g'], span=span_g), ],
    ]
    if strand == -1:
        lines.sort(key=lambda e: (-e[0], e[1]))
    return '\n'.join(r[2] for r in lines)
def _load_text_assets(self, text_image_file, text_file):
    """Internal. Builds a character-indexed dictionary of pixels used by the
    show_message function below.

    Each character listed in *text_file* maps to a 40-pixel slice of the
    image loaded from *text_image_file*.
    """
    pixels = self.load_image(text_image_file, False)
    with open(text_file, 'r') as handle:
        characters = handle.read()
    # one 40-pixel glyph per character, in file order
    self._text_dict = {
        ch: pixels[pos * 40:(pos + 1) * 40]
        for pos, ch in enumerate(characters)
    }
def get_max_id(cls, session):
    """Get the current max value of the ``id`` column.

    When creating and storing ORM objects in bulk, :mod:`sqlalchemy` does not
    automatically generate an incrementing primary key ``id``. To do this
    manually, one needs to know the current max ``id``. For ORM object classes
    that are derived from other ORM object classes, the max ``id`` of the
    lowest base class is returned. This is designed to be used with
    inheritance by joining, in which derived and base class objects have
    identical ``id`` values.

    Args:
        session: database session to operate in
    """
    # sqlalchemy allows only one level of inheritance, so inspect this class
    # and each of its direct bases for the one derived straight from 'Base'
    id_base = None
    for candidate in [cls] + list(cls.__bases__):
        if any(parent.__name__ == 'Base' for parent in candidate.__bases__):
            if id_base is not None:
                raise RuntimeError("Multiple base object classes for class " + cls.__name__)
            # we found our base class for determining the ID
            id_base = candidate
    # this should never happen
    if id_base is None:
        raise RuntimeError("Error searching for base class of " + cls.__name__)
    # query the max ID; scalar() returns None for an empty table
    max_id = session.query(func.max(id_base.id)).scalar()
    return 0 if max_id is None else max_id
def update_subscription(self, subscription_id, url=None, events=None):
    """Update an existing subscription via a PATCH request.

    :param subscription_id: Subscription to update
    :param url: Url to send events
    :param events: Events to subscribe
    """
    # only send fields that were actually provided
    params = {
        key: value
        for key, value in (('url', url), ('events', events))
        if value is not None
    }
    endpoint = self.SUBSCRIPTIONS_ID_URL % subscription_id
    connection = Connection(self.token)
    connection.set_url(self.production, endpoint)
    connection.add_header('Content-Type', 'application/json')
    connection.add_params(params)
    return connection.patch_request()
def _makeTimingRelative(absoluteDataList):
    '''Given normal pitch tier data, puts the times on a scale from 0 to 1.

    Input is a list of tuples of the form
    [(time1, pitch1), (time2, pitch2), ...]
    Also returns the start and end time so that the process can be reversed.
    '''
    times = [entry[0] for entry in absoluteDataList]
    payloads = [list(entry[1:]) for entry in absoluteDataList]
    relTimes, startTime, endTime = makeSequenceRelative(times)
    # re-attach each payload to its rescaled time
    relDataList = [tuple([t] + rest) for t, rest in zip(relTimes, payloads)]
    return relDataList, startTime, endTime
def find_cards(self, source=None, **filters):
    """Generate a card pool with all cards matching the specified filters.

    Falls back to ``self.filters`` when no explicit filters are given;
    any :class:`LazyValue` filter is evaluated against *source* first.
    """
    active = (filters or self.filters).copy()
    for key, value in active.items():
        if isinstance(value, LazyValue):
            active[key] = value.evaluate(source)
    from .. import cards
    return cards.filter(**active)
def load_cfg(path, envvar_prefix='LIBREANT_', debug=False):
    '''wrapper of config_utils.load_configs

    In debug mode configuration errors propagate; otherwise they are
    reported via die().
    '''
    try:
        return load_configs(envvar_prefix, path=path)
    except Exception as exc:
        if debug:
            raise
        die(str(exc))
def t_multiline_NEWLINE(self, t):
    r'\r\n|\n|\r'
    # NOTE: the raw-string above is the PLY token regex, not documentation —
    # it must not be edited.
    # A second consecutive newline inside a multiline value is handed off to
    # the OPTION_AND_VALUE rule; the first one just sets the flag.
    if t.lexer.multiline_newline_seen:
        return self.t_multiline_OPTION_AND_VALUE(t)
    t.lexer.multiline_newline_seen = True
def write_config_file(self, f, comments):
    """Write a sample configuration file section.

    Emits the section header (via the parent class) followed by every
    contained element, with attributes, descriptions, sample values and
    required flags, then a trailing blank line.
    """
    if not self.elements:
        return
    super(_Section, self).write_config_file(f, comments)
    for element in self.elements.values():
        element.write_config_file(f, comments)
    f.write("\n")
def get_parameters_as_dictionary(self, query_string):
    """Returns query string parameters as a dictionary.

    Values are percent-decoded; keys are left as-is.
    """
    result = {}
    for chunk in query_string.split('&'):
        key, value = chunk.split('=', 1)
        result[key] = unquote(value)
    return result
def _valid_config(self, settings):
    """Scan through the returned settings to ensure they appear sane.

    There are times when the returned buffer has the proper information, but
    the reading is inaccurate. When this happens, temperatures will swing
    or system values will be set to improper values.

    :param settings: Configuration derived from the buffer
    :type settings: dict
    :returns: bool
    """
    # temperature probes must fall inside the configured bounds
    for probe in ('environment_temp', 'bean_temp'):
        reading = int(settings[probe])
        if reading > self.MAX_BOUND_TEMP or reading < self.MIN_BOUND_TEMP:
            self._log.error('Temperatures are outside of bounds')
            return False
    # on/off switches may only ever read 0 or 1
    for switch in ('drum_motor', 'chaff_tray', 'solenoid', 'cooling_motor'):
        if int(settings.get(switch)) not in (0, 1):
            self._log.error('Settings show invalid values')
            return False
    return True
def logfile_generator(self):
    """Yield each line of the file, or the next line if several files."""
    if not self.args['exclude']:
        # ask all filters for a start_limit and fast-forward to the maximum
        limits = [f.start_limit for f in self.filters if hasattr(f, 'start_limit')]
        if limits:
            for logfile in self.args['logfile']:
                logfile.fast_forward(max(limits))
    if len(self.args['logfile']) > 1:
        # merge log files by time
        yield from self._merge_logfiles()
    else:
        # only one file: apply the timezone shift inline
        tz_offset = self.args['timezone'][0]
        for logevent in self.args['logfile'][0]:
            if tz_offset != 0 and logevent.datetime:
                logevent._datetime = logevent.datetime + timedelta(hours=tz_offset)
            yield logevent
def compile(self, csdl):
    """Compile the given CSDL.

    Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/compile

    :param csdl: CSDL to compile
    :type csdl: str
    :returns: dict with extra response data
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    return self.request.post('compile', data={'csdl': csdl})
def detach_internet_gateway(self, internet_gateway_id, vpc_id):
    """Detach an internet gateway from a specific VPC.

    :type internet_gateway_id: str
    :param internet_gateway_id: The ID of the internet gateway to detach.
    :type vpc_id: str
    :param vpc_id: The ID of the VPC to detach from.
    :rtype: Bool
    :return: True if successful
    """
    return self.get_status('DetachInternetGateway', {
        'InternetGatewayId': internet_gateway_id,
        'VpcId': vpc_id,
    })
def next_undecoded_checkpoint(model_dir, timeout_mins=240):
    """Yields successive checkpoints from model_dir.

    Waits (up to ``timeout_mins``) for a checkpoint newer than the last one
    seen, then yields the lowest-numbered checkpoint whose global step
    exceeds the last yielded step. Terminates when the wait times out and
    no unevaluated checkpoint remains.
    """
    last_ckpt = None
    last_step = 0
    while True:
        # Get the latest checkpoint; returns None on timeout.
        last_ckpt = tf.contrib.training.wait_for_new_checkpoint(
            model_dir, last_ckpt, seconds_to_sleep=60, timeout=60 * timeout_mins)
        # Get all the checkpoints from the model dir.
        ckpt_path = tf.train.get_checkpoint_state(model_dir)
        all_model_checkpoint_paths = ckpt_path.all_model_checkpoint_paths
        ckpt_step = np.inf
        next_ckpt = None
        # Find the next checkpoint to eval based on last_step.
        for ckpt in all_model_checkpoint_paths:
            # NOTE(review): assumes basenames look like "model.ckpt-<step>"
            # (step after the first '-') — confirm for this model_dir layout.
            step = int(os.path.basename(ckpt).split("-")[1])
            if step > last_step and step < ckpt_step:
                ckpt_step = step
                next_ckpt = ckpt
        # If all the checkpoints have been evaluated and the wait timed out.
        if last_ckpt is None and next_ckpt is None:
            tf.logging.info("Eval timeout: no new checkpoints within %dm" % timeout_mins)
            break
        if next_ckpt is not None:
            last_step = ckpt_step
            last_ckpt = next_ckpt
        # NOTE(review): when next_ckpt is None but the wait returned a
        # checkpoint, the previous last_ckpt is yielded again — confirm
        # re-yielding is intended.
        yield last_ckpt
def _get_proc_status(proc):
    '''Returns the status of a Process instance.

    It's backward compatible with <2.0 versions of psutil, where ``status``
    was an attribute rather than a method. Returns None when the process is
    gone or inaccessible.
    '''
    try:
        raw_status = proc.status() if PSUTIL2 else proc.status
        return salt.utils.data.decode(raw_status)
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        return None
def create(self):
    """POST /layertemplates: Create a new item."""
    # url('layertemplates')
    body_length = int(request.environ['CONTENT_LENGTH'])
    raw_body = request.environ['wsgi.input'].read(body_length)
    payload = simplejson.loads(raw_body.decode('utf8'))
    lt = self._new_lt_from_user(payload['name'], payload['comment'], payload['json'], c.user)
    response.status = 201
    href = h.url_for(controller="layertemplates", action="show", id=lt.id)
    return {'name': lt.name, 'comment': lt.comment, 'id': lt.id, 'href': href}
def list_commands(self, ctx):
    """List all sub-commands, sorted alphabetically."""
    runtime_env = ctx.ensure_object(environment.Environment)
    runtime_env.load()
    return sorted(runtime_env.list_commands(*self.path))
def drain(self, cluster, nodes, max_size):
    """Drain all the data for the given nodes and collate them into a list of
    batches that will fit within the specified size on a per-node basis.
    This method attempts to avoid choosing the same topic-node repeatedly.

    Arguments:
        cluster (ClusterMetadata): The current cluster metadata
        nodes (list): list of node_ids to drain
        max_size (int): maximum number of bytes to drain

    Returns:
        dict: {node_id: list of ProducerBatch} with total size less than the
        requested max_size.
    """
    if not nodes:
        return {}
    now = time.time()
    batches = {}
    for node_id in nodes:
        size = 0
        partitions = list(cluster.partitions_for_broker(node_id))
        ready = []
        # to make starvation less likely this loop doesn't start at 0:
        # _drain_index persists across calls, rotating the start partition
        self._drain_index %= len(partitions)
        start = self._drain_index
        while True:
            tp = partitions[self._drain_index]
            # only consider partitions that have batches and are not muted
            if tp in self._batches and tp not in self.muted:
                with self._tp_locks[tp]:
                    dq = self._batches[tp]
                    if dq:
                        first = dq[0]
                        # a batch that has already failed must wait out
                        # retry_backoff_ms before being drained again
                        backoff = (
                            bool(first.attempts > 0) and
                            bool(first.last_attempt + self.config['retry_backoff_ms'] / 1000.0 > now)
                        )
                        # Only drain the batch if it is not during backoff
                        if not backoff:
                            if (size + first.records.size_in_bytes() > max_size and
                                    len(ready) > 0):
                                # there is a rare case that a single batch
                                # size is larger than the request size due
                                # to compression; in this case we will
                                # still eventually send this batch in a
                                # single request
                                break
                            else:
                                batch = dq.popleft()
                                batch.records.close()
                                size += batch.records.size_in_bytes()
                                ready.append(batch)
                                batch.drained = now
            # advance circularly; stop after one full pass over partitions
            self._drain_index += 1
            self._drain_index %= len(partitions)
            if start == self._drain_index:
                break
        batches[node_id] = ready
    return batches
def _move_file_with_sizecheck(tx_file, final_file):
    """Move transaction file to final location, with size checks avoiding
    failed transfers.

    Creates an empty file with a '.bcbiotmp' extension in the destination
    location, which serves as a flag. If a file like that is present,
    it means that the transaction didn't finish successfully.
    """
    # logger.debug("Moving %s to %s" % (tx_file, final_file))
    sentinel = final_file + ".bcbiotmp"
    open(sentinel, 'wb').close()
    expected_size = utils.get_size(tx_file)
    shutil.move(tx_file, final_file)
    actual_size = utils.get_size(final_file)
    assert expected_size == actual_size, (
        'distributed.transaction.file_transaction: File copy error: '
        'file or directory on temporary storage ({}) size {} bytes '
        'does not equal size of file or directory after transfer to '
        'shared storage ({}) size {} bytes'.format(
            tx_file, expected_size, final_file, actual_size))
    # transfer verified: remove the sentinel flag
    utils.remove_safe(sentinel)
def render(self, link_url, image_url, **kwargs):
    """Preview link_url and image_url after interpolation.

    Args:
        link_url (str): URL of the badge link
        image_url (str): URL of the badge image
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabRenderError: If the rendering failed

    Returns:
        dict: The rendering properties
    """
    payload = {'link_url': link_url, 'image_url': image_url}
    return self.gitlab.http_get('%s/render' % self.path, payload, **kwargs)
def ReadLsbBytes(rd, offset, value_size):
    """Reads value_size bytes from rd at offset, least significant byte first.

    Only 1-, 2- and 4-byte little-endian unsigned reads are supported.
    """
    # map the requested width to the struct little-endian format code
    encoding = {1: '<B', 2: '<H', 4: '<L'}.get(value_size)
    if encoding is None:
        raise errors.HidError('Invalid value size specified')
    value, = struct.unpack(encoding, rd[offset:offset + value_size])
    return value
def nlmsg_attrlen(nlh, hdrlen):
    """Length of attributes data.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L154

    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family specific header (integer).

    Returns:
        Integer (never negative).
    """
    payload = nlmsg_len(nlh)
    aligned_header = libnl.linux_private.netlink.NLMSG_ALIGN(hdrlen)
    return max(payload - aligned_header, 0)
def process_item(self, item):
    """Calculate new maximum value for each group, for "new" items only.

    :param item: record carrying 'group' and 'value' keys
    """
    group, value = item['group'], item['value']
    if group in self._groups:
        # known group: a simple running max is enough
        cur_val = self._groups[group]
        self._groups[group] = max(cur_val, value)
    else:
        # New group. Could fetch old max. from target collection,
        # but for the sake of illustration recalculate it from
        # the source collection.
        self._src.tracking = False
        # examine entire collection
        new_max = value
        for rec in self._src.query(criteria={'group': group}, properties=['value']):
            new_max = max(new_max, rec['value'])
        self._src.tracking = True
        # back to incremental mode
        # record the recalculated max for this group
        self._groups[group] = new_max
def answers(self):
    """Get all Answer objects inside this collection (收藏夹).

    :return: generator yielding every answer in the collection
    :rtype: Answer.Iterable
    """
    self._make_soup()
    # noinspection PyTypeChecker
    # first page comes from the already-fetched soup
    for answer in self._page_get_answers(self.soup):
        yield answer
    i = 2
    while True:
        # fetch subsequent pages until the sentinel below fires
        soup = BeautifulSoup(self._session.get(self.url[:-1] + '?page=' + str(i)).text)
        for answer in self._page_get_answers(soup):
            if answer == 0:
                # NOTE(review): 0 appears to be a sentinel emitted by
                # _page_get_answers meaning "no more answers" — confirm.
                return
            yield answer
        i += 1
def _create_map(self):
    """Initialize Brzozowski Algebraic Method"""
    # at state i is represented by the regex self.B[i]
    for state in self.mma.states:
        # map next-state -> list of transition symbols for this state
        transitions = {}
        self.A[state.stateid] = transitions
        for arc in state.arcs:
            symbol = self.mma.isyms.find(arc.ilabel)
            transitions.setdefault(arc.nextstate, []).append(symbol)
        if state.final:
            transitions['string'] = ['']
def _unmarshal_relationships ( pkg_reader , package , parts ) :
"""Add a relationship to the source object corresponding to each of the
relationships in * pkg _ reader * with its target _ part set to the actual
target part in * parts * .""" | for source_uri , srel in pkg_reader . iter_srels ( ) :
source = package if source_uri == '/' else parts [ source_uri ]
target = ( srel . target_ref if srel . is_external else parts [ srel . target_partname ] )
source . load_rel ( srel . reltype , target , srel . rId , srel . is_external ) |
def traffic(stack_name: str, stack_version: Optional[str], percentage: Optional[int], region: Optional[str], remote: Optional[str], output: Optional[str]):
    '''Manage stack traffic

    Without a percentage, prints the current traffic weights of all
    completed stacks with this name; with one, requests a traffic change
    for the given stack version.
    '''
    lizzy = setup_lizzy_client(remote)
    if percentage is None:
        # report mode: show weights for every completed stack
        stack_reference = [stack_name]
        with Action('Requesting traffic info..'):
            stack_weights = []
            for stack in lizzy.get_stacks(stack_reference, region=region):
                if stack['status'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']:
                    stack_id = '{stack_name}-{version}'.format_map(stack)
                    traffic = lizzy.get_traffic(stack_id, region=region)
                    stack_weights.append({
                        'stack_name': stack_name,
                        'version': stack['version'],
                        'identifier': stack_id,
                        'weight%': traffic['weight'],
                    })
        cols = 'stack_name version identifier weight%'.split()
        with OutputFormat(output):
            print_table(cols, sorted(stack_weights, key=lambda x: x['identifier']))
    else:
        # change mode: route `percentage` of traffic to this stack version
        with Action('Requesting traffic change..'):
            stack_id = '{stack_name}-{stack_version}'.format_map(locals())
            lizzy.traffic(stack_id, percentage, region=region)
def save_as(self, fname, obj=None):
    """Save DICOM file given a GDCM DICOM object.

    Examples of a GDCM DICOM object:
        * gdcm.Writer()
        * gdcm.Reader()
        * gdcm.Anonymizer()

    :param fname: DICOM file name to be saved
    :param obj: DICOM object to be saved; if None, the previously created
        Anonymizer (``self._anon_obj``) is used
    :raises ValueError: if no object is given and no anonymizer exists
    :raises IOError: if the file could not be written
    :return: True on success
    """
    writer = gdcm.Writer()
    writer.SetFileName(fname)
    # BUGFIX: the original raised ValueError whenever a caller explicitly
    # passed `obj`; fall back to the anonymizer only when obj is omitted.
    if obj is None:
        if self._anon_obj:
            obj = self._anon_obj
        else:
            raise ValueError("Need DICOM object, e.g. obj=gdcm.Anonymizer()")
    writer.SetFile(obj.GetFile())
    if not writer.Write():
        raise IOError("Could not save DICOM file")
    return True
def step(self, observation, argmax_sampling=False):
    """Select actions based on model's output.

    Returns a dict with sampled actions, Q-values, the raw policy
    parameters and the log-probabilities of the sampled actions.
    """
    policy_params, q = self(observation)
    actions = self.action_head.sample(policy_params, argmax_sampling=argmax_sampling)
    # log probability - we can do that, because we support only discrete action spaces
    logprobs = self.action_head.logprob(actions, policy_params)
    # NOTE(review): 'logprobs' carries the raw policy_params while the
    # per-action log-probabilities live under 'action:logprobs' — confirm
    # this key naming is intentional.
    return {'actions': actions, 'q': q, 'logprobs': policy_params, 'action:logprobs': logprobs}
def compute_similarity_score(self, unit1, unit2):
    """Return the similarity score between two words.

    The measure used depends on the test type (``self.type``) and on
    ``self.current_similarity_measure``:

    - PHONETIC/"phone": 1 minus the Levenshtein distance between the two
      compact phonetic representations, normalized to the length of the
      longer string.
    - PHONETIC/"biphone": binary common-biphone score — 1 if the words
      share their initial and/or final biphone, else 0.
    - SEMANTIC/"lsa": cosine of the two words' term vectors in the LSA
      space (uses the ``.text`` of the units, not phonetics).
    - SEMANTIC/"custom": looked up in ``self.custom_similarity_scores``
      trying both key orders; identical words score
      ``self.same_word_similarity``; unknown pairs are dissimilar (0).

    :param unit1: Unit object corresponding to the first word.
    :type unit1: Unit
    :param unit2: Unit object corresponding to the second word.
    :type unit2: Unit
    :return: similarity of the two words (max 1; higher means more
        similar), or None for an unrecognized type/measure combination.
    :rtype: Float
    """
    if self.type == "PHONETIC":
        word1 = unit1.phonetic_representation
        word2 = unit2.phonetic_representation
        if self.current_similarity_measure == "phone":
            # Levenshtein distance with a two-row rolling buffer
            word1_length, word2_length = len(word1), len(word2)
            if word1_length > word2_length:
                # Make sure n <= m, to use O(min(n, m)) space
                word1, word2 = word2, word1
                word1_length, word2_length = word2_length, word1_length
            current = range(word1_length + 1)
            for i in range(1, word2_length + 1):
                previous, current = current, [i] + [0] * word1_length
                for j in range(1, word1_length + 1):
                    add, delete = previous[j] + 1, current[j - 1] + 1
                    change = previous[j - 1]
                    if word1[j - 1] != word2[i - 1]:
                        change += 1
                    current[j] = min(add, delete, change)
            # normalize by the longer word so the score lands in [0, 1]
            phonetic_similarity_score = 1 - current[word1_length] / word2_length
            return phonetic_similarity_score
        elif self.current_similarity_measure == "biphone":
            # shared initial and/or final two-phoneme sequence
            if word1[:2] == word2[:2] or word1[-2:] == word2[-2:]:
                common_biphone_score = 1
            else:
                common_biphone_score = 0
            return common_biphone_score
    elif self.type == "SEMANTIC":
        word1 = unit1.text
        word2 = unit2.text
        if self.current_similarity_measure == "lsa":
            w1_vec = self.term_vectors[word1]
            w2_vec = self.term_vectors[word2]
            # cosine similarity computed without numpy
            dot = sum([w1 * w2 for w1, w2 in zip(w1_vec, w2_vec)])
            norm1 = sqrt(sum([w * w for w in w1_vec]))
            norm2 = sqrt(sum([w * w for w in w2_vec]))
            semantic_relatedness_score = dot / (norm1 * norm2)
            return semantic_relatedness_score
        elif self.current_similarity_measure == "custom":
            # look it up in dict, trying both key orders
            try:
                similarity = self.custom_similarity_scores[(word1, word2)]
            except KeyError:
                try:
                    similarity = self.custom_similarity_scores[(word2, word1)]
                except KeyError:
                    if word1 == word2:
                        # same word: should only happen when checking
                        # non-adjacent words in the same cluster
                        return self.same_word_similarity
                    else:
                        # if words aren't found, they are defined as dissimilar
                        return 0
            return similarity
    return None
def _init_log ( level = logging . DEBUG ) :
"""Initialise the logging object .
Args :
level ( int ) : Logging level .
Returns :
Logger : Python logging object .""" | log = logging . getLogger ( __file__ )
log . setLevel ( level )
handler = logging . StreamHandler ( sys . stdout )
handler . setLevel ( level )
formatter = logging . Formatter ( '%(asctime)s: %(message)s' , '%Y/%m/%d-%H:%M:%S' )
handler . setFormatter ( formatter )
log . addHandler ( handler )
return log |
def properties_observer(instance, prop, callback, **kwargs):
    """Adds properties callback handler.

    ``change_only`` (default True) may be overridden via kwargs.
    """
    observer(instance, prop, callback,
             change_only=kwargs.get('change_only', True))
def get_premises_model():
    """Support for custom company premises model with developer friendly
    validation.
    """
    try:
        app_label, model_name = PREMISES_MODEL.split('.')
    except ValueError:
        # PREMISES_MODEL did not contain exactly one dot
        raise ImproperlyConfigured(
            "OPENINGHOURS_PREMISES_MODEL must be of the"
            " form 'app_label.model_name'")
    model = get_model(app_label=app_label, model_name=model_name)
    if model is None:
        raise ImproperlyConfigured(
            "OPENINGHOURS_PREMISES_MODEL refers to"
            " model '%s' that has not been installed" % PREMISES_MODEL)
    return model
def evaluate_selection(self):
    """Evaluates current **Script_Editor_tabWidget** Widget tab Model editor
    selected content in the interactive console.

    :return: Method success.
    :rtype: bool
    """
    editor = self.get_current_editor()
    if not editor:
        return False
    LOGGER.debug("> Evaluating 'Script Editor' selected content.")
    # Qt paragraph separators must become plain newlines before evaluation
    selection = editor.get_selected_text().replace(
        QChar(QChar.ParagraphSeparator), QString("\n"))
    if self.evaluate_code(foundations.strings.to_string(selection)):
        self.ui_refresh.emit()
        return True
def setUserPasswdCredentials(self, username, password):
    """Set username and password in ``disk.0.os.credentials``.

    :param username: user name to store in the credentials
    :param password: password to store in the credentials
    """
    # delegate to the generic credential setter
    self.setCredentialValues(username=username, password=password)
def follow_bytes(self, s, index):
    """Follows transitions for every byte of *s*, returning the final
    index or None when any transition is missing."""
    current = index
    for symbol in s:
        current = self.follow_char(int_from_byte(symbol), current)
        if current is None:
            return None
    return current
def _nacm_default_deny_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
    """Set NACM default access from a default-deny-* statement.

    No-op on objects without a ``default_deny`` attribute.
    """
    if not hasattr(self, 'default_deny'):
        return
    keyword = stmt.keyword
    if keyword == "default-deny-all":
        self.default_deny = DefaultDeny.all
    elif keyword == "default-deny-write":
        self.default_deny = DefaultDeny.write
def _init_tag(self, tag):
    """True constructor, which really initializes the :class:`HTMLElement`.

    This is the function where all the preprocessing happens.

    Args:
        tag (str): HTML tag as string.
    """
    self._element = tag
    # classification flags are computed in a fixed order; later parse steps
    # depend on _istag/_iscomment being set first
    self._parseIsTag()
    self._parseIsComment()
    if not self._istag or self._iscomment:
        # plain text or comment: the raw element doubles as its "name"
        self._tagname = self._element
    else:
        self._parseTagName()
    if self._iscomment or not self._istag:
        return
    self._parseIsEndTag()
    self._parseIsNonPairTag()
    # NOTE(review): by operator precedence this condition reads as
    # (istag and not endtag) or ("=" in element) — confirm that grouping
    # is intended rather than istag and ((not endtag) or "=" in element).
    if self._istag and (not self._isendtag) or "=" in self._element:
        self._parseParams()
def quick_add(self, api_token, text, **kwargs):
    """Add a task using the Todoist 'Quick Add Task' syntax.

    :param api_token: The user's login api_token.
    :type api_token: str
    :param text: The text of the task that is parsed. A project name starts
        with ``#``, a label with ``@`` and an assignee with ``+``.
    :type text: str
    :param note: The content of the note.
    :type note: str
    :param reminder: The date of the reminder, added in free form text.
    :type reminder: str
    :return: The HTTP response to the request.
    :rtype: :class:`requests.Response`
    """
    return self._post('quick/add', {'token': api_token, 'text': text}, **kwargs)
def obj_classes_from_module(module):
    """Yield (name, class) pairs for module members carrying a truthy
    'classID' attribute; private names are skipped."""
    public_names = (n for n in dir(module) if not n.startswith('_'))
    for name in public_names:
        candidate = getattr(module, name)
        if getattr(candidate, 'classID', None):
            yield (name, candidate)
def specificity(Ntn, Nfp, eps=numpy.spacing(1)):
    """Specificity: Ntn / (Ntn + Nfp + eps).

    Wikipedia entry https://en.wikipedia.org/wiki/Sensitivity_and_specificity

    Parameters
    ----------
    Ntn : int >= 0
        Number of true negatives.
    Nfp : int >= 0
        Number of false positives.
    eps : float
        Small constant guarding against division by zero.
        Default value numpy.spacing(1)

    Returns
    -------
    specificity : float
    """
    denominator = Ntn + Nfp + eps
    return float(Ntn / denominator)
def get(self, sid):
    """Constructs a CredentialListContext.

    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.trunking.v1.trunk.credential_list.CredentialListContext
    :rtype: twilio.rest.trunking.v1.trunk.credential_list.CredentialListContext
    """
    return CredentialListContext(
        self._version,
        trunk_sid=self._solution['trunk_sid'],
        sid=sid,
    )
def transform(self, target_type: Type[T], value: F, context: PipelineContext = None) -> T:
    """Transforms an object to a new type.

    Intentionally a no-op stub here — presumably overridden by concrete
    transformer implementations (confirm against subclasses).

    Args:
        target_type: The type to be converted to.
        value: The object to be transformed.
        context: The context of the transformation (mutable).
    """
    pass
def asl_obctrl_send(self, timestamp, uElev, uThrot, uThrot2, uAilL, uAilR, uRud, obctrl_status, force_mavlink1=False):
    '''Off-board controls/commands for ASLUAVs

    timestamp     : Time since system start [us] (uint64_t)
    uElev         : Elevator command [~] (float)
    uThrot        : Throttle command [~] (float)
    uThrot2       : Throttle 2 command [~] (float)
    uAilL         : Left aileron command [~] (float)
    uAilR         : Right aileron command [~] (float)
    uRud          : Rudder command [~] (float)
    obctrl_status : Off-board computer status (uint8_t)
    '''
    message = self.asl_obctrl_encode(
        timestamp, uElev, uThrot, uThrot2, uAilL, uAilR, uRud, obctrl_status)
    return self.send(message, force_mavlink1=force_mavlink1)
def linkify_sd_by_s(self, hosts, services):
    """Replace dependent_service_description and service_description
    in service dependency by the real object

    :param hosts: host list, used to look for a specific one
    :type hosts: alignak.objects.host.Hosts
    :param services: service list to look for a specific one
    :type services: alignak.objects.service.Services
    :return: None
    """
    to_del = []
    errors = self.configuration_errors
    warns = self.configuration_warnings
    for servicedep in self:
        try:
            # first resolve the dependent service (the one that depends)
            s_name = servicedep.dependent_service_description
            hst_name = servicedep.dependent_host_name
            # The new member list, in id
            serv = services.find_srv_by_name_and_hostname(hst_name, s_name)
            if serv is None:
                # unresolvable: record an error unless the service is
                # explicitly excluded on that host, then drop the dependency
                host = hosts.find_by_name(hst_name)
                if not (host and host.is_excluded_for_sdesc(s_name)):
                    errors.append("Service %s not found for host %s" % (s_name, hst_name))
                elif host:
                    warns.append("Service %s is excluded from host %s ; "
                                 "removing this servicedependency as it's unusuable." % (s_name, hst_name))
                to_del.append(servicedep)
                continue
            servicedep.dependent_service_description = serv.uuid
            # then resolve the depended-upon service the same way
            s_name = servicedep.service_description
            hst_name = servicedep.host_name
            # The new member list, in id
            serv = services.find_srv_by_name_and_hostname(hst_name, s_name)
            if serv is None:
                host = hosts.find_by_name(hst_name)
                if not (host and host.is_excluded_for_sdesc(s_name)):
                    errors.append("Service %s not found for host %s" % (s_name, hst_name))
                elif host:
                    warns.append("Service %s is excluded from host %s ; "
                                 "removing this servicedependency as it's unusuable." % (s_name, hst_name))
                to_del.append(servicedep)
                continue
            servicedep.service_description = serv.uuid
        except AttributeError as err:
            # malformed dependency object: log and schedule for removal
            logger.error("[servicedependency] fail to linkify by service %s: %s", servicedep, err)
            to_del.append(servicedep)
    # removal is deferred so iteration over self stays safe
    for servicedep in to_del:
        self.remove_item(servicedep)
def fhi_header(filename, ppdesc):
    """Parse the FHI abinit header. Example:

    Troullier-Martins psp for element Sc  Thu Oct 27 17:33:22 EDT 1994
    21.00000 3.00000 940714 zatom, zion, pspdat
    1 1 2 0 2001 .00000 pspcod, pspxc, lmax, lloc, mmax, r2well
    1.80626423934776 .22824404341771 1.17378968127746 rchrg, fchrg, qchrg
    """
    lines = _read_nlines(filename, 4)
    try:
        header = _dict_from_lines(lines[:4], [0, 3, 6, 3])
    except ValueError:
        # The last record with rchrg ... seems to be optional.
        header = _dict_from_lines(lines[:3], [0, 3, 6])
    return NcAbinitHeader(lines[0], **header)
def calculate(self, calc, formula_reg, data_reg, out_reg, timestep=None, idx=None):
    """Calculate looping over specified repeat arguments.

    :param calc: Calculation to loop over.
    :param formula_reg: Formula registry
    :param data_reg: Data registry
    :param out_reg: Outputs registry
    :param timestep: timestep used for dynamic calcs
    :param idx: index used in dynamic calcs
    """
    # the superclass Calculator.calculate() method
    base_calculator = super(LazyLoopingCalculator, self).calculate
    # call base calculator and return if there are no repeat args
    if not self.repeat_args:
        base_calculator(calc, formula_reg, data_reg, out_reg, timestep, idx)
        return
    # make dictionaries of the calculation data and outputs argument maps;
    # these map what the formulas and registries call the repeat arguments
    data_rargs, out_rargs = {}, {}
    calc_data = calc['args'].get('data')
    calc_outs = calc['args'].get('outputs')
    # each repeat arg could be either data or output, so try both
    for rarg in self.repeat_args:
        try:
            data_rargs[rarg] = calc_data[rarg]
        except (KeyError, TypeError):
            out_rargs[rarg] = calc_outs[rarg]
    # get values of repeat data and outputs from registries
    rargs = dict(index_registry(data_rargs, data_reg, timestep, idx),
                 **index_registry(out_rargs, out_reg, timestep, idx))
    # split keys and values
    rargkeys, rargvals = zip(*rargs.iteritems())
    # reshuffle values, should be same size?
    rargvals = zip(*rargvals)
    returns = calc['returns']  # return keys
    retvals = {rv: [] for rv in returns}  # accumulated magnitudes per return
    retvalu = {rv: None for rv in returns}  # units per return
    # variance and uncertainty are nested dicts keyed return-vs-return
    ret_var = {rv: {rv2: [] for rv2 in returns} for rv in returns}
    ret_unc = {rv: {rv2: [] for rv2 in returns} for rv in returns}
    ret_jac = dict.fromkeys(returns)  # jacobian
    # get calc data and outputs keys to copy from registries
    try:
        calc_data_keys = calc_data.values()
    except (AttributeError, TypeError):
        calc_data_keys = []  # if there are no data, leave it empty
    try:
        calc_outs_keys = calc_outs.values()
    except (AttributeError, TypeError):
        calc_outs_keys = []  # if there are no outputs, leave it empty
    # copy returns and this calculation's output arguments from output reg
    data_reg_copy = reg_copy(data_reg, calc_data_keys)
    out_reg_copy = reg_copy(out_reg, returns + calc_outs_keys)
    # loop over repeat arg values
    for vals in rargvals:
        rargs_keys = dict(zip(rargkeys, vals))
        # change the registry copies to only contain the values for this
        # iteration of the repeats
        # TODO: instead of using copies rewrite index_registry to do this;
        # copies mean calculations can't use a registry backend that uses
        # shared memory, which limits the ability to run asynchronously
        for k, v in data_rargs.iteritems():
            data_reg_copy[v] = rargs_keys[k]
        for k, v in out_rargs.iteritems():
            out_reg_copy[v] = rargs_keys[k]
        # run base calculator to get retvals, var, unc and jac
        base_calculator(calc, formula_reg, data_reg_copy, out_reg_copy, timestep, idx)
        # re-assign retvals for this index of repeats
        for rv, rval in retvals.iteritems():
            rval.append(out_reg_copy[rv].m)  # append magnitude to returns
            retvalu[rv] = out_reg_copy[rv].u  # save units for this repeat
            # re-assign variance for this index of repeats
            if out_reg_copy.variance.get(rv) is None:
                continue
            for rv2, rval2 in ret_var.iteritems():
                rval2[rv].append(out_reg_copy.variance[rv2][rv])
                # uncertainty only on diagonal of variance
                if rv == rv2:
                    ret_unc[rv][rv2].append(out_reg_copy.uncertainty[rv][rv2])
                else:
                    # FIXME: inefficient to get length every iteration!
                    unc_size = len(out_reg_copy.uncertainty[rv][rv])
                    ret_unc[rv][rv2].append(Q_([0.] * unc_size, 'percent'))
            # jacobian is dictionary of returns versus arguments
            if ret_jac[rv] is None:
                # first time through create dictionary of sensitivities
                ret_jac[rv] = {o: v for o, v in out_reg_copy.jacobian[rv].iteritems()}
            else:
                # next time through, vstack the sensitivities to existing
                for o, v in out_reg_copy.jacobian[rv].iteritems():
                    ret_jac[rv][o] = np.vstack((ret_jac[rv][o], v))
    LOGGER.debug('ret_jac:\n%r', ret_jac)
    # TODO: handle jacobian for repeat args and for dynamic simulations
    # re-apply the units that were stripped while accumulating magnitudes,
    # converting if the copy's units differ from the registry's
    for k in retvals:
        if retvalu[k] is not None:
            if retvalu[k] == out_reg[k].u:
                retvals[k] = Q_(retvals[k], retvalu[k])
            else:
                retvals[k] = Q_(retvals[k], retvalu[k]).to(out_reg[k].u)
    # put return values into output registry
    if idx is None:
        out_reg.update(retvals)
        out_reg.variance.update(ret_var)
        out_reg.uncertainty.update(ret_unc)
        out_reg.jacobian.update(ret_jac)
    else:
        # BUG FIX: iterate (key, value) pairs. The original `for k, v in
        # retvals:` iterated the dict directly, yielding only keys, so the
        # 2-tuple unpacking raised ValueError before any value was stored.
        for k, v in retvals.iteritems():
            out_reg[k][idx] = v
def _search_env(keys):
    """Search the environment for the supplied keys, returning the first
    one found or None if none was found.
    """
    for key in keys:
        try:
            return os.environ[key]
        except KeyError:
            continue
    return None
def get_max_url_file_name_length(savepath):
    """Determine the max length for any %max_url_file_name-style placeholder.

    :param str savepath: absolute savepath to work on
    :return: max allowed number of chars for any of the placeholders
    :rtype: int
    """
    number_occurrences = savepath.count('%max_url_file_name')
    number_occurrences += savepath.count('%appendmd5_max_url_file_name')
    if number_occurrences == 0:
        # No placeholders present: nothing to size. (The original code
        # raised ZeroDivisionError in this case.)
        return 0
    size_without_max_url_file_name = len(
        savepath.replace('%max_url_file_name', '')
                .replace('%appendmd5_max_url_file_name', ''))
    # Windows: max file path length is 260 characters including
    # NULL (string end)
    max_size = 260 - 1 - size_without_max_url_file_name
    # A character budget must be an integer: under Python 3 the original
    # '/' produced a float, so use floor division.
    return max_size // number_occurrences
def _generate_consumer_tag(self):
    """Generate a unique consumer tag.

    :rtype string:
    """
    # Tag is "<module>.<class><counter>" so concurrent consumers are
    # distinguishable in broker tooling.
    cls = self.__class__
    return "{0}.{1}{2}".format(cls.__module__, cls.__name__, self._next_consumer_tag())
def first_cyclic_node(head):
    """Return the node where the list's cycle begins, or None if acyclic.

    :type head: Node
    :rtype: Node
    """
    # Floyd's tortoise-and-hare: the fast pointer advances two nodes per
    # slow-pointer step; they meet inside the cycle iff one exists.
    slow = fast = head
    met = False
    while fast is not None and fast.next is not None:
        fast = fast.next.next
        slow = slow.next
        if fast is slow:
            met = True
            break
    if not met:
        return None
    # Restart one pointer at the head; advancing both one step at a time
    # makes them meet exactly at the cycle's entry node.
    slow = head
    while fast is not slow:
        fast = fast.next
        slow = slow.next
    return slow
def rollback(name, **kwargs):
    '''Roll back the committed changes.

    .. code-block:: yaml

        rollback the changes:
          junos:
            - rollback
            - id: 5

    Parameters:
      Optional
        * id:
          The rollback id value [0-49]. (default = 0)
        * kwargs: Keyworded arguments which can be provided like -
            * timeout:
              Set NETCONF RPC timeout. Can be used for commands which
              take a while to execute. (default = 30 seconds)
            * comment:
              Provide a comment to the commit. (default = None)
            * confirm:
              Provide time in minutes for commit confirmation. If this option
              is specified, the commit will be rolled back in the given time
              unless the commit is confirmed.
            * diffs_file:
              Path to the file where any diffs will be written. (default = None)
    '''
    # Delegate to the junos execution module; its return dict becomes the
    # state's 'changes'.
    return {
        'name': name,
        'changes': __salt__['junos.rollback'](**kwargs),
        'result': True,
        'comment': '',
    }
def get_server_certificate(self, host, port):
    """Gets the remote x.509 certificate.

    :param host:
    :param port:
    :return: the PEM-encoded certificate, or False on any error
    """
    logger.info("Fetching server certificate from %s:%s" % (host, port))
    try:
        # Resolves to the module-level helper (e.g. ssl.get_server_certificate),
        # not this method — the lookup is global inside the method body.
        cert = get_server_certificate((host, int(port)))
    except Exception as e:
        logger.error('Error getting server certificate from %s:%s: %s' % (host, port, e))
        return False
    return cert
def _eval(self, teaching):
    """Returns the evaluation."""
    # Accept a raw string too (in case someone called _get directly) and
    # compile it into a callable first.
    compiled = teaching
    if isinstance(compiled, string_types):
        compiled = self._validate_teaching(None, compiled, namespaces=self._namespaces)
    return compiled(self._dataObject)
def report_onlysize(bytes_so_far, total_size, speed, eta):
    '''This callback for the download function is used when console width
    is not enough to print the bar.
    It prints only the sizes.
    '''
    percent = int(bytes_so_far * 100 / total_size)
    done = approximate_size(bytes_so_far).center(10)
    total = approximate_size(total_size).center(10)
    # Carriage return (no newline) so the next report overwrites this one.
    line = 'D: {0}% -{1}/{2}'.format(percent, done, total) + "eta {0}".format(eta)
    sys.stdout.write(line + "\r")
    sys.stdout.flush()
def tsort(self):
    """Given a partial ordering, return a totally ordered list.

    part is a dict of partial orderings. Each value is a set,
    which the key depends on.

    The return value is a list of sets, each of which has only
    dependencies on items in previous entries in the list.

    raise ValueError if ordering is not possible (check for circular or missing dependencies)
    """
    # Map each task to the set of tasks it still depends on.
    remaining = {}
    for _name, task in self.tasks.iteritems():
        remaining[task] = task.dependencies
    ordered = []
    while True:
        # Tasks with no outstanding dependencies form the next level.
        ready = set(name for name, deps in remaining.iteritems() if not deps)
        if not ready:
            break
        ordered.append(ready)
        # Drop the satisfied tasks and remove them from everyone's deps.
        remaining = dict((name, deps - ready)
                         for name, deps in remaining.iteritems()
                         if name not in ready)
    if remaining:
        raise ValueError('total ordering not possible (check for circular or missing dependencies)')
    return ordered
def is_allowed_view(perm):
    """Check if permission is in acl list."""
    # Denylist first: any match here wins outright.
    for pattern in ACL_EXCLUDED_VIEWS:
        prefix, star, suffix = pattern.partition('*')
        if pattern and perm.startswith(prefix):
            return False
    # Allowlist: '*' alone, 'prefix*', '*suffix', or an exact name.
    for pattern in ACL_ALLOWED_VIEWS:
        prefix, star, suffix = pattern.partition('*')
        if star:
            if not prefix and not suffix:
                return True
            if prefix and perm.startswith(prefix):
                return True
            if suffix and perm.endswith(suffix):
                return True
        elif pattern == perm:
            return True
    return False
def filter_extant_exports(client, bucket, prefix, days, start, end=None):
    """Filter days where the bucket already has extant export keys."""
    end = end or datetime.now()
    try:
        response = client.get_object_tagging(Bucket=bucket, Key=prefix)
        tag_set = response.get('TagSet', [])
    except ClientError as e:
        # A missing key simply means nothing has been exported yet.
        if e.response['Error']['Code'] != 'NoSuchKey':
            raise
        tag_set = []
    tags = dict((t['Key'], t['Value']) for t in tag_set)
    if 'LastExport' not in tags:
        return sorted(days)
    last_export = parse(tags['LastExport'])
    # Normalize to aware UTC so the comparison below is well-defined.
    if last_export.tzinfo is None:
        last_export = last_export.replace(tzinfo=tzutc())
    return [d for d in sorted(days) if d > last_export]
def unwind(self, path, include_array_index=None, preserve_null_and_empty_arrays=False):
    """Append an $unwind stage that deconstructs an array field.

    :param path: Field path to an array field
    :param include_array_index: The name of a new field to hold the array index of the element.
    :param preserve_null_and_empty_arrays:
        If true, if the path is null, missing, or an empty array, $unwind outputs the document.
        If false, $unwind does not output a document if the path is null, missing, or an empty array.
    :return: The current object
    """
    # MongoDB requires the field path to start with '$'; prepend it if absent.
    stage = {'$unwind': path if path[0] == '$' else '$' + path}
    if include_array_index:
        stage['includeArrayIndex'] = include_array_index
    if preserve_null_and_empty_arrays:
        stage['preserveNullAndEmptyArrays'] = True
    self._q.append(stage)
    return self
def save(self):
    """Save the current cursor position so that it can be restored later."""
    # Emit the terminal's save-cursor escape sequence, then record that a
    # position has been saved.
    self.write(self.term.save)
    self._saved = True
def add_cron(self, client, event, seconds="*", minutes="*", hours="*"):
    """Add a cron entry.

    The arguments for this event are:

    1. The name of the event to dispatch when the cron fires.
    2. What seconds to trigger on, as a timespec (default "*")
    3. What minutes to trigger on, as a timespec (default "*")
    4. What hours to trigger on, as a timespec (default "*")

    Timespecs may be omitted in reverse order of frequency - if hours
    is omitted, the previous timespecs will be applied every hour. If
    both hours and minutes are omitted, the seconds timespec will be
    applied every minute of every hour, and if all timespecs are omitted,
    the event will fire each second.

    Timespecs are strings in the following formats:

    Plain integer - specifies that exact value for the unit.
    "?" - specifies a random value from 0 to the unit max.
    "?/X" - specifies all multiples of X for this unit, randomly offset
    by a fixed amount (e.g. ?/15 might become 4,19,34,49).
    "*" - specifies all values for the unix from 0 to max.
    "*/X" - specifies all multiples of X for the unit.

    Any number of these can be combined in a comma-separated list.
    For instance, "*/15" would be the same as "0,15,30,45" if used
    in the seconds field.
    """
    # Refuse duplicate registrations for the same event name.
    for existing in self.crons:
        if existing.event == event:
            _log.warning("Cron '%s' is already registered.", event)
            return True
    _log.info("Registering cron for '%s'.", event)
    self.crons.append(Cron(event, seconds, minutes, hours))
    return True
def get_access_token(self, request, callback=None):
    """Fetch access token from callback request."""
    # The provider must redirect back to the same absolute URI we registered.
    redirect_uri = request.build_absolute_uri(callback or request.path)
    if not self.check_application_state(request):
        logger.error('Application state check failed.')
        return None
    # Without an authorization code there is nothing to exchange.
    if 'code' not in request.GET:
        logger.error('No code returned by the provider')
        return None
    payload = {
        'client_id': self.consumer_key,
        'redirect_uri': redirect_uri,
        'client_secret': self.consumer_secret,
        'code': request.GET['code'],
        'grant_type': 'authorization_code',
    }
    try:
        response = self.request('post', self.access_token_url, data=payload)
        response.raise_for_status()
    except RequestException as e:
        logger.error('Unable to fetch access token: {0}'.format(e))
        return None
    return response.text
def _is_normal_karyotype(karyotype):
    """This will default to true if no karyotype is provided.
    This is assuming human karyotypes.

    :param karyotype:
    :return:
    """
    if karyotype is None:
        return True
    # Empty string counts as "normal" (i.e. unspecified) after trimming.
    return karyotype.strip() in ('46;XX', '46;XY', '')
def find_items(self, q, shape=ID_ONLY, depth=SHALLOW, additional_fields=None, order_fields=None,
               calendar_view=None, page_size=None, max_items=None, offset=0):
    """Private method to call the FindItem service.

    :param q: a Q instance containing any restrictions
    :param shape: controls whether to return (id, changekey) tuples or Item objects. If additional_fields is
        non-null, we always return Item objects.
    :param depth: controls whether to return soft-deleted items or not.
    :param additional_fields: the extra properties we want on the return objects. Default is no properties. Be
        aware that complex fields can only be fetched with fetch() (i.e. the GetItem service).
    :param order_fields: the SortOrder fields, if any
    :param calendar_view: a CalendarView instance, if any
    :param page_size: the requested number of items per page
    :param max_items: the max number of items to return
    :param offset: the offset relative to the first item in the item collection
    :return: a generator for the returned item IDs or items
    """
    # Validate enum-style arguments up front so we fail fast, before any service call.
    if shape not in SHAPE_CHOICES:
        raise ValueError("'shape' %s must be one of %s" % (shape, SHAPE_CHOICES))
    if depth not in ITEM_TRAVERSAL_CHOICES:
        raise ValueError("'depth' %s must be one of %s" % (depth, ITEM_TRAVERSAL_CHOICES))
    if not self.folders:
        # Nothing to search; this makes the generator yield nothing.
        log.debug('Folder list is empty')
        return
    if additional_fields:
        # Each requested field must be valid for the items in these folders ...
        for f in additional_fields:
            self.validate_item_field(field=f)
        # ... and complex fields are not supported by FindItem (only by GetItem).
        for f in additional_fields:
            if f.field.is_complex:
                raise ValueError("find_items() does not support field '%s'. Use fetch() instead" % f.field.name)
    if calendar_view is not None and not isinstance(calendar_view, CalendarView):
        raise ValueError("'calendar_view' %s must be a CalendarView instance" % calendar_view)
    # Build up any restrictions. A query-string search and a structured
    # restriction are mutually exclusive in the FindItem call.
    if q.is_empty():
        restriction = None
        query_string = None
    elif q.query_string:
        restriction = None
        query_string = Restriction(q, folders=self.folders, applies_to=Restriction.ITEMS)
    else:
        restriction = Restriction(q, folders=self.folders, applies_to=Restriction.ITEMS)
        query_string = None
    # NOTE(review): the message placeholders read "Finding <folders> items in
    # folders <account>" with this argument order — the first two args look
    # swapped relative to the format string; confirm against upstream.
    log.debug(
        'Finding %s items in folders %s (shape: %s, depth: %s, additional_fields: %s, restriction: %s)',
        self.folders,
        self.account,
        shape,
        depth,
        additional_fields,
        restriction.q if restriction else None,
    )
    # When a CalendarView is given, its max_items takes precedence.
    items = FindItem(account=self.account, folders=self.folders, chunk_size=page_size).call(
        additional_fields=additional_fields,
        restriction=restriction,
        order_fields=order_fields,
        shape=shape,
        query_string=query_string,
        depth=depth,
        calendar_view=calendar_view,
        max_items=calendar_view.max_items if calendar_view else max_items,
        offset=offset,
    )
    if shape == ID_ONLY and additional_fields is None:
        # Cheap path: yield bare item IDs. Exceptions returned by the
        # service are passed through to the caller unchanged.
        for i in items:
            yield i if isinstance(i, Exception) else Item.id_from_xml(i)
    else:
        # Full path: deserialize each XML element into the matching Item model.
        for i in items:
            if isinstance(i, Exception):
                yield i
            else:
                yield Folder.item_model_from_tag(i.tag).from_xml(elem=i, account=self.account)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.