signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def gaussian1(height, x0, y0, a, b, c):
    """Return a callable evaluating a 2-D elliptical Gaussian.

    height -- the amplitude of the gaussian
    x0, y0 -- center of the gaussian
    a, b, c -- ellipse parameters (coefficients in the quadratic form)
    """
    def evaluate(x, y):
        dx = x - x0
        dy = y - y0
        # Quadratic form a*dx^2 + b*dx*dy + c*dy^2 defines the ellipse.
        quad = a * dx ** 2 + b * dx * dy + c * dy ** 2
        return height * np.exp(-0.5 * quad)
    return evaluate
def write_file(writer, filename):
    """Write all of the lines from *filename* using *writer*.

    Each line is written followed by a separate newline write, matching
    the writer's expectation of newline-terminated records.
    """
    for text_line in txt_line_iterator(filename):
        writer.write(text_line)
        writer.write("\n")
def get_thumbnail_image_url(self, page=1):
    """Return the URL of the thumbnail-sized image for a single page.

    :param page: 1-based page number; defaults to the first page.
    """
    template = self.resources.page.get('image')
    # The template carries {page} and {size} placeholders.
    url = template.replace("{page}", str(page))
    return url.replace("{size}", "thumbnail")
def to_json(self, include_id: bool = False) -> Mapping[str, str]:
    """Serialize this namespace entry as a dict of namespace keyword and name.

    :param include_id: if True, also include the model identifier under 'id'.
    """
    result = {NAMESPACE: self.namespace.keyword}
    # Optional fields are emitted only when they hold a truthy value.
    for key, value in ((NAME, self.name), (IDENTIFIER, self.identifier)):
        if value:
            result[key] = value
    if include_id:
        result['id'] = self.id
    return result
def _get_id_and_model ( self , id_or_model ) :
"""Get both the model and ID of an object that could be an ID or a model .
: param id _ or _ model :
The object that could be an ID string or a model object .
: param model _ collection :
The collection to which the model belongs .""" | if isinstance ( id_or_model , self . collection . model ) :
model = id_or_model
elif isinstance ( id_or_model , str ) : # Assume we have an ID string
model = self . collection . get ( id_or_model )
else :
raise TypeError ( 'Unexpected type {}, expected {} or {}' . format ( type ( id_or_model ) , str , self . collection . model ) )
return model . id , model |
def _time_to_string ( self , dt , conversion_string = "%Y %m %d %H %M" ) :
"""This converts a UTC time integer to a string""" | if self . output_timezone is not None :
dt = dt . replace ( tzinfo = utc ) . astimezone ( self . output_timezone )
return dt . strftime ( conversion_string ) |
def add_input_data_to_scoped_data(self, dictionary):
    """Add a dictionary of input values to the scoped data.

    The dictionary maps data-port names to values; each entry is matched
    against this state's input data ports, stored as scoped data, and then
    forwarded along data flows that target scoped variables of this state.

    :param dictionary: the dictionary that is added to the scoped data
    """
    for name, value in dictionary.items():
        for port_key, port in list(self.input_data_ports.items()):
            if name != port.name:
                continue
            self.scoped_data[str(port_key) + self.state_id] = ScopedData(
                port.name, value, type(value), self.state_id, ScopedVariable, parent=self)
            # Forward the value to scoped variables fed by this input port.
            for flow_key, flow in self.data_flows.items():
                if flow.from_key != port_key or flow.from_state != self.state_id:
                    continue
                if flow.to_state == self.state_id and flow.to_key in self.scoped_variables:
                    target = self.scoped_variables[flow.to_key]
                    self.scoped_data[str(flow.to_key) + self.state_id] = ScopedData(
                        target.name, value, type(value), self.state_id, ScopedVariable, parent=self)
def get_assessment_metadata(self):
    """Get the metadata for an assessment.

    return: (osid.Metadata) - metadata for the assessment
    *compliance: mandatory -- This method must be implemented.*
    """
    # Based on the osid.resource.ResourceForm.get_group_metadata template.
    assessment_mdata = dict(self._mdata['assessment'])
    assessment_mdata['existing_id_values'] = self._my_map['assessmentId']
    return Metadata(**assessment_mdata)
def trees_by_issn(self, issn):
    """Search trees by `issn`.

    Args:
        issn (str): :attr:`.Tree.issn` property of :class:`.Tree`.

    Returns:
        set: Set of matching :class:`Tree` instances.
    """
    matches = self.issn_db.get(issn, OOSet())
    return set(matches.keys())
def resp_graph(dataframe, image_name, dir='./'):
    """Render a response-time graph for bucketed data.

    :param pandas.DataFrame dataframe: dataframe containing all data
    :param str image_name: the output file name
    :param str dir: the output directory
    :return: None
    """
    fig = pygal.TimeLine(
        x_title='Elapsed Time In Test (secs)',
        y_title='Response Time (secs)',
        x_label_rotation=25,
        js=('scripts/pygal-tooltip.min.js',))
    # One series per percentile column; nulls are plotted as gaps.
    for label, column in (('AVG', 'mean'), ('90%', '90%'), ('80%', '80%')):
        points = [(get_local_time(index), row[column] if pd.notnull(row[column]) else None)
                  for index, row in dataframe.iterrows()]
        fig.add(label, points)
    fig.render_to_file(filename=os.path.join(dir, image_name))
def validate(self):
    """Perform some basic checks to help ensure that the specification is valid.

    Throws an exception if an invalid value is found.
    Returns true if all checks were passed.

    :return: boolean
    """
    # No attribute may be left unset.
    for attr, value in self.__dict__.items():
        if value is None:
            raise ValueError(attr + " is not set")
    # The module name must avoid reserved characters.
    invalid_chars = GPTaskSpec.invalid_chars()
    if any(char in invalid_chars for char in self.name):
        raise ValueError("module name includes invalid characters: " + self.name)
    # The LSID must be well formed.
    self._valid_lsid()
    # These attributes must each be collections of strings.
    string_collections = (
        ("categories", self.categories),
        ("file_format", self.file_format),
        ("support_files", self.support_files),
    )
    for label, values in string_collections:
        if not self.all_strings(values):
            raise TypeError(label + " contains non-string value: " + str(values))
    # Parameters must all be GPParamSpec instances, each individually valid.
    if not self._all_params(self.parameters):
        raise TypeError("parameters contains non-GPParamSpec value: " + str(self.parameters))
    for param in self.parameters:
        param.validate()
    # Everything checks out.
    return True
def _parse_dsn(dsn):
    """Parse a data source name.

    This is a helper function to split the data source name provided in
    the from_dsn classmethod.

    :param dsn: DSN string, e.g. ``udp+influxdb://user:pass@host:8086/db``.
    :return: dict of keyword arguments for the client constructor.
    :raises ValueError: if the scheme is not ``influxdb`` or the modifier
        is neither ``udp`` nor ``https``.
    """
    conn_params = urlparse(dsn)
    init_args = {}
    # The scheme may carry an optional transport modifier: 'udp+influxdb'.
    scheme_info = conn_params.scheme.split('+')
    if len(scheme_info) == 1:
        scheme = scheme_info[0]
        modifier = None
    else:
        modifier, scheme = scheme_info
    if scheme != 'influxdb':
        raise ValueError('Unknown scheme "{0}".'.format(scheme))
    if modifier:
        if modifier == 'udp':
            init_args['use_udp'] = True
        elif modifier == 'https':
            init_args['ssl'] = True
        else:
            raise ValueError('Unknown modifier "{0}".'.format(modifier))
    # The netloc may list several comma-separated host:port pairs.
    netlocs = conn_params.netloc.split(',')
    init_args['hosts'] = []
    for netloc in netlocs:
        parsed = _parse_netloc(netloc)
        init_args['hosts'].append((parsed['host'], int(parsed['port'])))
    # NOTE(review): credentials come from the *last* parsed netloc (loop
    # variable reused after the loop) -- confirm this is intended.
    init_args['username'] = parsed['username']
    init_args['password'] = parsed['password']
    # A path longer than a bare '/' names the database.
    if conn_params.path and len(conn_params.path) > 1:
        init_args['database'] = conn_params.path[1:]
    return init_args
def check_component_for_specific_sbo_term(items, term):
    r"""Identify model components that lack a specific SBO term(s).

    Parameters
    ----------
    items : list
        A list of model components i.e. reactions to be checked for a
        specific SBO term.
    term : str or list of str
        A string denoting a valid SBO term matching the regex
        '^SBO:\d{7}$' or a list containing such string elements.

    Returns
    -------
    list
        The components without any or that specific SBO term annotation.
    """
    # Normalize to a list so a single term and multiple terms share one path.
    allowed = term if isinstance(term, list) else [term]
    missing = []
    for component in items:
        annotation = component.annotation
        if annotation is None or 'sbo' not in annotation:
            missing.append(component)
        elif not any(candidate in annotation['sbo'] for candidate in allowed):
            missing.append(component)
    return missing
def ipv4_public(self, network=False, address_class=None):
    """Return a public IPv4 excluding private blocks.

    :param network: Network address
    :param address_class: IPv4 address class (a, b, or c)
    :returns: Public IPv4
    """
    # Start from the requested (or random) address-class network.
    chosen_class = address_class or self.ipv4_network_class()
    candidates = [_IPv4Constants._network_classes[chosen_class]]
    # Strip out private and special-use ranges so only public space remains.
    candidates = self._exclude_ipv4_networks(
        candidates,
        _IPv4Constants._private_networks + _IPv4Constants._excluded_networks,
    )
    subnet = self.generator.random.choice(candidates)
    return self._random_ipv4_address_from_subnet(subnet, network)
def netHours(self):
    '''For regular event staff, this is the net hours worked for financial purposes.
    For Instructors, netHours is calculated net of any substitutes.'''
    if self.specifiedHours is not None:
        return self.specifiedHours
    # Instructors: subtract the hours covered by each substitute.
    substituted = sum(sub.netHours for sub in self.replacementFor.all())
    return self.event.duration - substituted
def set_symbol(self, symbol):
    """(symbol, bondorder) -> set the bond symbol of the molecule.

    .. deprecated:: this setter is no longer supported and always raises.

    :raises NotImplementedError: always; callers must stop using it.
    """
    # BUG FIX: the original ``raise "Deprecated"`` raises a *string*, which
    # is itself a TypeError in Python 3; raise a real exception instead.
    raise NotImplementedError("Deprecated")
    # Unreachable legacy implementation, kept for reference:
    self.symbol, self.bondtype, bondorder, self.equiv_class = BONDLOOKUP[symbol]
    if self.bondtype == 4:
        self.aromatic = 1
    else:
        self.aromatic = 0
def simplify(cls, content_type):
    """Return the simplified, lowercase form of a MIME type.

    The MIME type's main- and sub-label can both start with <tt>x-</tt>,
    which indicates a non-registered name. After registration this flag
    can disappear, adding to the confusing proliferation of MIME types.
    The simplified string has the <tt>x-</tt> removed and is lowercased.
    """
    match = MEDIA_TYPE_RE.match(content_type)
    if match is None:
        return None
    media_type, subtype = match.groups()

    def normalize(label):
        # Lowercase, then strip the unregistered 'x-' marker.
        return re.sub(UNREG_RE, '', label.lower())

    return '%s/%s' % (normalize(media_type), normalize(subtype))
def _parse_simple_model(topology, parent_scope, model, inputs, outputs):
    '''Parse a model containing only one operator (aka simple model).

    Steps:
    1. Create local scope for allocating local variables and operators
    2. Create operator and then feed the model's inputs and outputs to the operator
    3. Connect local variables and their corresponding parent variables

    Note:
    1. Notice that a CoreML operator can contain no input and output, so we directly use model's inputs (outputs).
    2. Input and output names can be identical in CoreML, but they must be different for ONNX.
    '''
    # Create local scope for the considered model
    scope = topology.declare_scope('single', [parent_scope] + parent_scope.parent_scopes)
    # Create operator for the considered model
    this_operator = scope.declare_local_operator(model.WhichOneof('Type'), model)
    # Allocate inputs for the operator and then connect them with inputs from outside
    for var in model.description.input:
        # We assume that no duplicated raw name exists. Note that we set prepend=True because model inputs should
        # not hide any intermediate variables.
        variable = scope.declare_local_variable(
            var.name,
            _parse_coreml_feature(var, topology.target_opset, topology.default_batch_size),
            prepend=True)
        this_operator.inputs.append(variable)
    # Connect local variables and variables passed into this scope. Our assumptions are described below.
    # 1. Assume a variable with 'A' as its CoreML name is passed in. There must be at least one local variable gets a
    #    raw name 'A'. That is, for each parent variable, at least one local duplicate is available.
    # 2. It's possible to find multiple local variables associated with the same raw name. For example, raw name 'A'
    #    can be associated with 'A' and 'A1' in ONNX. In this case, we connect the first one to parent input.
    for parent_variable in inputs:
        raw_name = parent_variable.raw_name
        child_variable = scope.variables[scope.variable_name_mapping[raw_name][0]]
        operator = scope.declare_local_operator('identity')
        operator.inputs.append(parent_variable)
        operator.outputs.append(child_variable)
    # Allocate outputs for the operator and then connect them with outputs from outside
    for var in model.description.output:
        # We assume that no duplicated output raw name exists.
        variable = scope.declare_local_variable(
            var.name,
            _parse_coreml_feature(var, topology.target_opset, topology.default_batch_size))
        this_operator.outputs.append(variable)
    # Connect local variables and variables passed into this scope. Our assumptions are described below.
    # 1. Assume a variable with 'A' as its CoreML name is passed in. There must be at least one local variable gets a
    #    raw name 'A'. That is, for each parent variable, at least one local duplicate is available.
    # 2. It's possible to find multiple local variables associated with the same raw name. For example, raw name 'A'
    #    can be associated with 'A' and 'A1' in ONNX. In this case, we connect the last one to parent output.
    for parent_variable in outputs:
        raw_name = parent_variable.raw_name
        child_variable = scope.variables[scope.variable_name_mapping[raw_name][-1]]
        operator = scope.declare_local_operator('identity')
        operator.inputs.append(child_variable)
        operator.outputs.append(parent_variable)
def monitor(self, operation='', **kw):
    """:returns: a new Monitor instance bound to this calculation's datastore."""
    new_mon = self._monitor(operation, hdf5=self.datastore.hdf5)
    # Keep both the factory and the new monitor pointing at the current calc.
    self._monitor.calc_id = new_mon.calc_id = self.datastore.calc_id
    vars(new_mon).update(kw)
    return new_mon
def _find_duplicates ( seq ) :
"""Find the duplicate elements from a sequence .""" | seen = set ( )
return [ element for element in seq if seq . count ( element ) > 1 and element not in seen and seen . add ( element ) is None ] |
def register(coordinator):
    """Registers this module as a worker with the given coordinator."""
    # Fail fast if the perceptual-diff binaries are missing or broken.
    utils.verify_binary('pdiff_compare_binary', ['-version'])
    utils.verify_binary('pdiff_composite_binary', ['-version'])
    assert FLAGS.pdiff_threads > 0
    assert FLAGS.queue_server_prefix
    workflow = queue_worker.RemoteQueueWorkflow(
        constants.PDIFF_QUEUE_NAME,
        DoPdiffQueueWorkflow,
        max_tasks=FLAGS.pdiff_threads,
        wait_seconds=FLAGS.pdiff_wait_seconds)
    workflow.root = True
    coordinator.input_queue.put(workflow)
def make_datetime(value):
    """Try to convert *value* to a :class:`datetime.datetime`.

    Strings are parsed as ISO 8601 timestamps; numbers are UNIX timestamps
    (UTC by definition); a dict becomes ``datetime.datetime(**value)``; a
    tuple or list becomes ``datetime.datetime(*value)``, using any timezone
    it carries. A resulting naive datetime raises ValueError.

    :param value: something to convert
    :return: the value after conversion
    :rtype: :class:`datetime.datetime`
    :raises: ValueError | TypeError
    """
    converted = _make_datetime(value)
    if converted.tzinfo:
        return converted
    raise ValueError(
        "value was a timestamp, but no timezone was set! "
        "Value was a '%s' object: %s"
        "\n\n"
        "Converted to naive 'datetime.datetime' object: %s"
        % (value.__class__.__name__, repr(value), repr(converted),))
def run(self, line):
    """Extract usable words from a tweet line.

    1. Remove non-ascii characters
    2. Split line into individual words
    3. Clean up punctuation characters
    """
    extracted = []
    for token in self.clean_unicode(line.lower()).split():
        # URLs carry no vocabulary value.
        if token.startswith('http'):
            continue
        word = self.clean_punctuation(token)
        # Keep only multi-character, non-stopword tokens.
        if len(word) > 1 and word not in self.stopwords:
            extracted.append(word)
    return extracted
def clean(self):
    """Remove all data by dropping and recreating the configured database.

    .. note::
        Only the configured database is removed. Any other databases
        remain untouched.
    """
    credentials = ['-U', self.user, self.database]
    self.exec_pg_success(['dropdb'] + credentials)
    self.exec_pg_success(['createdb'] + credentials)
def create_header(self):
    """Return the CSP header dict built from the configured source lists."""
    # Keyword-like sources must be wrapped in single quotes per the CSP spec.
    needs_quotes = re.compile("|".join([
        '^self', '^none', '^unsafe-inline', '^unsafe-eval',
        '^sha[\d]+-[\w=-]+', '^nonce-[\w=-]+']))
    directives = {}
    for directive, sources in self.inputs.items():
        rendered = []
        for source in sources:
            rendered.append("'%s'" % source if needs_quotes.match(source) else source)
        directives[directive] = ' '.join(rendered)
    # Empty directives are dropped from the final header value.
    header_value = '; '.join(
        '%s %s' % (name, value) for name, value in directives.items() if value != '')
    return {self.header: header_value}
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
    """Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.

    Pipes in input, handling paired and split inputs, using interleaving magic
    from: https://biowize.wordpress.com/2015/03/26/the-fastest-darn-fastq-decoupling-procedure-i-ever-done-seen/
    Then converts a tab delimited set of outputs into interleaved fastq.
    awk changes spaces to underscores since SNAP only takes the initial name.
    SNAP requires /1 and /2 at the end of read names. If these are not present
    in the initial fastq may need to expand awk code to do this.
    """
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data)))
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    resources = config_utils.get_resources("snap", data["config"])
    rg_info = novoalign.get_rg_info(names)
    if data.get("align_split"):
        # Split alignment: inputs are named-pipe command strings like '<(cmd)'.
        final_file = out_file
        out_file, data = alignprep.setup_combine(final_file, data)
        fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
        # Strip the surrounding '<(' ... ')' from the named-pipe string.
        fastq_file = fastq_file[2:-1]
        if pair_file:
            pair_file = pair_file[2:-1]
            stream_input = (
                r"paste <({fastq_file} | paste - - - -) "
                r"<({pair_file} | paste - - - -) | "
                r"""awk 'BEGIN {{FS="\t"; OFS="\n"}} """
                r"""{{ """
                r"""split($1, P1, " "); split($5, P5, " "); """
                r"""if ($1 !~ /\/1$/) $1 = P1[1]"/1"; if ($5 !~ /\/2$/) $5 = P5[1]"/2"; """
                r"""gsub(" ", "_", $1); gsub(" ", "_", $5); """
                r"""print $1, $2, "+", $4, $5, $6, "+", $8}}' """)
        else:
            # NOTE(review): fastq_file was already sliced [2:-1] above; this
            # second slice trims two more leading and one trailing char --
            # confirm the intended named-pipe format.
            stream_input = fastq_file[2:-1]
    else:
        final_file = None
        assert fastq_file.endswith(".gz")
        if pair_file:
            stream_input = (
                r"paste <(zcat {fastq_file} | paste - - - -) "
                r"<(zcat {pair_file} | paste - - - -) | "
                r"""awk 'BEGIN {{FS="\t"; OFS="\n"}} """
                r"""{{ """
                r"""split($1, P1, " "); split($5, P5, " "); """
                r"""if ($1 !~ /\/1$/) $1 = P1[1]"/1"; if ($5 !~ /\/2$/) $5 = P5[1]"/2"; """
                r"""gsub(" ", "_", $1); gsub(" ", "_", $5); """
                r"""print $1, $2, "+", $4, $5, $6, "+", $8}}' """)
        else:
            stream_input = "zcat {fastq_file}"
    pair_file = pair_file if pair_file else ""
    if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
        # NOTE(review): pair_file is "" (not None) for single-end here, so
        # 'pair_file is not None' is always True -- confirm tobam_cl's flag.
        with postalign.tobam_cl(data, out_file, pair_file is not None) as (tobam_cl, tx_out_file):
            if pair_file:
                sub_cmd = "paired"
                input_cmd = "-pairedInterleavedFastq -"
            else:
                sub_cmd = "single"
                input_cmd = "-fastq -"
            # Substitute {fastq_file}/{pair_file} placeholders from locals.
            stream_input = stream_input.format(**locals())
            tmp_dir = os.path.dirname(tx_out_file)
            cmd = ("export TMPDIR={tmp_dir} && unset JAVA_HOME && {stream_input} | "
                   "snap-aligner {sub_cmd} {index_dir} {input_cmd} "
                   "-R '{rg_info}' -t {num_cores} -M -o -sam - | ")
            do.run(cmd.format(**locals()) + tobam_cl, "SNAP alignment: %s" % names["sample"])
    data["work_bam"] = out_file
    return data
async def save(self, db=None):
    '''If object has _id, then object will be created or fully rewritten.
    If not, object will be inserted and _id will be assigned.

    :param db: optional database handle; falls back to ``self.db``.
    '''
    # NOTE(review): the override is stored on self._db, but the code below
    # keeps using self.db -- confirm the db property resolves through _db.
    self._db = db or self.db
    data = self.prepare_data()
    # validate object
    self.validate()
    # connect to DB to save the model
    for i in self.connection_retries():
        try:
            created = False if '_id' in data else True
            # NOTE(review): insert_one is used even when '_id' is present,
            # while the docstring promises a full rewrite -- verify this
            # does not raise DuplicateKeyError for existing documents.
            result = await self.db[self.get_collection_name()].insert_one(data)
            self._id = result.inserted_id
            # emit post save
            asyncio.ensure_future(post_save.send(sender=self.__class__, db=self.db, instance=self, created=created))
            break
        except ConnectionFailure as ex:
            # Retry on connection failures; re-raise once retries exceed.
            exceed = await self.check_reconnect_tries_and_wait(i, 'save')
            if exceed:
                raise ex
def patch(**on):
    """Globally patches certain system modules to be 'cooperative'.

    The keyword arguments afford some control over which modules are patched.
    If no keyword arguments are supplied, all possible modules are patched.
    If keywords are set to True, only the specified modules are patched. E.g.,
    ``monkey_patch(socket=True, select=True)`` patches only the select and
    socket modules. Most arguments patch the single module of the same name
    (os, time, select). The exception is socket, which also patches the ssl
    module if present.

    It's safe to call monkey_patch multiple times.
    """
    accepted_args = set(('select', 'socket', 'time'))
    default_on = on.pop("all", None)
    # Reject unknown keyword arguments up front.
    for k in on.keys():
        if k not in accepted_args:
            raise TypeError("patch() got an unexpected keyword argument %r" % k)
    if default_on is None:
        # If any module was explicitly enabled, everything else defaults off.
        default_on = not (True in list(on.values()))
    for modname in accepted_args:
        on.setdefault(modname, default_on)
    modules_to_patch = []
    # already_patched (module-level state) makes repeated calls idempotent.
    if on['select'] and not already_patched.get('select'):
        modules_to_patch += _select_modules()
        already_patched['select'] = True
    if on['socket'] and not already_patched.get('socket'):
        modules_to_patch += _socket_modules()
        already_patched['socket'] = True
    if on['time'] and not already_patched.get('time'):
        modules_to_patch += _time_modules()
        already_patched['time'] = True
    # NOTE: 'imp' is deprecated (removed in Python 3.12); the import lock
    # guards against concurrent imports while attributes are swapped in.
    imp.acquire_lock()
    try:
        for name, mod in modules_to_patch:
            orig_mod = sys.modules.get(name)
            if orig_mod is None:
                orig_mod = __import__(name)
            # Overwrite each attribute listed in the patch module's
            # __patched__ with its cooperative replacement.
            for attr_name in mod.__patched__:
                patched_attr = getattr(mod, attr_name, None)
                if patched_attr is not None:
                    setattr(orig_mod, attr_name, patched_attr)
    finally:
        imp.release_lock()
def communicate(self):
    """Block until the subprocess finishes and return [stdout, stderr] bytes.

    Also emits ``sig_finished`` exactly once per process run.
    """
    self._communicate_first = True
    self._process.waitForFinished()
    enco = self._get_encoding()
    # Prefer stdout captured incrementally over a fresh read of the buffer.
    if self._partial_stdout is None:
        raw_stdout = self._process.readAllStandardOutput()
        stdout = handle_qbytearray(raw_stdout, enco)
    else:
        stdout = self._partial_stdout
    raw_stderr = self._process.readAllStandardError()
    stderr = handle_qbytearray(raw_stderr, enco)
    result = [stdout.encode(enco), stderr.encode(enco)]
    if PY2:
        stderr = stderr.decode()
        # NOTE(review): stderr is replaced by '' on Python 2 -- confirm this
        # deliberate discard is still wanted.
        result[-1] = ''
    self._result = result
    # Emit the finished signal only once per run.
    if not self._fired:
        self.sig_finished.emit(self, result[0], result[-1])
    self._fired = True
    return result
def show_vcs_output_vcs_nodes_vcs_node_info_node_swbd_number(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    show_vcs = ET.Element("show_vcs")
    # The generated code replaces the 'config' root with 'show_vcs'.
    config = show_vcs
    # Build the nested element chain down to the leaf node.
    node = show_vcs
    for tag in ("output", "vcs-nodes", "vcs-node-info", "node-swbd-number"):
        node = ET.SubElement(node, tag)
    node.text = kwargs.pop('node_swbd_number')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def previous_visit(self):
    """Returns the previous visit for this request or None.

    Requires attr `visit_model_cls`.  Walks backwards through the
    appointment chain until a visit is found or the chain is exhausted.
    """
    previous_visit = None
    if self.appointment:
        appointment = self.appointment
        while appointment.previous_by_timepoint:
            try:
                previous_visit = self.model.visit_model_cls().objects.get(
                    appointment=appointment.previous_by_timepoint)
            except ObjectDoesNotExist:
                # No visit at this timepoint; keep walking backwards.
                pass
            else:
                # Found one -- stop at the nearest previous visit.
                break
            appointment = appointment.previous_by_timepoint
    return previous_visit
def stop_capture(self):
    """See base class documentation."""
    if self._process is None:
        raise sniffer.InvalidOperationError("Trying to stop a non-started process")
    # SIGINT lets the capture tool flush its output before exiting.
    utils.stop_standing_subprocess(self._process, kill_signal=signal.SIGINT)
    self._post_process()
def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
    '''Create simple convolutional model'''
    model = Sequential([
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(100, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ])
    # Optimizer choice and learning rate come from the hyper-parameter dict.
    if hyper_params['optimizer'] == 'Adam':
        optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
    else:
        optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
def loads(s: str, **kwargs) -> JsonObj:
    """Convert a JSON string into a JsonObj.

    :param s: a str instance containing a JSON document
    :param kwargs: arguments, see json.load for details
    :return: JsonObj representing the json string
    """
    if isinstance(s, (bytes, bytearray)):
        # Mirror json.loads' own handling of encoded input.
        s = s.decode(json.detect_encoding(s), 'surrogatepass')
    return json.loads(s, object_hook=lambda pairs: JsonObj(**pairs), **kwargs)
def group_vectors(vectors, angle=1e-4, include_negative=False):
    """Group direction vectors within an angular tolerance.

    Parameters
    ----------
    vectors : (n, 3) float
        Direction vectors.
    angle : float
        Group vectors closer than this angle in radians.
    include_negative : bool
        If True, treat [0, 0, 1] and [0, 0, -1] as the same direction.

    Returns
    -------
    new_vectors : (m, 3) float
        Direction vectors.
    groups : (m,) sequence of int
        Indices of source vectors.
    """
    vectors = np.asanyarray(vectors, dtype=np.float64)
    tolerance = float(angle)
    if include_negative:
        # Fold antiparallel vectors onto one hemisphere so they compare equal.
        vectors = util.vector_hemisphere(vectors)
    spherical = util.vector_to_spherical(vectors)
    grouped_angles, groups = group_distance(spherical, tolerance)
    return util.spherical_to_vector(grouped_angles), groups
def get_field_errors(node):
    """Return a list of FieldErrors for a securityData element, or None if it has none."""
    assert node.Name == 'securityData' and not node.IsArray
    exceptions = node.GetElement('fieldExceptions')
    if exceptions.NumValues <= 0:
        return None
    security_id = XmlHelper.get_child_value(node, 'security')
    return XmlHelper.as_field_error(exceptions, security_id)
def coroutine(func: Callable[..., "Generator[Any, Any, _T]"]) -> Callable[..., "Future[_T]"]:
    """Decorator for asynchronous generators.

    For compatibility with older versions of Python, coroutines may
    also "return" by raising the special exception `Return(value)
    <Return>`.

    Functions with this decorator return a `.Future`.

    .. warning::
       When exceptions occur inside a coroutine, the exception
       information will be stored in the `.Future` object. You must
       examine the result of the `.Future` object, or the exception
       may go unnoticed by your code. This means yielding the function
       if called from another coroutine, using something like
       `.IOLoop.run_sync` for top-level calls, or passing the `.Future`
       to `.IOLoop.add_future`.

    .. versionchanged:: 6.0
       The ``callback`` argument was removed. Use the returned
       awaitable object instead.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # type: (*Any, **Any) -> Future[_T]
        # This function is type-annotated with a comment to work around
        # https://bitbucket.org/pypy/pypy/issues/2868/segfault-with-args-type-annotation-in
        future = _create_future()
        try:
            result = func(*args, **kwargs)
        except (Return, StopIteration) as e:
            result = _value_from_stopiteration(e)
        except Exception:
            future_set_exc_info(future, sys.exc_info())
            try:
                return future
            finally:
                # Avoid circular references
                future = None  # type: ignore
        else:
            if isinstance(result, Generator):
                # Inline the first iteration of Runner.run. This lets us
                # avoid the cost of creating a Runner when the coroutine
                # never actually yields, which in turn allows us to
                # use "optional" coroutines in critical path code without
                # performance penalty for the synchronous case.
                try:
                    yielded = next(result)
                except (StopIteration, Return) as e:
                    future_set_result_unless_cancelled(future, _value_from_stopiteration(e))
                except Exception:
                    future_set_exc_info(future, sys.exc_info())
                else:
                    # Provide strong references to Runner objects as long
                    # as their result future objects also have strong
                    # references (typically from the parent coroutine's
                    # Runner). This keeps the coroutine's Runner alive.
                    # We do this by exploiting the public API
                    # add_done_callback() instead of putting a private
                    # attribute on the Future.
                    # (Github issues #1769, #2229).
                    runner = Runner(result, future, yielded)
                    future.add_done_callback(lambda _: runner)
                yielded = None
                try:
                    return future
                finally:
                    # Subtle memory optimization: if next() raised an exception,
                    # the future's exc_info contains a traceback which
                    # includes this stack frame. This creates a cycle,
                    # which will be collected at the next full GC but has
                    # been shown to greatly increase memory usage of
                    # benchmarks (relative to the refcount-based scheme
                    # used in the absence of cycles). We can avoid the
                    # cycle by clearing the local variable after we return it.
                    future = None  # type: ignore
            # Non-generator result: the function completed synchronously.
            future_set_result_unless_cancelled(future, result)
            return future

    wrapper.__wrapped__ = func  # type: ignore
    wrapper.__tornado_coroutine__ = True  # type: ignore
    return wrapper
def write(self, offset, value):
    """Write the memory word at ``offset`` to ``value``.

    Might raise ReadOnlyError, if the device is read-only.
    Might raise AddressError, if the offset exceeds the size of the device.
    """
    # Bit 1 of the mode field grants write permission.
    writable = bool(self.mode & 0b10)
    if not writable:
        raise ReadOnlyError("Device is Read-Only")
    if offset >= self.size:
        raise AddressError("Offset({}) not in address space({})".format(offset, self.size))
    self.repr_[offset].setvalue(value)
def stansummary(fit, pars=None, probs=(0.025, 0.25, 0.5, 0.75, 0.975), digits_summary=2):
    """Build a summary statistic table for a fitted Stan model, as a string.

    Parameters
    ----------
    fit : StanFit4Model object
    pars : str or sequence of str, optional
        Parameter names. By default use all parameters.
    probs : sequence of float, optional
        Quantiles. By default, (0.025, 0.25, 0.5, 0.75, 0.975).
    digits_summary : int, optional
        Number of significant digits. By default, 2.

    Returns
    -------
    summary : string
        Table includes mean, se_mean, sd, probs_0, ..., probs_n, n_eff and Rhat.

    Examples
    --------
    >>> model_code = 'parameters {real y;} model {y ~ normal(0,1);}'
    >>> m = StanModel(model_code=model_code, model_name="example_model")
    >>> fit = m.sampling()
    >>> print(stansummary(fit))
    """
    # mode 1 is a 'test_grad' run, mode 2 holds no samples: no table possible.
    if fit.mode == 1:
        return "Stan model '{}' is of mode 'test_grad';\n" "sampling is not conducted.".format(fit.model_name)
    elif fit.mode == 2:
        return "Stan model '{}' does not contain samples.".format(fit.model_name)
    # Post-warmup draws kept per chain.
    n_kept = [s - w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]
    header = "Inference for Stan model: {}.\n".format(fit.model_name)
    header += "{} chains, each with iter={}; warmup={}; thin={}; \n"
    # NOTE(review): five arguments for four placeholders — the trailing
    # sum(n_kept) is silently ignored by str.format; confirm intent.
    header = header.format(fit.sim['chains'], fit.sim['iter'], fit.sim['warmup'], fit.sim['thin'], sum(n_kept))
    header += "post-warmup draws per chain={}, total post-warmup draws={}.\n\n"
    header = header.format(n_kept[0], sum(n_kept))
    footer = "\n\nSamples were drawn using {} at {}.\n" "For each parameter, n_eff is a crude measure of effective sample size,\n" "and Rhat is the potential scale reduction factor on split chains (at \n" "convergence, Rhat=1)."
    sampler = fit.sim['samples'][0]['args']['sampler_t']
    # %c is the locale's date/time representation.
    date = fit.date.strftime('%c')
    footer = footer.format(sampler, date)
    # Compute the per-parameter statistics and render them as a text table.
    s = _summary(fit, pars, probs)
    body = _array_to_table(s['summary'], s['summary_rownames'], s['summary_colnames'], digits_summary)
    return header + body + footer
def pip_uninstall(package, **options):
    """Uninstall a python package via pip.

    :param package: a package name or a list of package names.
    :param options: extra pip options; only 'proxy' and 'log' are honoured.
    """
    command = ["uninstall", "-q", "-y"]
    # Translate the recognised keyword options into pip CLI flags.
    command.extend(parse_options(options, ('proxy', 'log',)))
    targets = package if isinstance(package, list) else [package]
    command.extend(targets)
    log("Uninstalling {} package with options: {}".format(package, command))
    pip_execute(command)
def parse(el, typ):
    """Parse a ``BeautifulSoup`` element as the given type.

    Falls back to ``typ()`` (the type's zero-arg default) when the element
    is missing or has no text content.
    """
    if el:
        content = text(el)
        if content:
            return typ(content)
    return typ()
def get_major_minor(ilo_ver_str):
    """Extract the "<major>.<minor>" number from an iLO version string.

    Works for strings that embed the version as <major>.<minor>, e.g.
    release "2.50 Feb 18 2016", debug "iLO 4 v2.50", or "XYZ ABC 2.30".

    :param ilo_ver_str: the string that contains the version information.
    :returns: string of the form "<major>.<minor>", or None.
    """
    if not ilo_ver_str:
        return None
    try:
        match = re.search(ILO_VER_STR_PATTERN, ilo_ver_str)
        # group(0) is the full matched substring; an empty match counts as
        # "not found".
        if match and match.group(0):
            return match.group(0)
    except Exception:
        # Any parsing problem is treated as "version unknown".
        pass
    return None
def get_installed_apps():
    """Return the models modules of all installed Django apps."""
    if django.VERSION < (1, 7):
        # Legacy (pre-app-registry) API.
        from django.db import models
        return models.get_apps()
    from django.apps import apps
    return [cfg.models_module for cfg in apps.get_app_configs() if cfg.models_module is not None]
def from_rfc3339(rfc3339_text, with_nanos=False):
    """Parse an RFC 3339 date string into a datetime.

    Example of accepted format: '1972-01-01T10:00:20.021-05:00'.

    Args:
      rfc3339_text (string): an RFC 3339 formatted date string.
      with_nanos (bool): when True, also return the nanosecond component.

    Raises:
      ValueError: if ``rfc3339_text`` is invalid.

    Returns:
      :class:`datetime.datetime` when with_nanos is False, otherwise a
      tuple ``(datetime.datetime, int)`` where the int is the sub-second
      part of the timestamp expressed in nanoseconds.
    """
    timestamp = strict_rfc3339.rfc3339_to_timestamp(rfc3339_text)
    parsed = datetime.datetime.utcfromtimestamp(timestamp)
    if not with_nanos:
        return parsed
    fractional = timestamp - int(timestamp)
    return (parsed, int(fractional * 1e9))
def feature_union(names, steps, weights):
    """Reconstruct a FeatureUnion from names, steps, and weights.

    Returns an ``(estimator, fit_time)`` pair; the estimator slot holds
    FIT_FAILURE when any step failed to fit.
    """
    fitted_steps, durations = zip(*map(_maybe_timed, steps))
    total_time = sum(durations)
    if any(step is FIT_FAILURE for step in fitted_steps):
        return FIT_FAILURE, total_time
    union = FeatureUnion(list(zip(names, fitted_steps)), transformer_weights=weights)
    return union, total_time
def stats(self, topic=None, channel=None, text=False):
    """Return internal instrumented statistics.

    :param topic: (optional) filter to topic
    :param channel: (optional) filter to channel
    :param text: return the stats as a string (default: ``False``)
    """
    fields = {'format': 'text' if text else 'json'}
    if topic:
        # Validate names before sending them to the daemon.
        nsq.assert_valid_topic_name(topic)
        fields['topic'] = topic
    if channel:
        nsq.assert_valid_channel_name(channel)
        fields['channel'] = channel
    return self._request('GET', '/stats', fields=fields)
def epcr_primer_file(self, formattedprimers):
    """Create the ePCR (re-PCR)-compatible primer file from the dictionaries
    of primer combinations.

    :param formattedprimers: path of the output primer file.
    """
    logging.info('Creating re-PCR-compatible primer file')
    with open(formattedprimers, 'w') as formatted:
        # Walk targets in a stable (sorted) order.
        for basename in sorted(self.forward_dict):
            forward_primers = self.forward_dict[basename]
            reverse_primers = self.reverse_dict[basename]
            # Every forward/reverse combination gets its own record, named
            # with the target and both indices, e.g. vtx1a_0_0.
            for fwd_index, fwd_primer in enumerate(forward_primers):
                for rev_index, rev_primer in enumerate(reverse_primers):
                    primer_name = '{bn}_{fi}_{ri}'.format(bn=basename, fi=fwd_index, ri=rev_index)
                    formatted.write('{pn}\t{fp}\t{rp}\n'.format(pn=primer_name, fp=fwd_primer, rp=rev_primer))
def pathwaysKEGG(organism):
    """Retrieve all pathways for a given organism from the KEGG REST API.

    :param organism: an organism as listed in organismsKEGG().
    :returns df: a Pandas dataframe with the columns 'KEGGid', 'pathIDs', and 'pathName'.
    :returns df_: a Pandas dataframe with a 'KEGGid' column plus one column per
        pathway holding the corresponding gene ids.
    """
    print("KEGG API: http://rest.kegg.jp/list/pathway/" + organism)
    sys.stdout.flush()
    # NOTE(review): urlopen(...).read() returns bytes on Python 3, where
    # .split("\n") would raise — this code appears to assume Python 2 str;
    # confirm the supported interpreter version.
    kegg_paths = urlopen("http://rest.kegg.jp/list/pathway/" + organism).read()
    kegg_paths = kegg_paths.split("\n")
    final = []
    for k in kegg_paths:
        final.append(k.split("\t"))
    # The trailing newline yields one empty record at the end — drop it.
    df = pd.DataFrame(final[0:len(final) - 1])[[0, 1]]
    df.columns = ['pathID', 'pathName']
    print("KEGG API: http://rest.kegg.jp/link/" + organism + "/pathway/")
    sys.stdout.flush()
    # Second call links pathway ids to gene (KEGG) ids.
    kegg_paths_genes = urlopen("http://rest.kegg.jp/link/" + organism + "/pathway/").read()
    kegg_paths_genes = kegg_paths_genes.split("\n")
    kegg_paths_genes = [s.split("\t") for s in kegg_paths_genes]
    kegg_paths_genes = pd.DataFrame(kegg_paths_genes)
    kegg_paths_genes.columns = ['pathID', 'KEGGid']
    df = pd.merge(kegg_paths_genes, df, on=["pathID"], how="outer")
    def CombineAnn(df):
        # Collapse all non-NaN ids/names of one KEGGid group into
        # comma-separated strings.
        return pd.Series(dict(KEGGid=', '.join([s for s in list(set(df['KEGGid'])) if str(s) != "nan"]), pathIDs=', '.join([s for s in list(set(df['pathID'])) if str(s) != "nan"]), pathName=', '.join([s for s in list(set(df['pathName'])) if str(s) != "nan"])))
    df = df.groupby('KEGGid', as_index=True).apply(CombineAnn)
    df.reset_index(inplace=True, drop=True)
    # Build the wide table: one column per pathway, populated with gene ids.
    df_ = kegg_paths_genes[['KEGGid']].drop_duplicates()
    for c in list(set(kegg_paths_genes["pathID"].tolist())):
        tmp = kegg_paths_genes[kegg_paths_genes["pathID"] == c][["KEGGid"]].drop_duplicates().dropna()
        tmp.columns = [c]
        df_ = pd.merge(df_, tmp, left_on=["KEGGid"], right_on=[c], how="outer")
    return df, df_
def DeleteAttachment(self, attachment_link, options=None):
    """Deletes an attachment.

    :param str attachment_link: The link to the attachment.
    :param dict options: The request options for the request.
    :return: The deleted Attachment.
    :rtype: dict
    """
    if options is None:
        options = {}
    # Resolve the resource path and id from the attachment link.
    resource_path = base.GetPathFromLink(attachment_link)
    resource_id = base.GetResourceIdOrFullNameFromLink(attachment_link)
    return self.DeleteResource(resource_path, 'attachments', resource_id, None, options)
def async_update(self, event, reason=None):
    """Handle a new event for the sensor.

    Checks whether state or config data is part of the event, signals that
    the sensor has updated attributes, and records what changed in *reason*.

    :param event: event dict, possibly containing 'state' and/or 'config' data.
    :param reason: optional dict filled in with 'state'/'config' presence flags
        and the list of changed attribute names under 'attr'.
    """
    # Bug fix: the original used a mutable default argument (reason={}),
    # which is shared across every call and leaks 'attr'/'state'/'config'
    # entries between events. Create a fresh dict per call instead.
    if reason is None:
        reason = {}
    reason['attr'] = []
    for data in ['state', 'config']:
        changed_attr = self.update_attr(event.get(data, {}))
        reason[data] = data in event
        reason['attr'] += changed_attr
    super().async_update(event, reason)
def _filename ( self_or_cls , filename ) :
"Add the file extension if not already present" | if not filename . endswith ( self_or_cls . file_ext ) :
return '%s.%s' % ( filename , self_or_cls . file_ext )
else :
return filename |
def dataset_walker(datasets):
    """Walk through *datasets* and their ancillary data.

    Yields ``(dataset, parent)`` pairs; top-level datasets have parent None.
    """
    for ds in datasets:
        yield ds, None
        for ancillary in ds.attrs.get('ancillary_variables', []):
            # Only yield entries that are dataset-like (expose .attrs);
            # hasattr mirrors the original try/except AttributeError probe.
            if hasattr(ancillary, 'attrs'):
                yield ancillary, ds
def fire(self, target, topic, content, callback=None):
    """Fire a message at *target*, optionally registering *callback* for the reply."""
    message = self.__make_message(topic, content)
    uid = message['uid']
    if callback is not None:
        # Remember the callback keyed by the message uid so the reply
        # can be routed back to it.
        self.__callbacks[uid] = ('fire', callback)
    self.__client.send_message(target, json.dumps(message), uid)
def _screaming_snake_case ( cls , text ) :
"""Transform text to SCREAMING _ SNAKE _ CASE
: param text :
: return :""" | if text . isupper ( ) :
return text
result = ''
for pos , symbol in enumerate ( text ) :
if symbol . isupper ( ) and pos > 0 :
result += '_' + symbol
else :
result += symbol . upper ( )
return result |
def analyze_quality_table(self, obj, low_bound=None, high_bound=None):
    """Analyze the data quality of each column of an MDAL query result.

    To Do: make it specific for varying meters and label it for each type,
    either with separate functions or by broadening this one.

    Parameters
    ----------
    obj :
        the object returned by the MDAL Query; its ``df`` attribute is used.
    low_bound : float
        all data equal to or below this value is interpreted as missing.
    high_bound : float
        all data above this value is interpreted as missing.

    Returns
    -------
    pd.DataFrame
        one column per input column with rows: % missing data, average
        duration of a missing-data event, and its standard deviation.
    """
    data = obj.df
    # Three quality metrics (rows) per data column.
    N_rows = 3
    N_cols = data.shape[1]
    # NOTE(review): columns=[data.columns] wraps the Index in a list —
    # confirm this (rather than columns=data.columns) is intended.
    d = pd.DataFrame(np.zeros((N_rows, N_cols)), index=['% Missing', 'AVG Length Missing', 'Std dev. Missing'], columns=[data.columns])
    # Out-of-bounds readings are masked to NaN so they count as missing.
    if low_bound:
        data = data.where(data >= low_bound)
    if high_bound:
        data = data.where(data < high_bound)
    for i in range(N_cols):
        data_per_meter = data.iloc[:, [i]]
        # identify_missing returns a boolean frame plus the meter's name.
        data_missing, meter = self.identify_missing(data_per_meter)
        percentage = data_missing.sum() / (data.shape[0]) * 100
        # diff_boolean turns the boolean mask into per-gap durations.
        data_gaps = self.diff_boolean(data_missing, column_name=meter)
        missing_mean = data_gaps.mean()
        std_dev = data_gaps.std()
        d.loc["% Missing", meter] = percentage[meter]
        d.loc["AVG Length Missing", meter] = missing_mean['duration']
        d.loc["Std dev. Missing", meter] = std_dev['duration']
    return d
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
    """Alter default behavior on how float is formatted in DataFrame.

    Floats are rendered in engineering format with *accuracy* decimal digits
    after the floating point. See also EngFormatter.
    """
    formatter = EngFormatter(accuracy, use_eng_prefix)
    set_option("display.float_format", formatter)
    # Widen columns enough to fit the engineering notation.
    set_option("display.column_space", max(12, accuracy + 9))
def post(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs):  # pragma: no cover
    """Perform a POST on a certain path.

    :param path: the path
    :param payload: the request payload
    :param callback: the callback function to invoke upon response
    :param timeout: the timeout of the request
    :param no_response: when True, mark the request non-confirmable and
        ask the server not to reply
    :return: the response
    """
    request = self.mk_request(defines.Codes.POST, path)
    request.token = generate_random_token(2)
    request.payload = payload
    if no_response:
        request.add_no_response()
        request.type = defines.Types["NON"]
    # Copy any extra keyword arguments that map onto request attributes.
    for attr_name, attr_value in kwargs.items():
        if hasattr(request, attr_name):
            setattr(request, attr_name, attr_value)
    return self.send_request(request, callback, timeout, no_response=no_response)
def GetHelper(cls, type_indicator):
    """Retrieves the path specification resolver helper for the specified type.

    Args:
      type_indicator (str): type indicator.

    Returns:
      ResolverHelper: the registered resolver helper.

    Raises:
      KeyError: if no resolver helper is set for the type indicator.
    """
    try:
        return cls._resolver_helpers[type_indicator]
    except KeyError:
        # Re-raise with a descriptive message.
        raise KeyError('Resolver helper not set for type indicator: {0:s}.'.format(type_indicator))
def get_vec_lr(self):
    """Return the (dx, dy) vector pointing from the left edge to the right edge."""
    dx = self.width * self.cos_a()
    dy = -self.width * self.sin_a()
    return dx, dy
def random(magnitude=1):
    """Create a vector of the given magnitude pointing in a uniformly random direction.

    :param magnitude: length of the resulting vector (default 1, a unit vector).
    :return: ``magnitude * Vector(cos(theta), sin(theta))`` for a random angle theta.
    """
    # Import under an alias: this function is itself named ``random``, so if it
    # is defined at module scope the bare name shadows the stdlib module and
    # ``random.uniform`` would resolve to this function (AttributeError).
    import random as _random
    theta = _random.uniform(0, 2 * math.pi)
    return magnitude * Vector(math.cos(theta), math.sin(theta))
def dispatch(self, env):
    """Very simple URL dispatch, a la Cake: /zelink maps to handle_zelink.

    :param env: WSGI-style environ dict; only ``PATH_INFO`` is consulted.
    :return: the handler's result, or a ('404 Not Found', message) tuple.
    """
    # Bug fix: on Python 3, filter() returns a lazy iterator that does not
    # support indexing, so path[0] raised TypeError; materialize it.
    path = list(filter(None, env['PATH_INFO'].split('/')))
    # An empty path (e.g. "/") has no segment to dispatch on -> 404 instead
    # of an IndexError.
    handler = getattr(self, 'handle_%s' % path[0], None) if path else None
    if not handler:
        return '404 Not Found', '%(PATH_INFO)s not found' % env
    return handler(env)
def parse_buffer_to_png(data):
    """Split a bytes buffer of one or more concatenated PNG files into Pillow Images.

    :param data: raw bytes containing one or more PNG files back to back.
    :return: list of PIL Image objects, one per PNG found.
    """
    images = []
    c1 = 0  # start offset of the PNG currently being collected
    c2 = 0  # scan cursor searching for the IEND chunk tag
    data_len = len(data)
    while c1 < data_len:
        # IEND can appear inside a PNG without being the actual end, so only
        # accept it when it sits at the end of the buffer or is followed by
        # the next file's "PNG" signature (at +9 relative to the tag).
        if data[c2:c2 + 4] == b'IEND' and (c2 + 8 == data_len or data[c2 + 9:c2 + 12] == b'PNG'):
            # IEND tag (4 bytes) + CRC (4 bytes): the PNG ends at c2 + 8.
            images.append(Image.open(BytesIO(data[c1:c2 + 8])))
            c1 = c2 + 8
            c2 = c1
        c2 += 1
    return images
def set_cursor_y(self, y):
    """Set the screen cursor's Y position, ignoring out-of-range values."""
    height = self.server.server_info.get("screen_height")
    if 0 <= y <= height:
        self.cursor_y = y
        # Push the new position to the LCD server.
        self.server.request("screen_set %s cursor_y %i" % (self.ref, self.cursor_y))
def flip(self, n_windows):
    """Flip the orientation of the page.

    Advances the page index by ``step * n_windows``, moves the cursor to the
    window boundary, toggles the inverted flag and invalidates the cached
    top-item height.
    """
    assert n_windows >= 0
    self.inverted = not self.inverted
    self.top_item_height = None
    self.cursor_index = n_windows
    self.page_index = self.page_index + self.step * n_windows
def GetTopLevel(self, file_object):
    """Returns the deserialized content of a plist as a dictionary object.

    Args:
      file_object (dfvfs.FileIO): a file-like object to parse.

    Returns:
      dict[str, object]: contents of the plist.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    try:
        return biplist.readPlist(file_object)
    except (biplist.InvalidPlistException, biplist.NotBinaryPlistException) as exception:
        # Wrap the library-specific error in the parser framework's exception.
        raise errors.UnableToParseFile('Unable to parse plist with error: {0!s}'.format(exception))
def get_parameter(self):
    """Obtain the list parameter object from the current widget state.

    :returns: A DefaultValueParameter from the current state of the widget.
    :rtype: DefaultValueParameter
    """
    # Harvest a value for each non-static option key from its widget.
    for key, value in list(self._parameter.options.items()):
        if value.get('type') == STATIC:
            # Static options keep their predefined value.
            continue
        elif value.get('type') == SINGLE_DYNAMIC:
            # A single dynamic value comes from the matching spin box.
            new_value = self.spin_boxes.get(key).value()
            self._parameter.set_value_for_key(key, new_value)
        elif value.get('type') == MULTIPLE_DYNAMIC:
            # Multiple dynamic values: collect the text of every list item.
            items = []
            for index in range(self.list_widget.count()):
                items.append(self.list_widget.item(index))
            new_value = [i.text() for i in items]
            self._parameter.set_value_for_key(key, new_value)
    # Record which radio button (option) is selected, if any.
    radio_button_checked_id = self.input_button_group.checkedId()
    # No radio button checked -> selected value is None.
    if radio_button_checked_id == -1:
        self._parameter.selected = None
    else:
        # NOTE(review): assumes the options dict iteration order matches the
        # radio button ids assigned at build time — confirm against the
        # widget-construction code.
        self._parameter.selected = list(self._parameter.options.keys())[radio_button_checked_id]
    return self._parameter
def _set_enable(self, v, load=False):
    """Setter method for enable, mapped from YANG variable /beacon/enable (container).

    Auto-generated (pyangbind-style) accessor — do not hand-edit the
    YANGDynClass arguments. If this variable is read-only (config: false) in
    the source YANG file, then _set_enable is considered a private method;
    backends looking to populate this variable should do so by calling
    thisObj._set_enable() directly.
    """
    if hasattr(v, "_utype"):
        # Unwrap typed values produced by the YANG type system.
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=enable.enable, is_container='container', presence=False, yang_name="enable", rest_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable Chassis/Interface Beacon', u'action': u'chassis'}}, namespace='urn:brocade.com:mgmt:brocade-beacon', defining_module='brocade-beacon', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected generated type.
        raise ValueError({'error-string': """enable must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=enable.enable, is_container='container', presence=False, yang_name="enable", rest_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable Chassis/Interface Beacon', u'action': u'chassis'}}, namespace='urn:brocade.com:mgmt:brocade-beacon', defining_module='brocade-beacon', yang_type='container', is_config=True)""", })
    self.__enable = t
    if hasattr(self, '_set'):
        # Trigger the generated change-notification hook when present.
        self._set()
def Dir(self, name, create=True):
    """Create a directory node named *name* relative to this file's directory."""
    parent = self.dir
    return parent.Dir(name, create=create)
def _get_annual_data(self, p_p_id):
    """Fetch annual consumption data from the profile endpoint.

    Generator-based coroutine (pre-async/await aiohttp style).

    :param p_p_id: portlet id used to address the Liferay resource.
    :raises PyHydroQuebecAnnualError: on any network, parsing or payload error.
    :returns: the 'courant' (current period) section of the first result.
    """
    params = {"p_p_id": p_p_id, "p_p_lifecycle": 2, "p_p_state": "normal", "p_p_mode": "view", "p_p_resource_id": "resourceObtenirDonneesConsommationAnnuelles"}
    try:
        raw_res = yield from self._session.get(PROFILE_URL, params=params, timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecAnnualError("Can not get annual data")
    try:
        # The server labels the body text/json, so override the content type
        # check aiohttp would otherwise perform.
        json_output = yield from raw_res.json(content_type='text/json')
    except (OSError, json.decoder.JSONDecodeError):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    # The payload must flag success and contain at least one result holding
    # a 'courant' (current period) section.
    if not json_output.get('success'):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    if not json_output.get('results'):
        raise PyHydroQuebecAnnualError("Could not get annual data")
    if 'courant' not in json_output.get('results')[0]:
        raise PyHydroQuebecAnnualError("Could not get annual data")
    return json_output.get('results')[0]['courant']
def options_string_builder(option_mapping, args):
    """Return arguments for CLI invocation of kal.

    :param option_mapping: mapping of argument name -> CLI flag (e.g. ``{'gain': '-g'}``).
    :param args: mapping of argument name -> value; only names present here are emitted.
    :return: a string such as ``" -g 40 -d 0"`` (each option carries a leading space).
    """
    # join() replaces quadratic repeated string concatenation, and %s already
    # stringifies the value, so the redundant str() conversions are dropped.
    return "".join(
        " %s %s" % (flag, args[option])
        for option, flag in option_mapping.items()
        if option in args
    )
def create_db(self, instance_name, instance_type, admin_username, admin_password, security_groups=None, db_name=None, storage_size_gb=DEFAULT_STORAGE_SIZE_GB, timeout_s=DEFAULT_TIMEOUT_S):
    """Create a database instance.

    This method blocks until the db instance is active, or until *timeout_s*
    has elapsed. By default, hpcloud assigns an automatically-generated set of
    credentials for an admin user. In addition to launching the db instance,
    this method uses the autogenerated credentials to log in to the server and
    create the intended admin user based on the credentials supplied as
    method arguments.

    :param str instance_name: A name to assign to the db instance.
    :param str instance_type: The server instance type (e.g. ``medium``).
    :param str admin_username: The admin username.
    :param str admin_password: The admin password.
    :param security_groups: Not used in hpcloud.
    :param str db_name: The database name; defaults to *instance_name*.
    :param int storage_size_gb: The size of the storage volume in GB.
    :param float timeout_s: Seconds to poll for an active database server
        before failing; also used when connecting to the running mysql server.
    :rtype: :class:`dict`
    """
    db = self._create_db(instance_name, instance_type, storage_size_gb)
    # Hang on to these... hpcloud only provides a way to generate a new
    # set of username/password - there is no way to retrieve the originals.
    default_creds = db.credential
    log.debug('Credentials for %s: %s' % (instance_name, default_creds))
    instance = self._poll_instance_status(db, timeout_s)
    # We're taking advantage of a security bug in hpcloud's dbaas security
    # group rules: the default "security" is to allow connections from
    # everywhere in the world.
    def connect():
        try:
            return pymysql.connect(host=instance.hostname, port=instance.port,  # db=self.database,
                                   user=default_creds['username'], passwd=default_creds['password'], connect_timeout=timeout_s, )
        except:
            # NOTE(review): bare except deliberately treats every connection
            # failure as "retry later" (poll_with_timeout keeps calling), but
            # it also hides programming errors — consider narrowing.
            log.warn("Could not connect to db, %s" % instance_name)
            # log.debug("Connection exception", exc_info=True)
    log.info("Connecting to %s..." % instance_name)
    # Retry connect() every 10s until it succeeds or timeout_s elapses.
    db = poll_with_timeout(timeout_s, connect, 10)
    cur = db.cursor()
    # NOTE(review): credentials are %-interpolated straight into the SQL;
    # they come from the operator rather than untrusted input, but escaping
    # or parameterizing would be safer.
    cur.execute("grant all privileges on *.* " "to '%s'@'%%' identified by '%s' " "with grant option" % (admin_username, admin_password))
    cur.execute("flush privileges")
    return db_to_dict(instance)
def mkdir_p(path):
    """Create *path* and any missing parent directories (like ``mkdir -p``)."""
    if PY3:
        # Python 3 supports this natively.
        return os.makedirs(path, exist_ok=True)
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise unless the target already exists as a directory.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def make_python_xref_nodes(py_typestr, state, hide_namespace=False):
    """Make docutils nodes containing a cross-reference to a Python object.

    Parameters
    ----------
    py_typestr : `str`
        Name of the Python object, e.g. ``'mypackage.mymodule.MyClass'``. If
        you have the object itself, or its type, use
        `make_python_xref_nodes_for_type` instead.
    state : ``docutils.statemachine.State``
        Usually the directive's ``state`` attribute.
    hide_namespace : `bool`, optional
        If `True`, the namespace of the object is hidden in the rendered
        cross reference (internally, ``:py:obj:`~{py_obj}``` — note tilde).

    Returns
    -------
    instance from ``docutils.nodes``
        Docutils node representing the cross reference.

    See also
    --------
    `make_python_xref_nodes_for_type`
    """
    role_template = ':py:obj:`~{}`\n' if hide_namespace else ':py:obj:`{}`\n'
    return parse_rst_content(role_template.format(py_typestr), state)
def run(self, server=None, host=None, port=None, enable_pretty_logging=True):
    """Run WeRoBot.

    :param server: backend passed through to Bottle's ``run`` (see the bottle
        deployment documentation for valid server backends).
    :param host: host address to bind to; falls back to ``config["HOST"]``.
    :param port: port to bind to; falls back to ``config["PORT"]``.
    :param enable_pretty_logging: whether to enable prettified log output.
    """
    if enable_pretty_logging:
        # Alias the import so it does not clash with the parameter name.
        from werobot.logger import enable_pretty_logging as _enable_pretty_logging
        _enable_pretty_logging(self.logger)
    server = self.config["SERVER"] if server is None else server
    host = self.config["HOST"] if host is None else host
    port = self.config["PORT"] if port is None else port
    try:
        self.wsgi.run(server=server, host=host, port=port)
    except KeyboardInterrupt:
        # Ctrl-C is a normal shutdown, not an error.
        exit(0)
async def query_firmware(self):
    """Query the device for its firmware versions and cache them."""
    response = await self.request.get(join_path(self._base_path, "/fwversion"))
    firmware = response.get("firmware")
    if not firmware:
        return
    main_info = firmware.get("mainProcessor")
    if main_info:
        self._main_processor_version = self._make_version(main_info)
    radio_info = firmware.get("radio")
    if radio_info:
        self._radio_version = self._make_version(radio_info)
def start(transport='zmq', address='0.0.0.0', port=49017, auth_address='0.0.0.0', auth_port=49018, disable_security=False, certificate=None, os_whitelist=None, os_blacklist=None, error_whitelist=None, error_blacklist=None, host_whitelist=None, host_blacklist=None):
    '''Listen to napalm-logs and publish events into the Salt event bus.

    transport: ``zmq``
        Choose the desired transport.

        .. note:: Currently ``zmq`` is the only valid option.

    address: ``0.0.0.0``
        The address of the publisher, as configured on napalm-logs.
    port: ``49017``
        The port of the publisher, as configured on napalm-logs.
    auth_address: ``0.0.0.0``
        The address used for authentication when security is not disabled.
    auth_port: ``49018``
        Port used for authentication.
    disable_security: ``False``
        Trust unencrypted messages. Strongly discouraged in production.
    certificate: ``None``
        Absolute path to the SSL certificate.
    os_whitelist: ``None``
        List of operating systems allowed. By default everything is allowed.
    os_blacklist: ``None``
        List of operating systems to be ignored. Nothing ignored by default.
    error_whitelist: ``None``
        List of errors allowed.
    error_blacklist: ``None``
        List of errors ignored.
    host_whitelist: ``None``
        List of hosts or IPs to be allowed.
    host_blacklist: ``None``
        List of hosts or IPs to be ignored.
    '''
    if not disable_security:
        if not certificate:
            log.critical('Please use a certificate, or disable the security.')
            return
        # Authenticate against napalm-logs to obtain the decryption channel.
        auth = napalm_logs.utils.ClientAuth(certificate, address=auth_address, port=auth_port)
    transport_recv_fun = _get_transport_recv(name=transport, address=address, port=port)
    if not transport_recv_fun:
        log.critical('Unable to start the engine', exc_info=True)
        return
    master = False
    if __opts__['__role'] == 'master':
        master = True
    # Main receive loop: block on the transport, filter, then publish.
    while True:
        log.debug('Waiting for napalm-logs to send anything...')
        raw_object = transport_recv_fun()
        log.debug('Received from napalm-logs:')
        log.debug(raw_object)
        if not disable_security:
            dict_object = auth.decrypt(raw_object)
        else:
            dict_object = napalm_logs.utils.unserialize(raw_object)
        try:
            # Apply the OS / error / host white- and blacklists in turn;
            # any rejection skips to the next message.
            event_os = dict_object['os']
            if os_blacklist or os_whitelist:
                valid_os = salt.utils.stringutils.check_whitelist_blacklist(event_os, whitelist=os_whitelist, blacklist=os_blacklist)
                if not valid_os:
                    log.info('Ignoring NOS %s as per whitelist/blacklist', event_os)
                    continue
            event_error = dict_object['error']
            if error_blacklist or error_whitelist:
                valid_error = salt.utils.stringutils.check_whitelist_blacklist(event_error, whitelist=error_whitelist, blacklist=error_blacklist)
                if not valid_error:
                    log.info('Ignoring error %s as per whitelist/blacklist', event_error)
                    continue
            event_host = dict_object.get('host') or dict_object.get('ip')
            if host_blacklist or host_whitelist:
                valid_host = salt.utils.stringutils.check_whitelist_blacklist(event_host, whitelist=host_whitelist, blacklist=host_blacklist)
                if not valid_host:
                    log.info('Ignoring messages from %s as per whitelist/blacklist', event_host)
                    continue
            tag = 'napalm/syslog/{os}/{error}/{host}'.format(os=event_os, error=event_error, host=event_host)
        except KeyError as kerr:
            # Malformed message: log it and jump to the next object in the queue.
            log.warning('Missing keys from the napalm-logs object:', exc_info=True)
            log.warning(dict_object)
            continue
        log.debug('Sending event %s', tag)
        log.debug(raw_object)
        if master:
            event.get_master_event(__opts__, __opts__['sock_dir']).fire_event(dict_object, tag)
        else:
            __salt__['event.send'](tag, dict_object)
def change_issue_status(self, issue_id, status_id: str):
    """Change the status of a ticket.

    :param issue_id: ticket id.
    :param status_id: new status id.
    """
    query = """
    update meta.issue set
    issue_status_id=:status_id,
    assignee_user_id=valera_user_id(),
    last_user_id=valera_user_id()
    where id = :issue_id
    """
    bind_params = {"issue_id": issue_id, "status_id": status_id}
    self.__metadb.update(query, bind_params)
def initialize(self):
    """Set window layout: a button, an entry, a label and a scrolled transcript."""
    # Shared grid padding/stickiness for every widget.
    pad = dict(sticky='nesw', padx=3, pady=3)
    self.grid()
    self.respond = ttk.Button(self, text='Get Response', command=self.get_response)
    self.respond.grid(column=0, row=0, **pad)
    self.usr_input = ttk.Entry(self, state='normal')
    self.usr_input.grid(column=1, row=0, **pad)
    self.conversation_lbl = ttk.Label(self, anchor=tk.E, text='Conversation:')
    self.conversation_lbl.grid(column=0, row=1, **pad)
    self.conversation = ScrolledText.ScrolledText(self, state='disabled')
    self.conversation.grid(column=0, row=2, columnspan=2, **pad)
def orthonormality(V, ip_B=None):
    r"""Measure orthonormality of given basis.

    :param V: a matrix :math:`V=[v_1,\ldots,v_n]` with ``shape==(N,n)``.
    :param ip_B: (optional) the inner product to use, see :py:meth:`inner`.
    :return: :math:`\|I_n - \langle V,V\rangle\|_2`.
    """
    gram = inner(V, V, ip_B=ip_B)
    identity = numpy.eye(V.shape[1])
    return norm(identity - gram)
def create(self, unique_name=values.unset, date_expiry=values.unset, ttl=values.unset, mode=values.unset, status=values.unset, participants=values.unset):
    """Create a new SessionInstance

    :param unicode unique_name: An application-defined string that uniquely identifies the resource
    :param datetime date_expiry: The ISO 8601 date when the Session should expire
    :param unicode ttl: When the session will expire
    :param SessionInstance.Mode mode: The Mode of the Session
    :param SessionInstance.Status status: Session status
    :param dict participants: The Participant objects to include in the new session

    :returns: Newly created SessionInstance
    :rtype: twilio.rest.proxy.v1.service.session.SessionInstance
    """
    # Map the Python arguments onto the API's form-field names; unset
    # values are filtered out by values.of().
    form = {
        'UniqueName': unique_name,
        'DateExpiry': serialize.iso8601_datetime(date_expiry),
        'Ttl': ttl,
        'Mode': mode,
        'Status': status,
        'Participants': serialize.map(participants, lambda e: serialize.object(e)),
    }
    data = values.of(form)
    payload = self._version.create('POST', self._uri, data=data, )
    return SessionInstance(self._version, payload, service_sid=self._solution['service_sid'], )
def style_print(*values, **kwargs):
    """A convenience function that applies style_format to text before printing"""
    # 'style' is consumed here; all remaining kwargs pass through to print().
    style = kwargs.pop("style", None)
    styled = [style_format(item, style) for item in values]
    print(*styled, **kwargs)
def chunkComment(self, text, start=0):
    """Return a list of chunks of comments."""
    # Gather every comment found at or after `start`.
    found = []
    match = self.nextComment(text, start)
    while match:
        found.append(match.group(0))
        match = self.nextComment(text, match.start(0) + 1)
    # Group consecutive comments by whether they are line comments or
    # block comments, then drop separator-only groups.
    grouped = (list(g) for _, g in groupby(found, self.isLineComment))
    return [chunk for chunk in grouped if chunk != ['']]
def _sysv_enabled(name, root):
    '''A System-V style service is assumed enabled if the "startup" symlink
    (starts with "S") to its script is found in /etc/init.d in the current
    runlevel.

    Note: the original docstring said "disabled", which contradicts the
    function name and the fact that it returns True on a match.
    '''
    # Find exact match (disambiguate matches like "S01anacron" for cron)
    rc = _root('/etc/rc{}.d/S*{}'.format(_runlevel(), name), root)
    for match in glob.glob(rc):
        # The S prefix is followed by up to two priority digits, then the
        # exact service name.
        if re.match(r'S\d{,2}%s' % name, os.path.basename(match)):
            return True
    return False
def get_lockout_response(self):
    """Render the configured lockout template with the attempt details.

    :return: the rendered response for a locked-out login
    """
    context = {
        'user_attempts': self.last_attempt_instance,
        'lockout_time': self.block_login_seconds,
        'ip_address': self.ip,
    }
    # NOTE(review): context_instance was removed in modern Django —
    # presumably this project pins an older Django version; verify.
    return render_to_response(self.template_name, context, context_instance=RequestContext(self.request))
def _setsizes ( self , cursor = None ) :
"""Set stored input and output sizes for cursor execution .""" | if cursor is None :
cursor = self . _cursor
if self . _inputsizes :
cursor . setinputsizes ( self . _inputsizes )
for column , size in self . _outputsizes . items ( ) :
if column is None :
cursor . setoutputsize ( size )
else :
cursor . setoutputsize ( size , column ) |
def get_input(prompt, default=None, choices=None, option_value=None):
    """If option_value is not None, then return it. Otherwise get the result from
    input."""
    # A pre-supplied option value short-circuits any prompting.
    if option_value is not None:
        return option_value
    valid = choices or []
    while True:
        answer = input(prompt + ' ').strip()
        # Empty answer falls back to the default, when one exists.
        if not answer and default is not None:
            return default
        # Without a choice list any answer is accepted as-is.
        if not valid:
            return answer
        # Otherwise keep prompting until the answer is one of the choices.
        if answer in valid:
            return answer
def _rm_get_reference_coords_from_header ( parts ) :
"""extract the reference ( genomic sequence match ) coordinates of a repeat
occurrence from a repeatmakser header line . An example header line is : :
239 29.42 1.92 0.97 chr1 11 17 ( 41 ) C XX # YY ( 74 ) 104 1 m _ b1s502i1 4
the genomic start and end are always at positions 5 and 6 resepctively . In
the repeatmasker format , the end is inclusive , but in pyokit end coordinates
are exclusive , so we adjust it when we parse here .
: param parts : the header line , as a tokenized list .
: return : tuple of ( start , end )""" | s = int ( parts [ 5 ] )
e = int ( parts [ 6 ] ) + 1
if ( s >= e ) :
raise AlignmentIteratorError ( "invalid repeatmakser header: " + " " . join ( parts ) )
return ( s , e ) |
def add(self, X, y):
    """Add data about known tunable hyperparameter configurations and scores.

    Refits model with all data.

    Args:
        X (Union[Dict[str, object], List[Dict[str, object]]]): dict or list of dicts of
            hyperparameter combinations. Keys may only be the name of a tunable, and the
            dictionary must contain values for all tunables.
        y (Union[float, List[float]]): float or list of floats of scores of the hyperparameter
            combinations. Order of scores must match the order of the hyperparameter
            dictionaries that the scores corresponds
    """
    # Normalize a single observation into one-element lists.
    if isinstance(X, dict):
        X, y = [X], [y]

    for params, score in zip(X, y):
        # Track the best score and the hyperparameters that produced it.
        if score > self._best_score:
            self._best_score = score
            self._best_hyperparams = params
        # Vectorize the dict into the tunable order and append as a new row.
        row = [params[name] for name, _ in self.tunables]
        new_row = np.array([row], dtype=object)
        if self.X_raw is None:
            self.X_raw = new_row
        else:
            self.X_raw = np.append(self.X_raw, new_row, axis=0)
    self.y_raw = np.append(self.y_raw, y)

    # Transform each hyperparameter column based on its hyperparameter type.
    x_transformed = np.array([], dtype=np.float64)
    if len(self.X_raw.shape) > 1 and self.X_raw.shape[1] > 0:
        columns = []
        for idx in range(self.X_raw.shape[1]):
            hp_type = self.tunables[idx][1]
            columns.append(hp_type.fit_transform(self.X_raw[:, idx], self.y_raw, ).astype(float))
        x_transformed = columns[0]
        for col in columns[1:]:
            x_transformed = np.column_stack((x_transformed, col))
    self.fit(x_transformed, self.y_raw)
def previous(self):
    """Moves the 'Cursor' to & returns the previous 'Node'. Raises
    'GameTreeEndError' if the start of a branch is exceeded."""
    if self.index > 0:
        # Still inside the current line: just step back one node.
        self.index -= 1
    elif self.stack:
        # We were inside a variation: pop back to the parent tree and
        # land on its last node.
        self.gametree = self.stack.pop()
        self.index = len(self.gametree) - 1
    else:
        raise GameTreeEndError
    self.node = self.gametree[self.index]
    self.nodenum -= 1
    self._setChildren()
    self._setFlags()
    return self.node
def slicing(args, length):
    """Internally used."""
    # A tuple holds several slice specs; anything else is a single spec.
    specs = args if isinstance(args, tuple) else (args,)
    for spec in specs:
        yield from slicing_singlevalue(spec, length)
def _process ( self , input ) :
'''Takes in html - mixed body text as a string and returns a list of strings ,
lower case and with punctuation given spacing .
Called by self . _ gen _ sentence ( )
Args :
inpnut ( string ) : body text''' | input = re . sub ( "<[^>]*>" , " " , input )
punct = list ( string . punctuation )
for symbol in punct :
input = input . replace ( symbol , " %s " % symbol )
input = filter ( lambda x : x != u'' , input . lower ( ) . split ( ' ' ) )
return input |
def cursor(self, offset=0, limit=None, order_by=None, as_dict=False):
    """See expression.fetch() for input description.

    :return: query cursor
    """
    # OFFSET without LIMIT is not valid SQL for this backend.
    if offset and limit is None:
        raise DataJointError('limit is required when offset is set')
    sql = self.make_sql()
    if order_by is not None:
        sql += ' ORDER BY ' + ', '.join(order_by)
    if limit is not None:
        sql += ' LIMIT %d' % limit
        if offset:
            sql += ' OFFSET %d' % offset
    logger.debug(sql)
    return self.connection.query(sql, as_dict=as_dict)
def dup_finder(file_path, directory=".", enable_scandir=False):
    """Check a directory for duplicates of the specified file. This is meant
    for a single file only; for checking a directory for dups, use
    directory_duplicates.

    This is designed to be as fast as possible by doing lighter checks
    before progressing to more extensive ones, in order they are:

    1. File size
    2. First twenty bytes
    3. Full SHA256 compare

    :param file_path: Path to file to check for duplicates of
    :param directory: Directory to dig recursively into to look for duplicates
    :param enable_scandir: on python < 3.5 enable external scandir package
    :return: generators
    """
    size = os.path.getsize(file_path)
    if size == 0:
        # Every empty file is trivially a duplicate of another empty file.
        yield from remove_empty_files(directory, dry_run=True)
        return
    with open(file_path, 'rb') as source:
        head = source.read(20)
    digest = file_hash(file_path, "sha256")
    for root, directories, files in _walk(directory, enable_scandir=enable_scandir):
        for candidate_name in files:
            candidate = os.path.join(root, candidate_name)
            # Cheapest check first: sizes must match.
            if os.path.getsize(candidate) != size:
                continue
            try:
                with open(candidate, 'rb') as fh:
                    candidate_head = fh.read(20)
            except OSError:
                logger.warning("Could not open file to compare - "
                               "{0}".format(candidate))
                continue
            # Only hash when size and first bytes both agree.
            if candidate_head == head and file_hash(candidate, "sha256") == digest:
                yield os.path.abspath(candidate)
def recv(self, nbytes):
    """Receive data from the channel. The return value is a string
    representing the data received. The maximum amount of data to be
    received at once is specified by C{nbytes}. If a string of length zero
    is returned, the channel stream has closed.

    @param nbytes: maximum number of bytes to read.
    @type nbytes: int
    @return: data.
    @rtype: str

    @raise socket.timeout: if no data is ready before the timeout set by
        L{settimeout}.
    """
    try:
        out = self.in_buffer.read(nbytes, self.timeout)
    # Fixed: `except PipeTimeout, e:` is Python-2-only syntax (and the
    # bound exception was unused); `except PipeTimeout:` works on both.
    except PipeTimeout:
        raise socket.timeout()
    # Grow the receive window by the amount consumed; a positive ack must
    # be reported to the remote side so it may keep sending.
    ack = self._check_add_window(len(out))
    # no need to hold the channel lock when sending this
    if ack > 0:
        m = Message()
        m.add_byte(chr(MSG_CHANNEL_WINDOW_ADJUST))
        m.add_int(self.remote_chanid)
        m.add_int(ack)
        self.transport._send_user_message(m)
    return out
def match_date(self, value, strict=False):
    """if value is a date"""
    text = stringify(value)
    try:
        parse(text)
    except Exception:
        # Report (or raise, depending on `strict`) via the shout helper.
        self.shout('Value %r is not a valid date', strict, text)
def bin(args):
    """%prog bin data.tsv

    Conver tsv to binary format.
    """
    # NOTE: the docstring above is passed to OptionParser as usage text,
    # so it is reproduced verbatim (including the original wording).
    p = OptionParser(bin.__doc__)
    p.add_option("--dtype", choices=("float32", "int32"), help="dtype of the matrix")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    tsvfile, = args
    # Pick the dtype: explicit option wins, otherwise guess from filename.
    if opts.dtype is None:
        dtype = np.int32 if "data" in tsvfile else np.float32
    else:
        dtype = np.int32 if opts.dtype == "int32" else np.float32
    print("dtype: {}".format(dtype), file=sys.stderr)
    fp = open(tsvfile)
    next(fp)  # skip the header line
    arrays = []
    for i, row in enumerate(fp):
        # Drop the leading row label, keep the numeric values.
        a = np.fromstring(row, sep="\t", dtype=dtype)[1:]
        arrays.append(a)
        print(i, a, file=sys.stderr)
    print("Merging", file=sys.stderr)
    merged = np.concatenate(arrays)
    print("Binary shape: {}".format(merged.shape), file=sys.stderr)
    binfile = tsvfile.rsplit(".", 1)[0] + ".bin"
    merged.tofile(binfile)
def s3_bucket(self):
    """Connect to the user defined Amazon S3 bucket.

    Called on demand by :func:`get()` and :func:`put()`. Caches its
    return value so that only a single connection is created.

    :returns: A :class:`boto.s3.bucket.Bucket` object.
    :raises: :exc:`.CacheBackendDisabledError` when the user hasn't
             defined :attr:`.Config.s3_cache_bucket`.
    :raises: :exc:`.CacheBackendError` when the connection to the Amazon
             S3 bucket fails.
    """
    # Cached from a previous call: nothing to do.
    if hasattr(self, 'cached_bucket'):
        return self.cached_bucket
    self.check_prerequisites()
    with PatchedBotoConfig():
        from boto.exception import BotoClientError, BotoServerError, S3ResponseError
        # The outer try/except translates unexpected Boto exceptions
        # into a CacheBackendError.
        try:
            # The inner try/except handles the expected error raised by
            # Boto when the bucket does not exist yet.
            try:
                logger.debug("Connecting to Amazon S3 bucket: %s", self.config.s3_cache_bucket)
                self.cached_bucket = self.s3_connection.get_bucket(self.config.s3_cache_bucket)
            except S3ResponseError as e:
                if e.status == 404 and self.config.s3_cache_create_bucket:
                    logger.info("Amazon S3 bucket doesn't exist yet, creating it now: %s", self.config.s3_cache_bucket)
                    self.s3_connection.create_bucket(self.config.s3_cache_bucket)
                    self.cached_bucket = self.s3_connection.get_bucket(self.config.s3_cache_bucket)
                else:
                    # Don't swallow exceptions we can't handle.
                    raise
        except (BotoClientError, BotoServerError):
            raise CacheBackendError("""
                Failed to connect to the configured Amazon S3 bucket
                {bucket}! Are you sure the bucket exists and is accessible
                using the provided credentials? The Amazon S3 cache backend
                will be disabled for now.
            """, bucket=repr(self.config.s3_cache_bucket))
    return self.cached_bucket
def _reloader_child(server, app, interval):
    '''Start the server and check for modified files in a background thread.
    As soon as an update is detected, KeyboardInterrupt is thrown in
    the main thread to exit the server loop. The process exists with status
    code 3 to request a reload by the observer process. If the lockfile
    is not modified in 2*interval second or missing, we assume that the
    observer process died and exit with status code 1 or 2.'''
    lockfile = os.environ.get('BOTTLE_LOCKFILE')
    checker = FileCheckerThread(lockfile, interval)
    try:
        checker.start()
        server.run(app)
    except KeyboardInterrupt:
        pass
    # Swap in 5 (silent exit) and keep the checker's real status for the
    # exit code decision below.
    checker.status, status = 5, checker.status
    checker.join()
    if status:
        sys.exit(status)
def parse_bookmark_data(data):
    """Return iterator for bookmarks of the form (url, name, line number).
    Bookmarks are not sorted."""
    name = None
    for lineno, raw in enumerate(data.splitlines(), start=1):
        line = raw.strip()
        if line.startswith("NAME="):
            name = line[5:]
        elif line.startswith("URL="):
            url = line[4:]
            # Only emit complete pairs: a non-empty URL preceded by a NAME.
            if url and name is not None:
                yield (url, name, lineno)
        else:
            # Any other line breaks the NAME/URL pairing.
            name = None
def interpolate_single(start, end, coefficient, how='linear'):
    """Interpolate single value between start and end in given number of steps"""
    # Dispatch to the interpolation function registered for `how`.
    interp = INTERP_SINGLE_DICT[how]
    return interp(start, end, coefficient)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.