signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def inheritdocstring(name, bases, attrs):
    """Use as metaclass to inherit class and method docstrings from parents.

    Adapted from
    http://stackoverflow.com/questions/13937500/inherit-a-parent-class-docstring-as-doc-attribute

    Use this on classes defined in solver-specific interfaces to inherit
    docstrings from the high-level interface.

    :param name: name of the class being created
    :param bases: tuple of base classes
    :param attrs: attribute dict of the class being created
    :return: the newly created class
    """
    # Create a temporary 'parent' to (greatly) simplify the MRO search.
    # NOTE: defined unconditionally -- the original built it only inside the
    # class-docstring branch, which raised NameError in the method loop
    # whenever the class already had its own docstring.
    temp = type('temporaryclass', bases, {})
    if not attrs.get('__doc__'):
        for cls in inspect.getmro(temp):
            if cls.__doc__ is not None:
                attrs['__doc__'] = cls.__doc__
                break
    for attr_name, attr in attrs.items():
        if not attr.__doc__:
            for cls in inspect.getmro(temp):
                try:
                    parent_doc = getattr(cls, attr_name).__doc__
                    if parent_doc is not None:
                        attr.__doc__ = parent_doc
                        break
                except (AttributeError, TypeError):
                    # Attribute missing on this ancestor, or its __doc__ is
                    # read-only (e.g. builtins) -- keep searching the MRO.
                    continue
    return type(name, bases, attrs)
def add_isoquant_data(peptides, quantpeptides, quantacc, quantfields):
    """Run through a peptide table and add quant data from ANOTHER peptide
    table that contains that data.

    Thin generator wrapper around ``base_add_isoquant_data`` using the
    peptide-table header definition.
    """
    for record in base_add_isoquant_data(peptides, quantpeptides,
                                         peptabledata.HEADER_PEPTIDE,
                                         quantacc, quantfields):
        yield record
def is_true(entity, prop, name):
    """bool: True if the value of a property is True."""
    # Short-circuit chain: property must be non-empty, present in the
    # entity's data, and truthy.
    return (is_not_empty(entity, prop, name)
            and name in entity._data
            and bool(getattr(entity, name)))
def to_dict(self):
    """Convert back to the pstats dictionary representation (used for saving
    back as a pstats binary file).

    :return: mapping of (filename, line_number, name) to the stats tuple;
        a trailing subcall dict is appended when subcall data is present.
    """
    key = (self.filename, self.line_number, self.name)
    stats = (self.ncalls, self.nonrecursive_calls, self.own_time_s,
             self.cummulative_time_s)
    if self.subcall is None:
        return {key: stats}
    if isinstance(self.subcall, dict):
        subcalls = self.subcall
    else:
        # A sequence of child entries: merge their dict representations
        subcalls = {}
        for child in self.subcall:
            subcalls.update(child.to_dict())
    return {key: stats + (subcalls,)}
def getPeers(self):
    '''getPeers - Get elements who share a parent with this element

    @return - TagCollection of elements, or None when this element has no
        parent (and therefore no peers)'''
    parent = self.parentNode
    if not parent:
        # If no parent, no peers
        return None
    # All children of the parent, excluding this node itself
    siblings = [child for child in parent.children if child is not self]
    return TagCollection(siblings)
def rule110_network():
    """A network of three elements which follows the logic of the Rule 110
    cellular automaton with current and previous state (0, 0, 0)."""
    # One row per prior state (binary-counting order), one column per node.
    tpm = np.array([
        [0, 0, 0],
        [1, 0, 1],
        [1, 1, 0],
        [1, 1, 1],
        [0, 1, 1],
        [1, 1, 1],
        [1, 1, 1],
        [0, 0, 0],
    ])
    return Network(tpm, node_labels=LABELS[:tpm.shape[1]])
def get_sell(self, account_id, sell_id, **params):
    """Fetch a single sell.

    See https://developers.coinbase.com/api/v2#show-a-sell
    """
    resp = self._get('v2', 'accounts', account_id, 'sells', sell_id,
                     params=params)
    return self._make_api_object(resp, Sell)
def push_broks_to_broker(self):  # pragma: no cover - not used!
    """Send all broks from the arbiter internal list to the brokers.

    The arbiter gets some broks and then pushes them to every broker that
    manages arbiters. The internal list is cleared when no broker is
    concerned, or once at least one push succeeded.

    :return: None
    """
    concerned = False
    pushed = False
    for broker_link in self.conf.brokers:
        # Send only if the broker is concerned...
        if not broker_link.manage_arbiters:
            continue
        concerned = True
        if broker_link.reachable:
            logger.debug("Sending %d broks to the broker %s",
                         len(self.broks), broker_link.name)
            if broker_link.push_broks(self.broks):
                statsmgr.counter('broks.pushed.count', len(self.broks))
                pushed = True
    if not concerned or pushed:
        # No one is interested any more, or the broks were delivered: flush
        del self.broks[:]
def get_stackdelta(op):
    """Returns the number of elements that the instruction *op* adds to the stack.

    # Arguments
    op (dis.Instruction): The instruction to retrieve the stackdelta value for.

    # Raises
    KeyError: If the instruction *op* is not supported.
    """
    delta = opstackd[op.opname]
    # Some table entries are callables computing the delta from the
    # instruction itself (e.g. argument-dependent opcodes).
    return delta(op) if callable(delta) else delta
def verify_token(id_token, request, audience=None,
                 certs_url=_GOOGLE_OAUTH2_CERTS_URL):
    """Verifies an ID token and returns the decoded token.

    Args:
        id_token (Union[str, bytes]): The encoded token.
        request (google.auth.transport.Request): The object used to make
            HTTP requests.
        audience (str): The audience that this token is intended for. If None
            then the audience is not verified.
        certs_url (str): The URL that specifies the certificates to use to
            verify the token. This URL should return JSON in the format of
            ``{'key id': 'x509 certificate'}``.

    Returns:
        Mapping[str, Any]: The decoded token.
    """
    certificates = _fetch_certs(request, certs_url)
    return jwt.decode(id_token, certs=certificates, audience=audience)
def set_exclude_replies(self, exclude):
    """Sets the 'exclude_replies' parameter used to prevent replies from
    appearing in the returned timeline.

    :param exclude: Boolean triggering the usage of the parameter
    :raises: TwitterSearchException
    """
    if not isinstance(exclude, bool):
        # error code 1008: not a boolean
        raise TwitterSearchException(1008)
    value = 'true' if exclude else 'false'
    self.arguments.update({'exclude_replies': value})
def _find_assert_stmt(filename, linenumber, leading=1, following=2,
                      module_globals=None):
    '''Given a Python module name, filename and line number, find the lines
    that are part of the statement containing that line.

    Python stacktraces, when reporting which line they're on, always show the
    last line of the statement. This can be confusing if the statement spans
    multiple lines. This function helps reconstruct the whole statement, and
    is used by :meth:`marbles.core.ContextualAssertionError.assert_stmt`.

    Returns a tuple of the range of lines spanned by the source being
    returned, and the number of the line on which the interesting statement
    starts.

    We may need the ``module_globals`` in order to tell :mod:`linecache` how
    to find the file, if it comes from inside an egg. In that case,
    ``module_globals`` should contain a key ``__loader__`` which knows how to
    read from that file.
    '''
    source_lines = linecache.getlines(filename, module_globals=module_globals)
    tree = ast.parse(''.join(source_lines))
    finder = _StatementFinder(linenumber)
    finder.visit(tree)
    line_range = range(finder.found - leading, linenumber + following)
    return line_range, finder.found
def get_connection(self, *args, **kwargs):
    """Ensure assert_hostname is set correctly on our pool.

    We already take care of a normal poolmanager via init_poolmanager,
    but we still need to take care of when there is a proxy poolmanager.
    """
    connection = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
    if connection.assert_hostname != self.assert_hostname:
        connection.assert_hostname = self.assert_hostname
    return connection
def decompress_amount(x):
    """Undo the value compression performed by x = compress_amount(n).

    The input x matches one of the following patterns:
        x = 0                            (n = 0)
        x = 1 + 10*(9*n + d - 1) + e     (last non-zero digit d, exponent e)
        x = 1 + 10*(n - 1) + 9           (exponent-9 shortcut)
    """
    if not x:
        return 0
    x -= 1
    # x = 10*(9*n + d - 1) + e
    x, e = divmod(x, 10)
    if e < 9:
        # x = 9*n + d - 1, with last digit d in 1..9
        x, d = divmod(x, 9)
        n = x * 10 + d + 1
    else:
        n = x + 1
    return n * 10 ** e
def failover(self, name):
    """Force a failover of a named master."""
    future = self.execute(b'FAILOVER', name)
    return wait_ok(future)
def import_name(mod_name):
    """Import a module by module name.

    @param mod_name: module name.
    @return: the module object; the cached module when already imported.
    """
    cached = sys.modules.get(mod_name)
    if cached is not None:
        return cached
    __import__(mod_name)
    return sys.modules[mod_name]
def make_bernstein_vazirani_circuit(input_qubits, output_qubit, oracle):
    """Solves for factors in f(a) = a.factors + bias (mod 2) with one query."""
    circuit = cirq.Circuit()
    # Initialize qubits: output qubit into |->, inputs into |+>.
    circuit.append([
        cirq.X(output_qubit),
        cirq.H(output_qubit),
        cirq.H.on_each(*input_qubits),
    ])
    # Query oracle.
    circuit.append(oracle)
    # Measure in X basis.
    circuit.append([
        cirq.H.on_each(*input_qubits),
        cirq.measure(*input_qubits, key='result'),
    ])
    return circuit
def post(self, request, *args, **kwargs):
    """The only circumstances when we POST is to submit the main form, both
    updating translations (if any changed) and advancing to the next page of
    messages.

    There is no notion of validation of this content; as implemented, unknown
    fields are ignored and a generic failure message is shown.

    Submitted changes are saved out to the specified .po file on the
    filesystem if that file is writable, otherwise the cached version of the
    file is updated (so it can be downloaded). Then the user is redirected
    to the next page of messages (if there is one; otherwise they're
    redirected back to the current page).
    """
    # The message text inputs are captured as hashes of their initial
    # contents, preceded by "m_". Messages with plurals end with their
    # variation number.
    single_text_input_regex = re.compile(r'^m_([0-9a-f]+)$')
    plural_text_input_regex = re.compile(r'^m_([0-9a-f]+)_([0-9]+)$')
    file_change = False
    for field_name, new_msgstr in request.POST.items():
        md5hash = None
        if plural_text_input_regex.match(field_name):
            md5hash, plural_id = plural_text_input_regex.match(field_name).groups()
            md5hash = str(md5hash)
            # polib parses .po files into unicode strings, but
            # doesn't bother to convert plural indexes to int,
            # so we need unicode here.
            plural_id = six.text_type(plural_id)
            # Above no longer true as of polib 1.0.4
            if plural_id and plural_id.isdigit():
                plural_id = int(plural_id)
        elif single_text_input_regex.match(field_name):
            md5hash = str(single_text_input_regex.match(field_name).groups()[0])
            plural_id = None
        if md5hash is not None:  # Empty string should be processed!
            entry = self.po_file.find(md5hash, 'md5hash')
            # If someone did a makemessage, some entries might
            # have been removed, so we need to check.
            if entry:
                old_msgstr = entry.msgstr
                if plural_id is not None:  # 0 is ok!
                    entry.msgstr_plural[plural_id] = self.fix_nls(entry.msgid_plural, new_msgstr)
                else:
                    entry.msgstr = self.fix_nls(entry.msgid, new_msgstr)
                # Checkbox named 'f_<hash>' toggles the 'fuzzy' flag.
                is_fuzzy = bool(self.request.POST.get('f_%s' % md5hash, False))
                old_fuzzy = 'fuzzy' in entry.flags
                if old_fuzzy and not is_fuzzy:
                    entry.flags.remove('fuzzy')
                elif not old_fuzzy and is_fuzzy:
                    entry.flags.append('fuzzy')
                file_change = True
                # Only fire the signal when something actually changed.
                if old_msgstr != new_msgstr or old_fuzzy != is_fuzzy:
                    entry_changed.send(sender=entry, user=request.user, old_msgstr=old_msgstr, old_fuzzy=old_fuzzy, pofile=self.po_file_path, language_code=self.language_id, )
            else:
                messages.error(self.request, _("Some items in your last translation block couldn't " "be saved: this usually happens when the catalog file " "changes on disk after you last loaded it."), )
    if file_change and self.po_file_is_writable:
        try:
            # Stamp the catalog metadata with the translator and timestamp.
            self.po_file.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % (getattr(self.request.user, 'first_name', 'Anonymous'), getattr(self.request.user, 'last_name', 'User'), getattr(self.request.user, 'email', 'anonymous@user.tld'))).encode('ascii', 'ignore')
            self.po_file.metadata['X-Translated-Using'] = u"django-rosetta %s" % (get_rosetta_version())
            self.po_file.metadata['PO-Revision-Date'] = timestamp_with_timezone()
        except UnicodeDecodeError:
            # Metadata stamping is best-effort only.
            pass
        try:
            self.po_file.save()
            po_filepath, ext = os.path.splitext(self.po_file_path)
            if rosetta_settings.AUTO_COMPILE:
                self.po_file.save_as_mofile(po_filepath + '.mo')
            post_save.send(sender=None, language_code=self.language_id, request=self.request)
            # Try auto-reloading via the WSGI daemon mode reload mechanism
            should_try_wsgi_reload = (rosetta_settings.WSGI_AUTO_RELOAD and 'mod_wsgi.process_group' in self.request.environ and self.request.environ.get('mod_wsgi.process_group', None) and 'SCRIPT_FILENAME' in self.request.environ and int(self.request.environ.get('mod_wsgi.script_reloading', 0)))
            if should_try_wsgi_reload:
                try:
                    # Touching the WSGI script file triggers a daemon reload.
                    os.utime(self.request.environ.get('SCRIPT_FILENAME'), None)
                except OSError:
                    pass
            # Try auto-reloading via uwsgi daemon reload mechanism
            if rosetta_settings.UWSGI_AUTO_RELOAD:
                try:
                    import uwsgi
                    uwsgi.reload()  # pretty easy right?
                except:
                    # we may not be running under uwsgi :P
                    pass
            # XXX: It would be nice to add a success message here!
        except Exception as e:
            messages.error(self.request, e)
    if file_change and not self.po_file_is_writable:
        # File not writable: persist the edited catalog in the cache so it
        # can still be downloaded.
        storage = get_storage(self.request)
        storage.set(self.po_file_cache_key, self.po_file)
    # Reconstitute url to redirect to. Start with determining whether the
    # page number can be incremented.
    paginator = Paginator(self.get_entries(), rosetta_settings.MESSAGES_PER_PAGE)
    try:
        page = int(self._request_request('page', 1))
    except ValueError:
        # fall back to page 1
        page = 1
    else:
        if not (0 < page <= paginator.num_pages):
            page = 1
    if page < paginator.num_pages:
        page += 1
    query_string_args = {'msg_filter': self.msg_filter, 'query': self.query, 'ref_lang': self.ref_lang, 'page': page, }
    # Winnow down the query string args to non-blank ones
    query_string_args = {k: v for k, v in query_string_args.items() if v}
    return HttpResponseRedirect("{url}?{qs}".format(url=reverse('rosetta-form', kwargs=self.kwargs), qs=urlencode_safe(query_string_args)))
def rename_dimension(x, old_name, new_name):
    """Reshape a Tensor, renaming one dimension.

    Args:
        x: a Tensor
        old_name: a string
        new_name: a string

    Returns:
        a Tensor
    """
    renamed_shape = x.shape.rename_dimension(old_name, new_name)
    return reshape(x, renamed_shape)
def hid_device_path_exists(device_path, guid=None):
    """Test if the required device_path is still valid
    (HID device connected to host)."""
    # Expecting HID devices: default to the HID class GUID
    if not guid:
        guid = winapi.GetHidGuid()
    info_data = winapi.SP_DEVINFO_DATA()
    info_data.cb_size = sizeof(winapi.SP_DEVINFO_DATA)
    with winapi.DeviceInterfaceSetInfo(guid) as h_info:
        for interface_data in winapi.enum_device_interfaces(h_info, guid):
            candidate = winapi.get_device_path(h_info, interface_data,
                                               byref(info_data))
            if candidate == device_path:
                return True
    # No connected device currently has that path
    return False
def set_title(self, table=None, title=None, verbose=None):
    """Changes the visible identifier of a single table.

    :param table (string, optional): Specifies a table by table name. If the
        prefix SUID: is used, the table corresponding to the SUID will be
        returned.
    :param title (string, optional): The name of the table used in the
        current network.
    """
    params = set_param(['table', 'title'], [table, title])
    response = api(url=self.__url + "/set title", PARAMS=params,
                   method="POST", verbose=verbose)
    return response
def validate_schema(sconf):
    """Return True if config schema is correct.

    Parameters
    ----------
    sconf : dict
        session configuration

    Returns
    -------
    bool

    Raises
    ------
    exc.ConfigError
        when a required key is missing
    """
    # verify session_name
    if 'session_name' not in sconf:
        raise exc.ConfigError('config requires "session_name"')
    if 'windows' not in sconf:
        raise exc.ConfigError('config requires list of "windows"')
    # every window needs a name and a pane list
    for window in sconf['windows']:
        if 'window_name' not in window:
            raise exc.ConfigError('config window is missing "window_name"')
        if 'panes' not in window:
            raise exc.ConfigError(
                'config window %s requires list of panes' % window['window_name'])
    return True
def loadJSON(self, jdata):
    """Initializes the information for this class from the given JSON data
    blob.

    :param jdata: <dict>
    """
    get = jdata.get
    # required params
    self.__name = jdata['name']
    self.__field = jdata['field']
    # optional fields -- keep the current value when the key is absent/falsy
    self.__display = get('display') or self.__display
    self.__flags = get('flags') or self.__flags
    self.__defaultOrder = get('defaultOrder') or self.__defaultOrder
    self.__default = get('default') or self.__default
def service_timeouts(self):
    """Run callbacks on all expired timers.

    Called from the event thread.

    :return: next end time, or None when no timer remains pending
    """
    queue = self._queue
    # Fold any newly registered timers into the heap before servicing
    if self._new_timers:
        new_timers = self._new_timers
        while new_timers:
            heappush(queue, new_timers.pop())
    if queue:
        now = time.time()
        while queue:
            try:
                # Peek at the earliest timer; heap entries look like
                # (end_time, timer) -- TODO confirm entry layout
                timer = queue[0][1]
                if timer.finish(now):
                    # Timer fired (or is done): drop it from the heap
                    heappop(queue)
                else:
                    # Earliest timer not due yet: report when it ends
                    return timer.end
            except Exception:
                # NOTE(review): the failing timer stays at the heap head, so
                # a finish() that raises persistently would spin this loop --
                # confirm this is intended.
                log.exception("Exception while servicing timeout callback: ")
def add_paths(paths, base_path, operations):
    """Add paths to swagger."""
    for operation, ns, rule, func in operations:
        path = build_path(operation, ns)
        if not path.startswith(base_path):
            continue
        http_method = operation.value.method.lower()
        # If there is no version number or prefix, we'd expect the base path
        # to be ""; however, OpenAPI requires the minimal base path to be "/".
        # This means we need branching logic for that special case.
        offset = 0 if len(base_path) == 1 else len(base_path)
        item = paths.setdefault(path[offset:], swagger.PathItem())
        item[http_method] = build_operation(operation, ns, rule, func)
def _analyze(self):
    '''Run-once function to generate analysis over all series, considering
    both full and partial data.

    Initializes the self.analysis dict which maps:
        (non-reference) column/series -> 'full' and/or 'partial' ->
        stats dict returned by get_xy_dataset_statistics
    '''
    # Only compute once; return the cached dict on later calls
    if not self.analysis:
        for dseries in self.data_series:
            # Count number of non-NaN rows
            dseries_count = self.df[dseries].count()
            assert (len(self.df_pruned) <= dseries_count <= len(self.df) or dseries_count)
            self.analysis[dseries] = dict(partial=None, full=None, )
            # Compute the statistics for the common (pruned) records
            stats = get_xy_dataset_statistics_pandas(self.df_pruned, self.reference_series, dseries, fcorrect_x_cutoff=1.0, fcorrect_y_cutoff=1.0, bootstrap_data=False, x_fuzzy_range=0.1, y_scalar=1.0, ignore_null_values=True)
            if (len(self.df_pruned) == len(self.df)):
                # There are no pruned records so these are actually the full stats
                self.analysis[dseries]['full'] = dict(data=stats, description=format_stats(stats, floating_point_format='%0.3f', sci_notation_format='%.2E', return_string=True))
            else:
                # Store the results for the partial dataset
                self.analysis[dseries]['partial'] = dict(data=stats, description=format_stats(stats, floating_point_format='%0.3f', sci_notation_format='%.2E', return_string=True))
            if dseries_count > len(self.df_pruned):
                # This dataset has records which are not in the pruned
                # dataset: recompute over the full frame for the 'full' slot
                stats = get_xy_dataset_statistics_pandas(self.df, self.reference_series, dseries, fcorrect_x_cutoff=1.0, fcorrect_y_cutoff=1.0, bootstrap_data=False, x_fuzzy_range=0.1, y_scalar=1.0, ignore_null_values=True)
                self.analysis[dseries]['full'] = dict(data=stats, description=format_stats(stats, floating_point_format='%0.3f', sci_notation_format='%.2E', return_string=True))
    return self.analysis
def _extract_cell(args, cell_body):
    """Implements the BigQuery extract magic used to extract query or table
    data to GCS.

    The supported syntax is:
        %bq extract <args>

    Args:
        args: the arguments following '%bigquery extract'.
        cell_body: optional cell config (e.g. query parameters).

    Returns:
        The result of the completed extract job.
    """
    env = google.datalab.utils.commands.notebook_environment()
    config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
    parameters = config.get('parameters')
    if args['table']:
        # Table extract: resolve the (possibly parameterized) table name
        table = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters)
        source = _get_table(table)
        if not source:
            raise Exception('Could not find table %s' % table)
        # The delimiter option only applies to csv output
        csv_delimiter = args['delimiter'] if args['format'] == 'csv' else None
        path = google.datalab.bigquery.Query.resolve_parameters(args['path'], parameters)
        job = source.extract(path, format=args['format'], csv_delimiter=csv_delimiter, csv_header=args['header'], compress=args['compress'])
    elif args['query'] or args['view']:
        # Query/view extract: execute and write results to the target file
        source_name = args['view'] or args['query']
        source = google.datalab.utils.commands.get_notebook_item(source_name)
        if not source:
            raise Exception('Could not find ' + ('view ' + args['view'] if args['view'] else 'query ' + args['query']))
        query = source if args['query'] else bigquery.Query.from_view(source)
        query_params = get_query_parameters(args, cell_body) if args['query'] else None
        output_options = QueryOutput.file(path=args['path'], format=args['format'], csv_delimiter=args['delimiter'], csv_header=args['header'], compress=args['compress'], use_cache=not args['nocache'])
        context = google.datalab.utils._utils._construct_context_for_args(args)
        job = query.execute(output_options, context=context, query_params=query_params)
    else:
        raise Exception('A query, table, or view is needed to extract')
    # Surface job failures/errors to the caller
    if job.failed:
        raise Exception('Extract failed: %s' % str(job.fatal_error))
    elif job.errors:
        raise Exception('Extract completed with errors: %s' % str(job.errors))
    return job.result()
def main():
    """Small run usage example."""
    # TODO: needs to be moved into the .rst doc
    from reliure.pipeline import Composable

    @Composable
    def doc_analyse(docs):
        # Build a minimal "document" dict for each input name
        for doc in docs:
            yield {
                "title": doc,
                "url": "http://lost.com/%s" % doc,
            }

    @Composable
    def print_ulrs(docs):
        # Side-effect stage: print each URL, pass documents through
        for doc in docs:
            print(doc["url"])
            yield doc

    pipeline = doc_analyse | print_ulrs
    # range() replaces the Python 2-only xrange(), which raises NameError on
    # Python 3 (the rest of this function already uses print() as a function).
    documents = ("doc_%s" % d for d in range(20))
    res = run_parallel(pipeline, documents, ncpu=2, chunksize=5)
    print(res)
def right_join(self, table, one=None, operator=None, two=None):
    """Add a right join to the query.

    :param table: The table to join with, can also be a JoinClause instance
    :type table: str or JoinClause
    :param one: The first column of the join condition
    :type one: str
    :param operator: The operator of the join condition
    :type operator: str
    :param two: The second column of the join condition
    :type two: str

    :return: The current QueryBuilder instance
    :rtype: QueryBuilder
    """
    # A pre-built JoinClause carries its own type; mark it as a right join
    if isinstance(table, JoinClause):
        table.type = "right"
    return self.join(table, one, operator, two, "right")
def _makeBaseDir ( basedir , quiet ) :
"""Make worker base directory if needed .
@ param basedir : worker base directory relative path
@ param quiet : if True , don ' t print info messages
@ raise CreateWorkerError : on error making base directory""" | if os . path . exists ( basedir ) :
if not quiet :
print ( "updating existing installation" )
return
if not quiet :
print ( "mkdir" , basedir )
try :
os . mkdir ( basedir )
except OSError as exception :
raise CreateWorkerError ( "error creating directory {0}: {1}" . format ( basedir , exception . strerror ) ) |
def release(self):
    """Release the lock."""
    # Only remove the lock file if this process actually holds the lock
    if self.is_locked_by_me():
        os.remove(self.lock_filename)
        logger.debug('The lock {} is released by me (pid: {}).'.format(
            self.lock_filename, self.pid))
    # Close the file descriptor regardless of lock ownership
    if self.fd:
        os.close(self.fd)
        self.fd = None
def __tokenize_segments(self):
    """Tokenize every RS3 segment (i.e. an RST nucleus or satellite).

    For each token, a node is added to the graph, as well as an edge from
    the segment node to the token node. The token node IDs are also added
    to ``self.tokens``.
    """
    for seg_node_id in self.segments:
        segment_tokens = self.node[seg_node_id][self.ns + ':text'].split()
        for i, token in enumerate(segment_tokens):
            tok_node_id = '{0}:{1}_{2}'.format(self.ns, seg_node_id, i)
            self.add_node(
                tok_node_id,
                layers={self.ns, self.ns + ':token'},
                attr_dict={self.ns + ':token': token, 'label': token})
            self.tokens.append(tok_node_id)
            self.add_edge(
                seg_node_id, tok_node_id,
                layers={'rst', 'rst:token'},
                edge_type=EdgeTypes.spanning_relation)
def is_resource_class_resource_attribute(rc, attr_name):
    """Checks if the given attribute name is a resource attribute (i.e.,
    either a member or a collection attribute) of the given registered
    resource."""
    attribute = get_resource_class_attribute(rc, attr_name)
    # NOTE(review): this also evaluates True when the lookup returns None
    # (None != TERMINAL) -- confirm a missing attribute is intended to count
    # as a resource attribute.
    return attribute != RESOURCE_ATTRIBUTE_KINDS.TERMINAL
def setup_model(x, y, model_type='random_forest', seed=None, **kwargs):
    """Initializes a machine learning model.

    Args:
        x: Pandas DataFrame, X axis of features
        y: Pandas Series, Y axis of targets
        model_type: Machine Learning model to use
            Valid values: 'random_forest'
        seed: Random state to use when splitting sets and creating the model
        **kwargs: Scikit Learn's RandomForestClassifier kwargs

    Returns:
        Trained model instance of model_type

    Raises:
        ValueError: on an unrecognized ``model_type``
    """
    assert len(x) > 1 and len(y) > 1, (
        'Not enough data objects to train on (minimum is at least two, '
        'you have (x: {0}) and (y: {1}))'.format(len(x), len(y)))
    sets = namedtuple('Datasets', ['train', 'test'])
    # Deterministic (unshuffled) split, then bundle train/test pairs
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, random_state=seed, shuffle=False)
    x = sets(x_train, x_test)
    y = sets(y_train, y_test)
    if model_type in ('random_forest', 'rf'):
        model = rf.RandomForest(x, y, random_state=seed, **kwargs)
    elif model_type in ('deep_neural_network', 'dnn'):
        model = dnn.DeepNeuralNetwork(x, y, **kwargs)
    else:
        raise ValueError('Invalid model type kwarg')
    return model
def aromatize(self):
    """Convert structure to aromatic form.

    :return: number of processed rings
    """
    # Only 5- and 6-membered rings are candidates for aromatization
    rings = [x for x in self.sssr if 4 < len(x) < 7]
    if not rings:
        return 0
    total = 0
    # Alternate quinonize/aromatize passes until neither makes progress
    while True:
        c = self._quinonize(rings, 'order')
        if c:
            total += c
        elif total:
            # No quinonization progress this round but earlier passes did
            # something: we have reached a fixed point
            break
        c = self._aromatize(rings, 'order')
        if not c:
            break
        total += c
    if total:
        # Bond orders changed: cached derived data is stale
        self.flush_cache()
    return total
def union_setadd(dict1, dict2):
    """Similar to dictlib.union(), but following a setadd logic (with strings
    and ints), and a union (with dictionaries). Assumption is that all
    elements of a list are of the same type (i.e. if the first element is a
    dict, it tries to union all elements).

    NOT data safe, it mangles both dict1 and dict2.

    >>> a = dict(a=[{"b": 1, "c": 2}, {"a": 1}], b=dict(z=dict(y=1)), e=[1])
    >>> b = dict(a=[{"b": 1, "d": 3}], b=dict(z=dict(y=-1)), e=[1, 2])
    >>> # sorted json so that it is predictably the same
    >>> import json
    >>> json.dumps(union_setadd(a, b), sort_keys=True)
    '{"a": [{"b": 1, "c": 2, "d": 3}, {"a": 1}], "b": {"z": {"y": -1}}, "e": [1, 2]}'
    >>> a['a'][0]["d"] = 4
    >>> json.dumps(b, sort_keys=True)
    '{"a": [{"b": 1, "d": 3}], "b": {"z": {"y": -1}}, "e": [1, 2]}'
    >>> json.dumps(a, sort_keys=True)
    '{"a": [{"b": 1, "c": 2, "d": 4}, {"a": 1}], "b": {"z": {"y": -1}}, "e": [1, 2]}'
    """
    for key, new_val in dict2.items():
        if key not in dict1:
            # Key only exists in dict2: just define it
            dict1[key] = new_val
            continue
        # Key is in both places: union/setadd depending on the value type
        if isinstance(new_val, dict):
            # If dict2's value is a dict, assume dict1's is as well
            dict1[key] = union_setadd(dict1[key], new_val)
        elif isinstance(new_val, list):
            # Lists get uglier
            cur = dict1[key]
            # both dict1/dict2 values need to be lists
            if not isinstance(cur, list):
                raise TypeError("dict1[{}] is not a list where dict2[{}] is.".format(key, key))
            # ignore zero-length new values (string or list)
            if not len(new_val):
                continue
            if isinstance(new_val[0], dict):
                # if the first element is a dict, assume they all are:
                # element-wise union, appending any extras
                for idx in range(len(new_val)):
                    if idx < len(cur):
                        cur[idx] = union_setadd(cur[idx], new_val[idx])
                    else:
                        cur.append(new_val[idx])
            else:
                # otherwise just setadd the elements by value; order can get
                # wonky
                for elem in new_val:
                    if elem not in cur:  # inefficient
                        cur.append(elem)
            dict1[key] = cur
        else:
            # any other type: just assign
            dict1[key] = new_val
    return dict1
def split_option(self, section, option):
    """Return a list of strings made by splitting the comma-separated option
    value. Returns an empty list when the option value is an empty string.

    :param section: option section name
    :param option: option name
    :return: list of strings
    """
    raw = self[section][option].strip()
    if not raw:
        return []
    return [part.strip() for part in raw.split(",")]
def set_libs_flags(self, env, arch):
    '''Takes care to properly link libraries with python depending on our
    requirements and the attribute :attr:`opt_depends`.'''

    def add_flags(include_flags, link_dirs, link_libs):
        # Append to the existing env vars, creating them when absent
        env['CPPFLAGS'] = env.get('CPPFLAGS', '') + include_flags
        env['LDFLAGS'] = env.get('LDFLAGS', '') + link_dirs
        env['LIBS'] = env.get('LIBS', '') + link_libs

    build_order = self.ctx.recipe_build_order
    if 'sqlite3' in build_order:
        info('Activating flags for sqlite3')
        recipe = Recipe.get_recipe('sqlite3', self.ctx)
        add_flags(' -I' + recipe.get_build_dir(arch.arch),
                  ' -L' + recipe.get_lib_dir(arch),
                  ' -lsqlite3')
    if 'libffi' in build_order:
        info('Activating flags for libffi')
        recipe = Recipe.get_recipe('libffi', self.ctx)
        # In order to force the correct linkage for our libffi library, we
        # set the following variable to point where our libffi.pc file is,
        # because the python build system uses pkg-config to configure it.
        env['PKG_CONFIG_PATH'] = recipe.get_build_dir(arch.arch)
        add_flags(' -I' + ' -I'.join(recipe.get_include_dirs(arch)),
                  ' -L' + join(recipe.get_build_dir(arch.arch), '.libs'),
                  ' -lffi')
    if 'openssl' in build_order:
        info('Activating flags for openssl')
        recipe = Recipe.get_recipe('openssl', self.ctx)
        add_flags(recipe.include_flags(arch),
                  recipe.link_dirs_flags(arch),
                  recipe.link_libs_flags())
    return env
def initialize_service_agreement(did, agreement_id, service_definition_id, signature, account_address, consume_endpoint):
    """Send a request to the service provider (consume_endpoint) to initialize
    the service agreement for the asset identified by `did`.

    :param did: id of the asset, includes the `did:op:` prefix, str
    :param agreement_id: id of the agreement, hex str
    :param service_definition_id: identifier of the service inside the asset DDO, str
    :param signature: signed agreement hash, hex str
    :param account_address: ethereum address of the consumer signing this agreement, hex str
    :param consume_endpoint: url of the service provider, str
    :return: True on success; None when the response object is falsy or has
        no ``status_code`` attribute
    :raises OceanInitializeServiceAgreementError: when the provider answers
        with any status other than 201
    """
    payload = Brizo._prepare_consume_payload(did, agreement_id, service_definition_id, signature, account_address)
    response = Brizo._http_client.post(consume_endpoint, data=payload,
                                       headers={'content-type': 'application/json'})
    if response and hasattr(response, 'status_code'):
        # Anything other than 201 Created is treated as a failure.
        if response.status_code != 201:
            msg = (f'Initialize service agreement failed at the consumeEndpoint '
                   f'{consume_endpoint}, reason {response.text}, status {response.status_code}')
            logger.error(msg)
            raise OceanInitializeServiceAgreementError(msg)
        logger.info(f'Service agreement initialized successfully, service agreement id {agreement_id},'
                    f' consumeEndpoint {consume_endpoint}')
        return True
def numpy(self, single_components=False):
    """Get a numpy array copy representing the underlying image data.

    Altering the returned ndarray has NO effect on the underlying image data.

    Arguments
    ---------
    single_components : boolean (default is False)
        if True, keep the extra component dimension in the returned array
        even if the image only has one component
        (i.e. ``self.has_components == False``)

    Returns
    -------
    ndarray
    """
    array = np.array(self.view(single_components=single_components),
                     copy=True, dtype=self.dtype)
    # idiomatic truthiness check instead of `single_components == True`
    if self.has_components or single_components:
        # Components are stored on the leading axis; move them to the last
        # axis for the conventional (spatial..., components) layout.
        array = np.rollaxis(array, 0, self.dimension + 1)
    return array
def clean_str(string):
    """Tokenization/string cleaning for all datasets except for SST.

    Original taken from
    https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    # (pattern, replacement) pairs applied in order; the final pattern
    # collapses any runs of whitespace introduced by the earlier ones.
    substitutions = (
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", r" \( "),
        (r"\)", r" \) "),
        (r"\?", r" \? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()
def check_config_mode(self, check_string="", pattern=""):
    """Checks if the device is in configuration mode or not.

    :param check_string: Identification of configuration mode from the device
    :type check_string: str
    :param pattern: Pattern to terminate reading of channel
    :type pattern: str
    :return: True when ``check_string`` appears in the device output
    :rtype: bool
    """
    self.write_channel(self.RETURN)
    # You can encounter an issue here (on router name changes) prefer
    # delay-based solution
    if not pattern:
        output = self._read_channel_timing()
    else:
        output = self.read_until_pattern(pattern=pattern)
    return check_string in output
def is_monotonic(full_list):
    """Determine whether elements in a list are monotonic, i.e. unique
    elements are clustered together in single contiguous runs.

    e.g. [5, 5, 3, 4] is monotonic, [5, 3, 5] is not.

    :param full_list: sequence to inspect
    :return: True when every distinct value forms one contiguous run
    """
    if not full_list:
        # An empty sequence is trivially monotonic (previously this raised
        # IndexError on full_list[0]).
        return True
    seen = {full_list[0]}
    previous = full_list[0]
    for item in full_list:
        if item != previous:
            if item in seen:
                # Value re-appears after a different value: runs are broken.
                return False
            previous = item
            seen.add(item)
    return True
def _handle_tag_removeobject2(self):
    """Handle the RemoveObject2 tag.

    Reads the 16-bit ``Depth`` field from the source stream and returns the
    populated tag object.
    """
    obj = _make_object("RemoveObject2")
    obj.Depth = unpack_ui16(self._src)
    return obj
def loadValues(self, values):
    """Loads the values from the inputed dictionary to the widget.

    :param values: mapping of column name to the value to display
    :type values: dict
    """
    table = self.tableType()
    if table:
        schema = table.schema()
    else:
        schema = None
    # Collect child widgets bound to a column, with their ordering hint, so
    # values are applied in a deterministic order.
    process = []
    for widget in self.findChildren(QWidget):
        prop = widget.property('columnName')
        if not prop:
            continue
        order = widget.property('columnOrder')
        if order:
            order = unwrapVariant(order)
        else:
            # Widgets without an explicit order sort last.
            order = 10000000
        process.append((order, widget, prop))
    process.sort()
    for order, widget, prop in process:
        columnName = nativestring(unwrapVariant(prop, ''))
        if not columnName:
            continue
        # Enum boxes need their enum assigned from the schema before a value
        # can be applied to them.
        if isinstance(widget, XEnumBox) and schema:
            column = schema.column(columnName)
            if column.enum() is not None:
                widget.setEnum(column.enum())
        if columnName in values:
            projexui.setWidgetValue(widget, values.get(columnName))
def _proxy(self):
    """Generate an instance context for the instance; the context is capable
    of performing various actions. All instance actions are proxied to the
    context.

    :returns: IpAddressContext for this IpAddressInstance
    :rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressContext
    """
    # Lazily build and cache the context on first access.
    if self._context is None:
        self._context = IpAddressContext(
            self._version,
            account_sid=self._solution['account_sid'],
            ip_access_control_list_sid=self._solution['ip_access_control_list_sid'],
            sid=self._solution['sid'],
        )
    return self._context
def tokenize(self: object, untokenized_string: str, include_blanks=False):
    """Tokenize lines by ``'\\n'``.

    :type untokenized_string: str
    :param untokenized_string: A string containing one or more sentences.
    :param include_blanks: Boolean; if True, blank lines are preserved as ""
        in the returned list of strings. Default is False.
    :rtype: list of strings
    :raises TypeError: when ``untokenized_string`` is not a str
    """
    if not isinstance(untokenized_string, str):
        # Raise instead of using ``assert``: asserts are stripped under
        # ``python -O`` and would silently skip validation.
        raise TypeError('Incoming argument must be a string.')
    # make list of tokenized sentences
    if include_blanks:
        tokenized_lines = untokenized_string.splitlines()
    else:
        tokenized_lines = [line for line in untokenized_string.splitlines() if line != '']
    return tokenized_lines
def auto_str(__repr__=False):
    """Use this decorator to auto implement __str__() and optionally __repr__()
    methods on classes.

    Args:
        __repr__ (bool): If set to true, the decorator will auto-implement the
            __repr__() method as well.

    Returns:
        callable: Decorating function.

    Note:
        There are known issues with self referencing (self.s = self). Recursion
        will be identified by the python interpreter and will do no harm, but
        it will actually not work.
        A eval(class.__repr__()) will obviously not work, when there are
        attributes that are not part of the __init__'s arguments.

    Example:
        >>> @auto_str(__repr__=True)
        ... class Demo(object):
        ...     def __init__(self, i=0, s="a", l=None, t=None):
        ...         self.i = i
        ...         self.s = s
        ...         self.l = l
        ...         self.t = t
        >>> dut = Demo(10, 'abc', [1, 2, 3], (1, 2, 3))
        >>> print(dut.__str__())
        Demo(i=10, l=[1, 2, 3], s='abc', t=(1, 2, 3))
        >>> print(eval(dut.__repr__()).__str__())
        Demo(i=10, l=[1, 2, 3], s='abc', t=(1, 2, 3))
        >>> print(dut.__repr__())
        Demo(i=10, l=[1, 2, 3], s='abc', t=(1, 2, 3))
    """
    def _decorator(cls):
        def __str__(self):
            # Render sorted instance attributes, skipping any that appear in
            # the class hierarchy's __auto_str_ignore__ fields.
            items = ["{name}={value}".format(name=name, value=vars(self)[name].__repr__())
                     for name in [key for key in sorted(vars(self))]
                     if name not in get_field_mro(self.__class__, '__auto_str_ignore__')]
            # pylint: disable=bad-continuation
            return "{clazz}({items})".format(clazz=str(type(self).__name__), items=', '.join(items))
        cls.__str__ = __str__
        if __repr__:
            cls.__repr__ = __str__
        return cls
    return _decorator
def parse(raw_email):  # type: (six.string_types) -> Tuple[six.string_types, six.string_types]
    """Extract email from a full address. Example:
    'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com

    >>> parse("John Doe <me+github.com@someorg.com")
    ('me', 'someorg.com')
    >>> parse(42)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    InvalidEmail: 'Invalid email: 42'
    >>> parse(None)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    InvalidEmail: 'None or NaN is not a valid email address'
    """
    if not isinstance(raw_email, six.string_types):
        raise InvalidEmail("Invalid email: %s" % raw_email)
    if not raw_email or pd.isnull(raw_email):
        raise InvalidEmail("None or NaN is not a valid email address")
    # Strip an optional "Display Name <...>" wrapper.
    address = raw_email.split("<", 1)[-1].split(">", 1)[0]
    # git-svn generates emails with several @, e.g.:
    # <rossberg@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
    pieces = address.split("@", 3)
    if len(pieces) < 2:
        raise InvalidEmail("Invalid email")
    local_part, domain_part = pieces[0], pieces[1]
    username = local_part.rsplit(" ", 1)[-1].split("+", 1)[0]
    domain = domain_part.split(" ", 1)[0]
    return username, domain
def getCoeffs(date):
    """Interpolate the IGRF spherical-harmonic coefficients for a date.

    :param date: decimal year, float; must satisfy 1900.0 <= date <= 2025.0
    :return: tuple (g, h) of nested lists of coefficients; ([], []) when the
        date is out of range

    NOTE(review): relies on a module-level ``gh`` coefficient list loaded
    elsewhere (see loadCoeffs in the docstring's original wording) -- confirm
    it is initialised before calling.
    """
    if date < 1900.0 or date > 2025.0:
        print('This subroutine will not work with a date of ' + str(date))
        print('Date must be in the range 1900.0 <= date <= 2025.0')
        print('On return [], []')
        return [], []
    elif date >= 2015.0:
        if date > 2020.0:  # not adapt for the model but can calculate
            print('This version of the IGRF is intended for use up to 2020.0.')
            print('values for ' + str(date) + ' will be computed but may be of reduced accuracy')
        t = date - 2015.0
        tc = 1.0
        # pointer for last coefficient in pen-ultimate set of MF coefficients...
        ll = 3060
        nmx = 13
        nc = nmx * (nmx + 2)
    else:
        t = 0.2 * (date - 1900.0)
        ll = int(t)
        t = t - ll
        # SH models before 1995.0 are only to degree 10
        if date < 1995.0:
            nmx = 10
            nc = nmx * (nmx + 2)
            ll = nc * ll
        else:
            nmx = 13
            nc = nmx * (nmx + 2)
            ll = round(0.2 * (date - 1995.0))
            # 19 is the number of SH models that extend to degree 10
            ll = 120 * 19 + nc * ll
        tc = 1.0 - t
    g, h = [], []
    temp = ll - 1
    # Linearly interpolate between adjacent model epochs: tc weights the
    # earlier epoch, t the later one (secular variation when date >= 2015).
    for n in range(nmx + 1):
        g.append([])
        h.append([])
        if n == 0:
            # g[0][0] is unused; pad so indices match degree/order.
            g[0].append(None)
        for m in range(n + 1):
            if m != 0:
                g[n].append(tc * gh[temp] + t * gh[temp + nc])
                h[n].append(tc * gh[temp + 1] + t * gh[temp + nc + 1])
                temp += 2
            else:
                # m == 0 terms have no h coefficient.
                g[n].append(tc * gh[temp] + t * gh[temp + nc])
                h[n].append(None)
                temp += 1
    return g, h
def woe(df, feature_name, target_name):
    """Calculate weight of evidence.

    Parameters
    ----------
    df : Dataframe
    feature_name : str
        Column name to encode.
    target_name : str
        Target column name (binary 1/0).

    Returns
    -------
    Series
    """
    if df[target_name].nunique() > 2:
        raise ValueError('Target column should be binary (1/0).')
    event_total = float(df[df[target_name] == 1.0].shape[0])
    non_event_total = float(df.shape[0] - event_total)

    def _group_woe(group):
        # WoE = ln(relative non-event rate / relative event rate) * 100
        events = float(group.sum())
        non_events = group.shape[0] - events
        return np.log((non_events / non_event_total) / (events / event_total)) * 100

    return df.groupby(feature_name)[target_name].transform(_group_woe)
async def become(self, layer_type: Type[L], request: 'Request'):
    """Transforms the translatable string into an actual string and put it
    inside a RawText.

    :param layer_type: target layer class; only RawText is rendered here
    :param request: request used to render the translatable text
    """
    # NOTE(review): for non-RawText targets this first delegates to the
    # parent implementation (presumably it raises for unsupported types --
    # confirm), then unconditionally renders this text into a RawText.
    if layer_type != RawText:
        super(Text, self).become(layer_type, request)
    return RawText(await render(self.text, request))
def partition_all(s, sep):
    """Uses str.partition() to split every occurrence of sep in s. The
    returned list does not contain empty strings.

    If sep is a list, all separators are evaluated in turn.

    :param s: The string to split.
    :param sep: A separator string or a list of separator strings.
    :return: A list of parts split by sep
    """
    if not isinstance(sep, list):
        return _partition_all_internal(s, sep)
    # Apply the first separator, then re-split every part by each remaining
    # separator in order.
    parts = _partition_all_internal(s, sep[0])
    for separator in sep[1:]:
        next_parts = []
        for part in parts:
            next_parts.extend(_partition_all_internal(part, separator))
        parts = next_parts
    return parts
def _request(self, msg_type, msg_data=None):
    """Helper function to wrap msg w/ msg_type.

    Sends the wrapped message over the socket, reconnecting and retrying up
    to MAX_RETRIES times on socket errors, then returns the server response.

    :param msg_type: value stored under the message's 'type' key
    :param msg_data: optional payload stored under the 'data' key
    :return: the response received from the server
    :raises socket.error: when sending still fails after MAX_RETRIES attempts
    """
    msg = {}
    msg['type'] = msg_type
    if msg_data:
        msg['data'] = msg_data
    done = False
    tries = 0
    while not done and tries < MAX_RETRIES:
        try:
            MessageSocket.send(self, self.sock, msg)
            done = True
        except socket.error as e:
            tries += 1
            if tries >= MAX_RETRIES:
                raise
            print("Socket error: {}".format(e))
            # Re-establish the connection before retrying.
            self.sock.close()
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect(self.server_addr)
    logging.debug("sent: {0}".format(msg))
    resp = MessageSocket.receive(self, self.sock)
    logging.debug("received: {0}".format(resp))
    return resp
def _wait_until(obj, att, desired, callback, interval, attempts, verbose, verbose_atts):
    """Loops until either the desired value of the attribute is reached, or
    the number of attempts is exceeded.

    :param obj: object to poll; refreshed via ``obj.get()`` or its manager
    :param att: attribute name whose value is compared against ``desired``
    :param desired: target value, or a list/tuple of acceptable values
    :param callback: unused in this synchronous path
    :param interval: seconds to sleep between polls
    :param attempts: maximum number of polls; 0 means poll forever
    :param verbose: when True, print progress on each poll
    :param verbose_atts: extra attribute name(s) included in progress output
    :return: the refreshed object (whether or not the desired value was hit)
    :raises exc.NoReloadError: when the object supports neither ``get()`` nor
        manager-based reloading
    """
    if not isinstance(desired, (list, tuple)):
        desired = [desired]
    if verbose_atts is None:
        verbose_atts = []
    if not isinstance(verbose_atts, (list, tuple)):
        verbose_atts = [verbose_atts]
    infinite = (attempts == 0)
    attempt = 0
    start = time.time()
    while infinite or (attempt < attempts):
        try:
            # For servers:
            obj.get()
        except AttributeError:
            try:
                # For other objects that don't support .get()
                obj = obj.manager.get(obj.id)
            except AttributeError:
                # punt
                raise exc.NoReloadError("The 'wait_until' method is not "
                                        "supported for '%s' objects." % obj.__class__)
        attval = getattr(obj, att)
        if verbose:
            elapsed = time.time() - start
            msgs = ["Current value of %s: %s (elapsed: %4.1f seconds)" % (att, attval, elapsed)]
            for vatt in verbose_atts:
                vattval = getattr(obj, vatt, None)
                msgs.append("%s=%s" % (vatt, vattval))
            print(" ".join(msgs))
        if attval in desired:
            return obj
        time.sleep(interval)
        attempt += 1
    return obj
def touch(fname, times=None):
    '''Emulates the UNIX touch command.

    Opening in append mode creates the file when missing without truncating
    existing content; os.utime then updates the timestamps.
    '''
    handle = io.open(fname, 'a')
    try:
        os.utime(fname, times)
    finally:
        handle.close()
def parsebam(self):
    """Parse the dictionaries of the sorted bam files extracted using pysam.

    Dumps each sample's analysis attributes to JSON (the sample objects are
    too large to pickle), fans the parsing out to a multiprocessing pool,
    then copies the per-sample results back onto the sample metadata.
    """
    # Threading is actually the worst - need multiprocessing to make this work at all
    logging.info('Parsing BAM files')
    # The sample objects are too big to get pickled. To hack our way around
    # this, try to dump the sample object to json, and have the processing
    # function turn the object into a dictionary.
    json_files = list()
    with tempfile.TemporaryDirectory() as tmpdir:
        best_assemblies = list()
        sample_names = list()
        for sample in self.runmetadata:
            json_name = os.path.join(tmpdir, '{sn}.json'.format(sn=sample.name))
            best_assemblies.append(sample.general.bestassemblyfile)
            sample_names.append(sample.name)
            with open(json_name, 'w') as f:
                json.dump(sample[self.analysistype].dump(), f, sort_keys=True, indent=4)
            json_files.append(json_name)
        p = multiprocessing.Pool(processes=self.cpus)
        # Broadcast the per-run settings so starmap can zip them against the
        # per-sample argument lists.
        analysis_type_list = [self.analysistype] * len(self.runmetadata)
        iupac_list = [self.iupac] * len(self.runmetadata)
        cutoff_list = [self.cutoff] * len(self.runmetadata)
        depth_list = [self.averagedepth] * len(self.runmetadata)
        allow_soft_clip_list = [self.allow_soft_clips] * len(self.runmetadata)
        sample_results = p.starmap(Sippr.parse_one_sample,
                                   zip(json_files, sample_names, best_assemblies,
                                       analysis_type_list, iupac_list, cutoff_list,
                                       depth_list, allow_soft_clip_list))
        p.close()
        p.join()
        # Since we had to json-ize the sample objects, we now need to update
        # the metadata for everything.
        for sample in self.runmetadata:
            sample[self.analysistype].faidict = dict()
            sample[self.analysistype].results = dict()
            sample[self.analysistype].avgdepth = dict()
            sample[self.analysistype].resultssnp = dict()
            sample[self.analysistype].snplocations = dict()
            sample[self.analysistype].resultsgap = dict()
            sample[self.analysistype].gaplocations = dict()
            sample[self.analysistype].sequences = dict()
            sample[self.analysistype].maxcoverage = dict()
            sample[self.analysistype].mincoverage = dict()
            sample[self.analysistype].standarddev = dict()
            # Figure out which of the sample results to use.
            for sample_result in sample_results:
                if sample_result['name'] == sample.name:
                    sample[self.analysistype].faidict = sample_result['faidict']
                    sample[self.analysistype].results = sample_result['results']
                    sample[self.analysistype].avgdepth = sample_result['avgdepth']
                    sample[self.analysistype].resultssnp = sample_result['resultssnp']
                    sample[self.analysistype].snplocations = sample_result['snplocations']
                    sample[self.analysistype].resultsgap = sample_result['resultsgap']
                    sample[self.analysistype].gaplocations = sample_result['gaplocations']
                    sample[self.analysistype].sequences = sample_result['sequences']
                    sample[self.analysistype].maxcoverage = sample_result['maxcoverage']
                    sample[self.analysistype].mincoverage = sample_result['mincoverage']
                    sample[self.analysistype].standarddev = sample_result['standarddev']
    logging.info('Done parsing BAM files')
def sign_cert_request(filepath, cert_req, ca_crt, ca_key, days, extfile, silent=False):
    """Sign a certificate request with a CA key via ``openssl x509 -req``.

    :param filepath: file path where the signed certificate is written
    :param cert_req: file path to the certificate signing request (CSR)
    :param ca_crt: file path to the CA certificate
    :param ca_key: file path to the CA private key
    :param days: valid duration for the certificate, in days
    :param extfile: file path passed as ``-extfile`` (v3_req extensions)
    :param silent: whether to suppress output
    """
    message = 'sign certificate request'
    cmd = ('openssl x509 -req -in {} -CA {} -CAkey {} -CAcreateserial'
           ' -out {} -days {} -extfile {} -extensions v3_req').format(
        cert_req, ca_crt, ca_key, filepath, days, extfile)
    call_openssl(cmd, message, silent)
def forbid_web_access(f):
    """Forbids running task using http request.

    :param f: Callable
    :return: Callable
    :raises ForbiddenError: when the wrapped callable is invoked while the
        current job context is a WebJobContext
    """
    @wraps(f)
    def wrapper_fn(*args, **kwargs):
        if isinstance(JobContext.get_current_context(), WebJobContext):
            raise ForbiddenError('Access forbidden from web.')
        return f(*args, **kwargs)
    return wrapper_fn
def list_pools_on_lbaas_agent(self, lbaas_agent, **_params):
    """Fetches a list of pools hosted by the loadbalancer agent.

    :param lbaas_agent: identifier of the agent, interpolated into the path
    :param _params: extra query parameters forwarded with the GET request
    :return: the deserialized server response
    """
    return self.get((self.agent_path + self.LOADBALANCER_POOLS) % lbaas_agent, params=_params)
def subscribe(self, peer_jid):
    """Request presence subscription with the given `peer_jid`.

    This is deliberately not a coroutine; we don't know whether the peer is
    online (usually) and they may defer the confirmation very long, if they
    confirm at all. Use :meth:`on_subscribed` to get notified when a peer
    accepted a subscription request.

    :param peer_jid: JID of the peer to subscribe to
    """
    self.client.enqueue(stanza.Presence(type_=structs.PresenceType.SUBSCRIBE, to=peer_jid))
def batch_find(self, *dbqueries):
    """Returns array of results from queries dbqueries.

    :param dbqueries: Array of individual queries as dictionaries
    :type dbqueries: ``array`` of ``dict``
    :return: Results of each query
    :rtype: ``array`` of ``array``
    :raises ValueError: when no query is supplied
    """
    if len(dbqueries) < 1:
        # Specific exception type instead of the bare ``Exception``; still
        # caught by any caller handling ``Exception``.
        raise ValueError('Must have at least one query.')
    data = json.dumps(dbqueries)
    response = self._post('batch_find', headers=KVStoreCollectionData.JSON_HEADER, body=data)
    return json.loads(response.body.read().decode('utf-8'))
def files(self):
    """Return the names of files to be created.

    Each entry is ``[directory, filename, template_name]``; a ``None``
    template denotes a file created without a template.
    """
    root = self.project_name
    package_dir = root + '/' + root.lower()
    files_description = [
        [root, 'bootstrap', 'BootstrapScriptFileTemplate'],
        [root, 'CHANGES.txt', 'PythonPackageCHANGESFileTemplate'],
        [root, 'LICENSE.txt', 'GPL3FileTemplate'],
        [root, 'MANIFEST.in', 'PythonPackageMANIFESTFileTemplate'],
        [root, 'README.txt', 'READMEReSTFileTemplate'],
        [root, 'setup.py', 'PythonPackageSetupFileTemplate'],
        [package_dir, '__init__.py', None],
        [root + '/docs', 'index.rst', None],
    ]
    return files_description
def from_etree(cls, etree_element):
    """creates a ``SaltLayer`` instance from the etree representation of an
    <layers> element from a SaltXMI file.

    :param etree_element: etree element parsed from a SaltXMI <layers> tag
    :return: a ``SaltLayer`` instance
    """
    ins = SaltElement.from_etree(etree_element)
    # TODO: this looks dangerous, ask Stackoverflow about it!
    # convert SaltElement into SaltLayer
    ins.__class__ = SaltLayer.mro()[0]
    # add nodes and edges that belong to this layer (if any)
    for element in ('nodes', 'edges'):
        elem_list = []
        xpath_result = etree_element.xpath('@' + element)
        if xpath_result:
            # NOTE(review): the attribute value appears to embed numeric ids
            # that the DIGITS regex extracts -- confirm the SaltXMI format.
            val_str = xpath_result[0]
            elem_list.extend(int(elem_id) for elem_id in DIGITS.findall(val_str))
        setattr(ins, element, elem_list)
    return ins
def symmetric_difference(self, other):
    """Combine with another Region by performing the symmetric difference of
    their pixlists.

    Requires both regions to have the same maxdepth.

    Parameters
    ----------
    other : :class:`AegeanTools.regions.Region`
        The region to be combined.

    Raises
    ------
    AssertionError
        If the two regions do not share the same maxdepth.
    """
    # work only on the lowest level
    # TODO: Allow this to be done for regions with different depths.
    if not (self.maxdepth == other.maxdepth):
        raise AssertionError("Regions must have the same maxdepth")
    self._demote_all()
    opd = set(other.get_demoted())
    self.pixeldict[self.maxdepth].symmetric_difference_update(opd)
    self._renorm()
    return
def cast_out(self, klass):
    """Interpret the content as a particular class.

    :param klass: target class used to decode the stored tag list
    :return: the decoded value: a sequence value, array contents, an atomic
        value, or a decoded instance of ``klass``
    :raises DecodingError: when the tag list is empty, has extra components,
        or is not fully consumed by the decode
    """
    if _debug:
        Any._debug("cast_out %r", klass)
    global _sequence_of_classes, _list_of_classes
    # check for a sequence element
    if (klass in _sequence_of_classes) or (klass in _list_of_classes):
        # build a sequence helper
        helper = klass()
        # make a copy of the tag list
        t = TagList(self.tagList[:])
        # let it decode itself
        helper.decode(t)
        # make sure everything was consumed
        if len(t) != 0:
            raise DecodingError("incomplete cast")
        # return what was built
        return helper.value
    # check for an array element
    elif klass in _array_of_classes:
        # build a sequence helper
        helper = klass()
        # make a copy of the tag list
        t = TagList(self.tagList[:])
        # let it decode itself
        helper.decode(t)
        # make sure everything was consumed
        if len(t) != 0:
            raise DecodingError("incomplete cast")
        # return what was built with Python list semantics
        # NOTE(review): element 0 is deliberately skipped here -- confirm it
        # holds array metadata rather than data.
        return helper.value[1:]
    elif issubclass(klass, (Atomic, AnyAtomic)):
        # make sure there's only one piece
        if len(self.tagList) == 0:
            raise DecodingError("missing cast component")
        if len(self.tagList) > 1:
            raise DecodingError("too many cast components")
        if _debug:
            Any._debug(" - building helper: %r", klass)
        # a helper cooperates between the atomic value and the tag
        helper = klass(self.tagList[0])
        # return the value
        return helper.value
    else:
        if _debug:
            Any._debug(" - building value: %r", klass)
        # build an element
        value = klass()
        # make a copy of the tag list
        t = TagList(self.tagList[:])
        # let it decode itself
        value.decode(t)
        # make sure everything was consumed
        if len(t) != 0:
            raise DecodingError("incomplete cast")
        # return what was built
        return value
def getResourceFile(self, pid, filename, destination=None):
    """Get a file within a resource.

    :param pid: The HydroShare ID of the resource
    :param filename: String representing the name of the resource file to get.
    :param destination: String representing the directory to save the resource
        file to. If None, a stream to the resource file will be returned
        instead.
    :return: The path of the downloaded file (if destination was specified),
        or a stream to the resource file.
    :raises HydroShareArgumentException: if any parameters are invalid.
    :raises HydroShareNotAuthorized: if user is not authorized to perform action.
    :raises HydroShareNotFound: if the resource was not found.
    :raises HydroShareHTTPException: if an unexpected HTTP response code is
        encountered.
    """
    # BUG FIX: the path template previously contained a literal placeholder
    # where ``{filename}`` belongs, so the requested file name was never
    # inserted into the URL even though it was passed to format().
    url = "{url_base}/resource/{pid}/files/{filename}".format(
        url_base=self.url_base, pid=pid, filename=filename)
    if destination:
        if not os.path.isdir(destination):
            raise HydroShareArgumentException("{0} is not a directory.".format(destination))
        if not os.access(destination, os.W_OK):
            raise HydroShareArgumentException(
                "You do not have write permissions to directory '{0}'.".format(destination))
    r = self._request('GET', url, stream=True)
    if r.status_code != 200:
        if r.status_code == 403:
            raise HydroShareNotAuthorized(('GET', url))
        elif r.status_code == 404:
            raise HydroShareNotFound((pid, filename))
        else:
            raise HydroShareHTTPException((url, 'GET', r.status_code))
    if destination is None:
        return r.iter_content(STREAM_CHUNK_SIZE)
    else:
        filepath = os.path.join(destination, filename)
        with open(filepath, 'wb') as fd:
            for chunk in r.iter_content(STREAM_CHUNK_SIZE):
                fd.write(chunk)
        return filepath
def find_machine(self, name_or_id):
    """Attempts to find a virtual machine given its name or UUID.

    Inaccessible machines cannot be found by name, only by UUID, because
    their name cannot safely be determined.

    :param name_or_id: the UUID or the name of a virtual machine, str
    :return: :class:`IMachine` machine object, if found
    :raises TypeError: when ``name_or_id`` is not a string
    :raises :class:`VBoxErrorObjectNotFound`: could not find registered
        machine matching ``name_or_id``

    NOTE(review): ``basestring`` is Python 2 only -- confirm this binding
    targets py2 or that ``basestring`` is aliased elsewhere for py3.
    """
    if not isinstance(name_or_id, basestring):
        raise TypeError("name_or_id can only be an instance of type basestring")
    machine = self._call("findMachine", in_p=[name_or_id])
    machine = IMachine(machine)
    return machine
def alter(self, id_filter, name, description):
    r"""Change Filter by the identifier.

    :param id_filter: Identifier of the Filter. Integer value and greater than zero.
    :param name: Name. String with a maximum of 50 characters and respect [a-zA-Z\_-]
    :param description: Description. String with a maximum of 50 characters and respect [a-zA-Z\_-]
    :return: the deserialized API response (``self.response(code, xml)``);
        the original docs claimed None, but the response is forwarded
    :raise InvalidParameterError: Filter identifier is null and invalid.
    :raise InvalidParameterError: The value of name or description is invalid.
    :raise FilterNotFoundError: Filter not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(id_filter):
        raise InvalidParameterError(u'The identifier of Filter is invalid or was not informed.')
    filter_map = dict()
    filter_map['name'] = name
    filter_map['description'] = description
    url = 'filter/' + str(id_filter) + '/'
    code, xml = self.submit({'filter': filter_map}, 'PUT', url)
    return self.response(code, xml)
def render(self, **kwargs):
    """Plots the curve and the control points polygon.

    Builds VTK actors for the control-point spheres and polygon and for the
    evaluated curve points, then hands them to the render window.
    """
    # Calling parent function
    super(VisCurve3D, self).render(**kwargs)
    # Initialize a list to store VTK actors
    vtk_actors = []
    # Start plotting
    for plot in self._plots:
        # Plot control points
        if plot['type'] == 'ctrlpts' and self.vconf.display_ctrlpts:
            # ``np.float`` was removed in NumPy 1.24; the builtin ``float``
            # is the documented replacement (same dtype: float64).
            pts = np.array(plot['ptsarr'], dtype=float)
            # Handle 2-dimensional data by padding a zero z-column
            if pts.shape[1] == 2:
                pts = np.c_[pts, np.zeros(pts.shape[0], dtype=float)]
            vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
            vtkpts.SetName(plot['name'])
            # Points as spheres
            actor1 = vtkh.create_actor_pts(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                           name=plot['name'], idx=plot['idx'])
            vtk_actors.append(actor1)
            # Lines
            actor2 = vtkh.create_actor_polygon(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                               name=plot['name'], index=plot['idx'],
                                               size=self.vconf.line_width)
            vtk_actors.append(actor2)
        # Plot evaluated points
        if plot['type'] == 'evalpts' and self.vconf.display_evalpts:
            pts = np.array(plot['ptsarr'], dtype=float)
            # Handle 2-dimensional data
            if pts.shape[1] == 2:
                pts = np.c_[pts, np.zeros(pts.shape[0], dtype=float)]
            vtkpts = numpy_to_vtk(pts, deep=False, array_type=VTK_FLOAT)
            vtkpts.SetName(plot['name'])
            actor1 = vtkh.create_actor_polygon(pts=vtkpts, color=vtkh.create_color(plot['color']),
                                               name=plot['name'], index=plot['idx'],
                                               size=self.vconf.line_width * 2)
            vtk_actors.append(actor1)
    # Render actors
    return vtkh.create_render_window(vtk_actors,
                                     dict(KeyPressEvent=(self.vconf.keypress_callback, 1.0)),
                                     figure_size=self.vconf.figure_size)
def abort(self):
    """Abort the SBI (and associated PBs).

    Marks this object aborted, moves its id from the active to the aborted
    list in the database, then aborts every associated processing block.
    """
    self.set_status('aborted')
    DB.remove_from_list('{}:active'.format(self._type), self._id)
    DB.append_to_list('{}:aborted'.format(self._type), self._id)
    sbi_pb_ids = ast.literal_eval(DB.get_hash_value(self._key, 'processing_block_ids'))
    for pb_id in sbi_pb_ids:
        pb = ProcessingBlock(pb_id)
        pb.abort()
def Page(self, text=None, show_percent=None):
    """Page text.

    Continues to page through any text supplied in the constructor. Also, any
    text supplied to this method will be appended to the total text to be
    displayed. The method returns when all available text has been displayed
    to the user, or the user quits the pager.

    Args:
        text: A string, extra text to be paged.
        show_percent: A boolean, if True, indicate how much is displayed so
            far. If None, this behaviour is 'text is None'.

    Returns:
        A boolean. If True, more data can be displayed to the user. False
        implies that the user has quit the pager.
    """
    if text is not None:
        self._text += text
    if show_percent is None:
        show_percent = text is None
    self._show_percent = show_percent
    text = LineWrap(self._text).splitlines()
    while True:
        # Get a list of new lines to display.
        self._newlines = text[self._displayed:self._displayed + self._lines_to_show]
        for line in self._newlines:
            sys.stdout.write(line + '\n')
            # Optional per-line delay to simulate smooth scrolling.
            if self._delay and self._lastscroll > 0:
                time.sleep(0.005)
        self._displayed += len(self._newlines)
        self._currentpagelines += len(self._newlines)
        if self._currentpagelines >= self._lines_to_show:
            self._currentpagelines = 0
            wish = self._AskUser()
            if wish == 'q':  # Quit pager.
                return False
            elif wish == 'g':  # Display till the end.
                self._Scroll(len(text) - self._displayed + 1)
            elif wish == '\r':  # Enter, down a line.
                self._Scroll(1)
            elif wish == '\033[B':  # Down arrow, down a line.
                self._Scroll(1)
            elif wish == '\033[A':  # Up arrow, up a line.
                self._Scroll(-1)
            elif wish == 'b':  # Up a page.
                self._Scroll(0 - self._cli_lines)
            else:  # Next page.
                self._Scroll()
        if self._displayed >= len(text):
            break
    return True
def to_XML(self, xml_declaration=True, xmlns=True):
    """Serialize this object's fields to an XML-formatted string.

    :param xml_declaration: if ``True`` (default), prepend the standard
        XML declaration line (version and encoding)
    :type xml_declaration: bool
    :param xmlns: if ``True`` (default), annotate the output with fully
        qualified XMLNS prefixes
    :type xmlns: bool
    :returns: an XML-formatted string
    """
    dom_root = self._to_DOM()
    if xmlns:
        xmlutils.annotate_with_XMLNS(dom_root,
                                     OBSERVATION_XMLNS_PREFIX,
                                     OBSERVATION_XMLNS_URL)
    return xmlutils.DOM_node_to_XML(dom_root, xml_declaration)
def get_marshaller_for_type_string(self, type_string):
    """Get the appropriate marshaller for a type string.

    Retrieves the marshaller, if any, that can be used to read/write a
    Python object with the given type string, importing the modules the
    marshaller requires if they have not been imported yet.

    Parameters
    ----------
    type_string : str
        Type string for a Python object.

    Returns
    -------
    marshaller : marshaller or None
        The marshaller that can read/write the type to file, or ``None``
        if no appropriate marshaller is found.
    has_required_modules : bool
        Whether the modules required for reading the type are present.

    See Also
    --------
    hdf5storage.Marshallers.TypeMarshaller.python_type_strings
    """
    if type_string not in self._type_strings:
        return None, False
    idx = self._type_strings[type_string]
    marshaller = self._marshallers[idx]
    if self._imported_required_modules[idx]:
        return marshaller, True
    if not self._has_required_modules[idx]:
        return marshaller, False
    # Modules believed available but not yet imported: import now and
    # cache the outcome for future lookups.
    imported_ok = self._import_marshaller_modules(marshaller)
    self._has_required_modules[idx] = imported_ok
    self._imported_required_modules[idx] = imported_ok
    return marshaller, imported_ok
def retry_ex(callback, times=3, cap=120000):
    """Retry a callback function if any exception is raised.

    Before each retry the function sleeps for ``retry_wait_time(attempt, cap)``
    milliseconds (backoff computed elsewhere in this module).

    :param function callback: The function to call
    :keyword int times: Number of times to retry on initial failure
    :keyword int cap: Maximum wait time in milliseconds
    :returns: The return value of the callback
    :raises BaseException: Re-raises the callback's exception after
        exhausting all retries
    """
    for attempt in range(times + 1):
        if attempt > 0:
            # retry_wait_time returns milliseconds; sleep takes seconds.
            time.sleep(retry_wait_time(attempt, cap) / 1000.0)
        try:
            return callback()
        # Explicit BaseException instead of a bare `except:` (E722);
        # semantics are identical — everything, including KeyboardInterrupt,
        # still triggers a retry as the original intended.
        except BaseException:
            if attempt == times:
                raise
def find_one(self):
    """Return the first index of an entry that is either one or DC.

    Entries are packed two bits each into the integer chunks of
    ``self.data``; ``self.width`` is the number of bits per chunk, so each
    chunk holds ``width >> 1`` entries. If no item is found, return None.
    """
    num = quotient = 0
    while num < self._len:
        chunk = self.data[quotient]
        if chunk & self.one_mask:
            # Chunk may contain a match — scan its 2-bit entries.
            # NOTE(review): only PC_ONE is returned here; presumably
            # one_mask also covers the DC encoding mentioned in the
            # docstring — confirm against the mask definition.
            remainder = 0
            while remainder < self.width and num < self._len:
                item = (chunk >> remainder) & 3
                if item == PC_ONE:
                    return num
                remainder += 2
                num += 1
        else:
            # No candidate bits in this chunk: skip all of its entries.
            num += (self.width >> 1)
        quotient += 1
    return None
def add_tag(self, tag, value):
    """Insert ``(tag, value)`` into the sorted tag list, skipping duplicates.

    Tags are kept in sorted order, so a bisection finds the insertion
    point in O(log n); a single equality check at that point guards
    against inserting the same pair twice.
    """
    entry = (tag, value)
    position = bisect_left(self.tags, entry)
    if position == len(self.tags) or self.tags[position] != entry:
        self.tags.insert(position, entry)
def save_data(self, idx):
    """Call method `save_data` of all handled |IOSequences| objects
    registered under |OutputSequencesABC|."""
    output_subseqs = (s for s in self
                      if isinstance(s, abctools.OutputSequencesABC))
    for subseqs in output_subseqs:
        subseqs.save_data(idx)
def command(name, mode):
    """Decorator that registers a method as the command `name`.

    Records the mapping from command name to method name in the
    module-level `commands` registry and wires the method into `_Client`.
    """
    def register(fn):
        commands[name] = fn.__name__
        _Client._addMethod(fn.__name__, name, mode)
        return fn
    return register
def mark_offer_as_lose(self, offer_id):
    """Mark an offer as lost via a PUT request.

    :param offer_id: the offer id
    :return: Response
    """
    request_kwargs = dict(resource=OFFERS, billomat_id=offer_id, command=LOSE)
    return self._create_put_request(**request_kwargs)
def sendmail(subject, text, mailto, sender=None):
    """Send an e-mail with unix sendmail.

    Args:
        subject: String with the subject of the mail.
        text: String with the body of the mail.
        mailto: String or list of strings with the recipients.
        sender: String with the sender address.
            If sender is None, username@hostname is used.

    Returns:
        Exit status: -1 if the sendmail binary is not found, otherwise
        the length of sendmail's stderr output (0 on success).
    """
    def user_at_host():
        from socket import gethostname
        return os.getlogin() + "@" + gethostname()
    # Body of the message.
    try:
        sender = user_at_host() if sender is None else sender
    except OSError:
        # os.getlogin() can fail without a controlling terminal.
        sender = 'abipyscheduler@youknowwhere'
    if is_string(mailto):
        mailto = [mailto]
    from email.mime.text import MIMEText
    mail = MIMEText(text)
    mail["Subject"] = subject
    mail["From"] = sender
    mail["To"] = ", ".join(mailto)
    msg = mail.as_string()
    # sendmail works much better than the python interface.
    # Note that sendmail is available only on Unix-like OS.
    from subprocess import Popen, PIPE
    import sys
    sendmail = which("sendmail")
    if sendmail is None:
        return -1
    if sys.version_info[0] < 3:
        p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE)
    else:
        # msg is string not bytes so must use universal_newlines
        p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE, universal_newlines=True)
    outdata, errdata = p.communicate(msg)
    return len(errdata)
def getalignedtarget(self, index):
    """Return the aligned target for a source index.

    The target is returned only if the source index aligns to a single
    token or to a single *consecutive* range of target tokens.

    :param index: source-side token index to look up in self.alignment
    :returns: ``(target, foundindex)`` where target is the joined target
        string (or a single token) and foundindex is a ``(min, max)``
        tuple for a range or an int for a single token. Returns
        ``(None, -1)`` when there is no alignment or the aligned target
        indices are not consecutive.
    """
    targetindices = [t for s, t in self.alignment if s == index]
    target = None
    foundindex = -1
    if len(targetindices) > 1:
        # BUG FIX: the original broke out of the consecutiveness check but
        # then assigned the range unconditionally, so non-consecutive
        # alignments were returned despite the documented contract.
        consecutive = all(
            abs(targetindices[i] - targetindices[i - 1]) == 1
            for i in range(1, len(targetindices))
        )
        if consecutive:
            lo, hi = min(targetindices), max(targetindices)
            foundindex = (lo, hi)
            target = ' '.join(self.target[lo:hi + 1])
    elif targetindices:
        foundindex = targetindices[0]
        target = self.target[foundindex]
    return target, foundindex
def monitor_session_span_command_direction(self, **kwargs):
    """Auto Generated Code"""
    root = ET.Element("config")
    monitor_elem = ET.SubElement(root, "monitor",
                                 xmlns="urn:brocade.com:mgmt:brocade-span")
    session_elem = ET.SubElement(monitor_elem, "session")
    ET.SubElement(session_elem, "session-number").text = \
        kwargs.pop('session_number')
    span_cmd = ET.SubElement(session_elem, "span-command")
    ET.SubElement(span_cmd, "direction").text = kwargs.pop('direction')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
def edit_record(self, new_record):
    """Update a record in ArchivesSpace using the provided new_record.

    The format of new_record is identical to the format returned by
    get_resource_component_and_children and related methods; consult the
    documentation for that method in ArchivistsToolkitClient to see the
    format. This means it's possible, for example, to request a record,
    modify the returned dict, and pass that dict to this method to update
    the server.

    Currently supported fields are:
        * title
        * targetfield
        * notes
        * start_date
        * end_date
        * date_expression

    :raises ValueError: if the 'id' field isn't specified, or no fields
        to edit were specified.
    """
    try:
        record_id = new_record["id"]
    except KeyError:
        raise ValueError("No record ID provided!")
    # Fetch the current server-side record and patch it field by field.
    record = self.get_record(record_id)
    # TODO: add more fields?
    field_map = {"title": "title", "level": "levelOfDescription"}
    fields_updated = False
    for field, targetfield in field_map.items():
        try:
            record[targetfield] = new_record[field]
            fields_updated = True
        except KeyError:
            # Field not supplied by the caller — leave it unchanged.
            continue
    if self._process_notes(record, new_record):
        fields_updated = True
    # Create dates object if any of the date fields is populated
    if ("start_date" in new_record or "end_date" in new_record or "date_expression" in new_record):
        date = {"jsonmodel_type": "date", "date_type": "inclusive", "label": "creation", }
        if "date_expression" in new_record:
            date["expression"] = new_record["date_expression"]
        if "start_date" in new_record:
            date["begin"] = new_record["start_date"]
        if "end_date" in new_record:
            date["end"] = new_record["end_date"]
        # Replace the first existing date, or create the dates list.
        if len(record["dates"]) == 0:
            record["dates"] = [date]
        else:
            record["dates"][0] = date
        fields_updated = True
    if not fields_updated:
        raise ValueError("No fields to update specified!")
    self._post(record_id, data=json.dumps(record))
def create_transform(ctx, transform):
    """Creates a new transform in the specified directory and auto-updates dependencies."""
    # Imported lazily and aliased so it does not shadow this CLI wrapper.
    from canari.commands.create_transform import create_transform as _do_create
    _do_create(ctx.project, transform)
def click_link_text(self, link_text, timeout=settings.SMALL_TIMEOUT):
    """Click a link on the page identified by its visible link text.

    Waits for the link to be present/visible, highlights it in demo mode,
    and falls back to clicking via a derived CSS selector (id, href,
    ng-click, onclick) if the direct click fails.
    """
    # If using phantomjs, might need to extract and open the link directly
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    if self.browser == 'phantomjs':
        if self.is_link_text_visible(link_text):
            element = self.wait_for_link_text_visible(link_text, timeout=timeout)
            element.click()
            return
        # Not visible: navigate straight to the link's href instead.
        self.open(self.__get_href_from_link_text(link_text))
        return
    if not self.is_link_text_present(link_text):
        self.wait_for_link_text_present(link_text, timeout=timeout)
    pre_action_url = self.get_current_url()
    try:
        # Short first wait: most links are visible immediately.
        element = self.wait_for_link_text_visible(link_text, timeout=0.2)
        self.__demo_mode_highlight_if_active(link_text, by=By.LINK_TEXT)
        try:
            element.click()
        except (StaleElementReferenceException, ENI_Exception):
            # Page changed underneath us — re-acquire the element and retry.
            self.wait_for_ready_state_complete()
            time.sleep(0.05)
            element = self.wait_for_link_text_visible(link_text, timeout=timeout)
            element.click()
    except Exception:
        # Direct click failed: try to build a CSS selector from the
        # link's attributes, in order of preference.
        found_css = False
        text_id = self.get_link_attribute(link_text, "id", False)
        if text_id:
            link_css = '[id="%s"]' % link_text
            found_css = True
        if not found_css:
            href = self.__get_href_from_link_text(link_text, False)
            if href:
                if href.startswith('/') or page_utils.is_valid_url(href):
                    link_css = '[href="%s"]' % href
                    found_css = True
        if not found_css:
            ngclick = self.get_link_attribute(link_text, "ng-click", False)
            if ngclick:
                link_css = '[ng-click="%s"]' % ngclick
                found_css = True
        if not found_css:
            onclick = self.get_link_attribute(link_text, "onclick", False)
            if onclick:
                link_css = '[onclick="%s"]' % onclick
                found_css = True
        success = False
        if found_css:
            if self.is_element_visible(link_css):
                self.click(link_css)
                success = True
            else:
                # The link text might be hidden under a dropdown menu
                success = self.__click_dropdown_link_text(link_text, link_css)
        if not success:
            # Last resort: wait again for visibility and click directly.
            element = self.wait_for_link_text_visible(link_text, timeout=settings.MINI_TIMEOUT)
            element.click()
    if settings.WAIT_FOR_RSC_ON_CLICKS:
        self.wait_for_ready_state_complete()
    if self.demo_mode:
        if self.driver.current_url != pre_action_url:
            self.__demo_mode_pause_if_active()
        else:
            self.__demo_mode_pause_if_active(tiny=True)
def _parse_tagfile ( self ) :
"""Parse the tagfile and yield tuples of tag _ name , list of rule ASTs .""" | rules = None
tag = None
for line in self . original :
match = self . TAG_DECL_LINE . match ( line )
if match :
if tag and rules :
yield tag , rules
rules = [ ]
tag = match . group ( 1 )
continue
match = self . TAG_RULE_LINE . match ( line )
if match :
source = match . group ( 1 )
rules . append ( self . _parse_query ( source ) ) |
def make_qscan_plot(workflow, ifo, trig_time, out_dir, injection_file=None,
                    data_segments=None, time_window=100, tags=None):
    """Generate a make_qscan node and add it to workflow.

    This function generates a single node of the singles_timefreq
    executable and adds it to the current workflow. Parent/child
    relationships are set by the input/output files automatically.

    Parameters
    ----------
    workflow : pycbc.workflow.core.Workflow
        The workflow class that stores the jobs that will be run.
    ifo : str
        Which interferometer are we using?
    trig_time : int
        The time of the trigger being followed up.
    out_dir : str
        Location of directory to output to.
    injection_file : pycbc.workflow.File (optional, default=None)
        If given, add the injections in the file to strain before making
        the plot.
    data_segments : ligo.segments.segmentlist (optional, default=None)
        The list of segments for which data exists and can be read in. If
        given, the start/end times are adjusted so that
        [trig_time - time_window, trig_time + time_window] lies within a
        valid data segment. A ValueError is raised if trig_time is not
        within a valid segment. This **must** be coalesced.
    time_window : int (optional, default=100)
        The amount of data (not including padding) that will be read in.
    tags : list (optional, default=None)
        List of tags to add to the created nodes, which determine file
        naming.
    """
    tags = [] if tags is None else tags
    makedir(out_dir)
    name = 'plot_qscan'
    curr_exe = PlotQScanExecutable(workflow.cp, name, ifos=[ifo],
                                   out_dir=out_dir, tags=tags)
    node = curr_exe.create_node()
    # Determine start/end times, using data segments if needed.
    # Begin by choosing "optimal" times
    start = trig_time - time_window
    end = trig_time + time_window
    # Then if data_segments is available, check against that, and move if
    # needed
    if data_segments is not None:
        # Assumes coalesced, so trig_time can only be within one segment
        for seg in data_segments:
            if trig_time in seg:
                data_seg = seg
                break
            elif trig_time == -1.0:
                # Sentinel "no trigger" time: emit a placeholder plot node
                # and return early.
                node.add_opt('--gps-start-time', int(trig_time))
                node.add_opt('--gps-end-time', int(trig_time))
                node.add_opt('--center-time', trig_time)
                caption_string = "'No trigger in %s'" % ifo
                node.add_opt('--plot-caption', caption_string)
                node.new_output_file_opt(workflow.analysis_time, '.png',
                                         '--output-file')
                workflow += node
                return node.output_files
        else:
            # for/else: no segment contained trig_time.
            err_msg = "Trig time {} ".format(trig_time)
            err_msg += "does not seem to lie within any data segments. "
            err_msg += "This shouldn't be possible, please ask for help!"
            raise ValueError(err_msg)
        # Check for pad-data
        if curr_exe.has_opt('pad-data'):
            pad_data = int(curr_exe.get_opt('pad-data'))
        else:
            pad_data = 0
        # We only read data that's available. The code must handle the
        # case of not much data being available.
        if end > (data_seg[1] - pad_data):
            end = data_seg[1] - pad_data
        if start < (data_seg[0] + pad_data):
            start = data_seg[0] + pad_data
    node.add_opt('--gps-start-time', int(start))
    node.add_opt('--gps-end-time', int(end))
    node.add_opt('--center-time', trig_time)
    if injection_file is not None:
        node.add_input_opt('--injection-file', injection_file)
    node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
    workflow += node
    return node.output_files
def _reshape_n_vecs(self):
    """Return a list of arrays, one per m mode.

    Order is [m=0, m=-1, m=+1, m=-2, m=+2, ...] up to self.mmax.
    NOTE: uses py2-only `xrange`; this code path assumes Python 2.
    """
    lst = []
    # Full slice along the first axis for each m-mode column.
    sl = slice(None, None, None)
    lst.append(self.__getitem__((sl, 0)))
    for m in xrange(1, self.mmax + 1):
        lst.append(self.__getitem__((sl, -m)))
        lst.append(self.__getitem__((sl, m)))
    return lst
def process_args():
    """Parse command-line arguments for the NeuroML-to-POVRay converter.

    Returns the populated argparse.Namespace. See each argument's help
    string for its meaning; positional argument is the NeuroML file.
    """
    parser = argparse.ArgumentParser(description="A file for converting NeuroML v2 files into POVRay files for 3D rendering")
    # Positional input file.
    parser.add_argument('neuroml_file', type=str, metavar='<NeuroML file>', help='NeuroML (version 2 beta 3+) file to be converted to PovRay format (XML or HDF5 format)')
    # Output/content toggles.
    parser.add_argument('-split', action='store_true', default=False, help="If this is specified, generate separate pov files for cells & network. Default is false")
    parser.add_argument('-background', type=str, metavar='<background colour>', default=_WHITE, help='Colour of background, e.g. <0,0,0,0.55>')
    parser.add_argument('-movie', action='store_true', default=False, help="If this is specified, generate a ini file for generating a sequence of frames for a movie of the 3D structure")
    parser.add_argument('-inputs', action='store_true', default=False, help="If this is specified, show the locations of (synaptic, current clamp, etc.) inputs into the cells of the network")
    parser.add_argument('-conns', action='store_true', default=False, help="If this is specified, show the connections present in the network with lines")
    parser.add_argument('-conn_points', action='store_true', default=False, help="If this is specified, show the end points of the connections present in the network")
    parser.add_argument('-v', action='store_true', default=False, help="Verbose output")
    parser.add_argument('-frames', type=int, metavar='<frames>', default=36, help='Number of frames in movie')
    # Camera position/view offsets and scaling.
    parser.add_argument('-posx', type=float, metavar='<position offset x>', default=0, help='Offset position in x dir (0 is centre, 1 is top)')
    parser.add_argument('-posy', type=float, metavar='<position offset y>', default=0, help='Offset position in y dir (0 is centre, 1 is top)')
    parser.add_argument('-posz', type=float, metavar='<position offset z>', default=0, help='Offset position in z dir (0 is centre, 1 is top)')
    parser.add_argument('-viewx', type=float, metavar='<view offset x>', default=0, help='Offset viewing point in x dir (0 is centre, 1 is top)')
    parser.add_argument('-viewy', type=float, metavar='<view offset y>', default=0, help='Offset viewing point in y dir (0 is centre, 1 is top)')
    parser.add_argument('-viewz', type=float, metavar='<view offset z>', default=0, help='Offset viewing point in z dir (0 is centre, 1 is top)')
    parser.add_argument('-scalex', type=float, metavar='<scale position x>', default=1, help='Scale position from network in x dir')
    parser.add_argument('-scaley', type=float, metavar='<scale position y>', default=1.5, help='Scale position from network in y dir')
    parser.add_argument('-scalez', type=float, metavar='<scale position z>', default=1, help='Scale position from network in z dir')
    # Rendering details.
    parser.add_argument('-mindiam', type=float, metavar='<minimum diameter dendrites/axons>', default=0, help='Minimum diameter for dendrites/axons (to improve visualisations)')
    parser.add_argument('-plane', action='store_true', default=False, help="If this is specified, add a 2D plane below cell/network")
    parser.add_argument('-segids', action='store_true', default=False, help="Show segment ids")
    return parser.parse_args()
def min(self, axis=None, skipna=True, *args, **kwargs):
    """Return the minimum value of the Array or minimum along an axis.

    See Also
    --------
    numpy.ndarray.min
    Index.min : Return the minimum value in an Index.
    Series.min : Return the minimum value in a Series.
    """
    nv.validate_min(args, kwargs)
    nv.validate_minmax_axis(axis)
    raw_result = nanops.nanmin(self.asi8, skipna=skipna, mask=self.isna())
    if isna(raw_result):
        # Period._from_ordinal does not handle np.nan gracefully
        return NaT
    return self._box_func(raw_result)
async def amiUsage(self, *args, **kwargs):
    """See the list of AMIs and their usage.

    Lists AMIs and their usage by returning a list of objects in the form:
        region: string
        volumetype: string
        lastused: timestamp

    This method is ``experimental``.
    """
    func_info = self.funcinfo["amiUsage"]
    return await self._makeApiCall(func_info, *args, **kwargs)
def get_downstream_causal_subgraph(graph, nbunch: Union[BaseEntity, Iterable[BaseEntity]]):
    """Induce a sub-graph from all of the downstream causal entities of the nodes in the nbunch.

    :type graph: pybel.BELGraph
    :rtype: pybel.BELGraph
    """
    edge_predicate = build_downstream_edge_predicate(nbunch)
    return get_subgraph_by_edge_filter(graph, edge_predicate)
def left(self, speed=1):
    """Turn the robot left.

    Runs the right motor forward and the left motor backward.

    :param float speed:
        Speed at which to drive the motors, as a value between 0
        (stopped) and 1 (full speed). The default is 1.
    """
    # Opposite motor directions produce an in-place left turn.
    self.right_motor.forward(speed)
    self.left_motor.backward(speed)
def append(self, s):
    """Append the bytestring `s` to the compressor state and return the
    final compressed output.

    The internal compressor (if any) is copied first so this object's
    state is left untouched and `append` can be called repeatedly.
    """
    if self._compressor is None:
        # No incremental state yet: compress everything in one shot.
        return zlib.compress(self.s + s, 9)
    snapshot = self._compressor.copy()
    return self._out + snapshot.compress(s) + snapshot.flush()
def get_cds_ranges_for_transcript(self, transcript_id):
    """Obtain the CDS (start, end) ranges for a transcript from Ensembl.

    Queries the Ensembl overlap endpoint for CDS features and keeps only
    those whose Parent matches the requested transcript.
    """
    headers = {"content-type": "application/json"}
    self.attempt = 0
    ext = "/overlap/id/{}?feature=cds".format(transcript_id)
    response = self.ensembl_request(ext, headers)
    return [
        (entry["start"], entry["end"])
        for entry in json.loads(response)
        if entry["Parent"] == transcript_id
    ]
def pin(self, mask: str = '####') -> str:
    """Generate a random PIN code from a mask.

    :param mask: Mask of the pin code.
    :return: PIN code.
    """
    code = self.random.custom_code(mask=mask)
    return code
def fetch_hg_push_log(repo_name, repo_url):
    """Run a HgPushlog etl process"""
    newrelic.agent.add_custom_parameter("repo_name", repo_name)
    pushlog_url = repo_url + '/json-pushes/?full=1&version=2'
    HgPushlogProcess().run(pushlog_url, repo_name)
def get_value(self) -> ScalarType:
    """Return the value of a Scalar node, converted per its YAML tag.

    Use is_scalar(type) to check which type the node has.
    """
    tag = self.yaml_node.tag
    value = self.yaml_node.value
    if tag == 'tag:yaml.org,2002:str':
        return value
    if tag == 'tag:yaml.org,2002:int':
        return int(value)
    if tag == 'tag:yaml.org,2002:float':
        return float(value)
    if tag == 'tag:yaml.org,2002:bool':
        # YAML core schema true spellings only.
        return value in ['TRUE', 'True', 'true']
    if tag == 'tag:yaml.org,2002:null':
        return None
    raise RuntimeError('This node with tag {} is not of the right type'
                       ' for get_value()'.format(tag))
def _construct_deutsch_jozsa_circuit(self):
    """Build the Deutsch-Jozsa circuit.

    The circuit can determine whether a function f mapping {0,1}^n to
    {0,1} is constant or balanced, provided that it is one of them.

    :return: A program corresponding to the desired instance of
        Deutsch-Jozsa's Algorithm.
    :rtype: Program
    """
    dj_prog = Program()
    # Put the first ancilla qubit (query qubit) into minus state
    dj_prog.inst(X(self.ancillas[0]), H(self.ancillas[0]))
    # Apply Hadamard, Oracle, and Hadamard again
    dj_prog.inst([H(qubit) for qubit in self.computational_qubits])
    # Build the oracle
    oracle_prog = Program()
    oracle_prog.defgate(ORACLE_GATE_NAME, self.unitary_matrix)
    scratch_bit = self.ancillas[1]
    qubits_for_funct = [scratch_bit] + self.computational_qubits
    oracle_prog.inst(tuple([ORACLE_GATE_NAME] + qubits_for_funct))
    dj_prog += oracle_prog
    # Here the oracle does not leave the computational qubits unchanged, so
    # we use a CNOT to move the result to the query qubit, and then we
    # uncompute with the dagger.
    dj_prog.inst(CNOT(self._qubits[0], self.ancillas[0]))
    dj_prog += oracle_prog.dagger()
    dj_prog.inst([H(qubit) for qubit in self.computational_qubits])
    return dj_prog
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.