signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def parse_conditional_derived_variable(self, node):
    """Parse a <ConditionalDerivedVariable> element.

    @param node: Node containing the <ConditionalDerivedVariable> element
    @type node: xml.etree.Element

    @raise ParseError: Raised when no name or value is specified for the
    conditional derived variable.
    """
    attribs = node.lattrib
    # Fall back to the exposure name when no explicit name is given.
    if 'name' in attribs:
        name = attribs['name']
    elif 'exposure' in attribs:
        name = attribs['exposure']
    else:
        self.raise_error('<ConditionalDerivedVariable> must specify a name')
    exposure = attribs['exposure'] if 'exposure' in attribs else None
    dimension = attribs['dimension'] if 'dimension' in attribs else None
    cdv = ConditionalDerivedVariable(name, dimension, exposure)
    self.current_regime.add_conditional_derived_variable(cdv)
    self.current_conditional_derived_variable = cdv
    self.process_nested_tags(node)
async def addreaction(self, ctx, *, reactor=""):
    """Interactively adds a custom reaction.

    Prompts the invoking user for a trigger phrase (if not supplied), a
    text response, and a set of emoji reactions, then persists them in the
    per-server config.
    """
    # Prompt for the trigger if it was not passed with the command.
    if not reactor:
        await self.bot.say("What should I react to?")
        response = await self.bot.wait_for_message(author=ctx.message.author)
        reactor = response.content
    data = self.config.get(ctx.message.server.id, {})
    keyword = data.get(reactor, {})
    # Refuse to overwrite an existing reaction.
    if keyword:
        await self.bot.responses.failure(message="Reaction '{}' already exists.".format(reactor))
        return
    await self.bot.say("Okay, I'll react to '{}'. What do you want me to say? (Type $none for no response)".format(reactor))
    response = await self.bot.wait_for_message(author=ctx.message.author)
    reactions = []

    def check(reaction, user):
        # Collect every emoji until the author reacts with the stop
        # button (U+23F9); only the invoking author may stop collection.
        if str(reaction.emoji) != "\U000023f9":
            reactions.append(reaction.emoji)
            return False
        else:
            return user == ctx.message.author

    msg = await self.bot.say("Awesome! Now react to this message any reactions I should have to '{}'. (React \U000023f9 to stop)".format(reactor))
    await self.bot.wait_for_reaction(message=msg, check=check)
    for i, reaction in enumerate(reactions):
        # Custom emoji are stored as "name:id"; unicode emoji are kept as-is.
        reaction = reaction if isinstance(reaction, str) else reaction.name + ":" + str(reaction.id)
        await self.bot.add_reaction(ctx.message, reaction)
        reactions[i] = reaction
    if response:
        # "$none" means the reaction has emoji only, no text response.
        keyword["response"] = response.content if response.content.lower() != "$none" else ""
    keyword["reaction"] = reactions
    data[reactor] = keyword
    await self.config.put(ctx.message.server.id, data)
    await self.bot.responses.success(message="Reaction '{}' has been added.".format(reactor))
def weighted_round_robin(iterable):
    '''Takes an iterable of tuples of <item>, <weight> and cycles around them,
    returning heavier (integer) weighted items more frequently.

    Items are interleaved in passes: pass ``t`` emits every item whose
    weight is greater than ``t``, heaviest first, so an item of weight
    ``w`` appears ``w`` times per full cycle.
    '''
    # Heaviest first; sort is stable, so equal weights keep input order.
    remaining = sorted(iterable, key=lambda pair: pair[1], reverse=True)
    schedule = []
    threshold = 0
    # FIX: the original deleted entries from the list while iterating over
    # it (skipping elements); build each pass with an explicit filter
    # instead, which yields the same schedule without the fragility.
    while remaining:
        remaining = [(item, weight) for item, weight in remaining
                     if weight > threshold]
        schedule.extend(item for item, _ in remaining)
        threshold += 1
    return cycle(schedule)
def _decode_messages(self, messages):
    '''Take the zmq messages, decrypt/decode them into a payload.

    :param list messages: A list of messages to be decoded
    '''
    messages_len = len(messages)
    # if it was one message, then its old style
    if messages_len == 1:
        payload = self.serial.loads(messages[0])
    # 2 includes a header which says who should do it
    elif messages_len == 2:
        target = messages[0]
        if (self.opts.get('__role') != 'syndic'
                and target not in ('broadcast', self.hexid)) \
                or (self.opts.get('__role') == 'syndic'
                    and target not in ('broadcast', 'syndic')):
            log.debug('Publish received for not this minion: %s', target)
            raise tornado.gen.Return(None)
        payload = self.serial.loads(messages[1])
    else:
        # FIX: previously called len() on this int, which raised a
        # TypeError instead of the intended exception (and the
        # concatenated message was missing a space).
        raise Exception(
            'Invalid number of messages ({0}) in zeromq pub '
            'message from master'.format(messages_len))
    # Yield control back to the caller. When the payload has been decoded,
    # assign the decoded payload to 'ret' and resume operation
    ret = yield self._decode_payload(payload)
    raise tornado.gen.Return(ret)
def _call_variants(example_dir, region_bed, data, out_file):
    """Call variants from prepared pileup examples, creating TensorFlow record file."""
    base, _ = utils.splitext_plus(out_file)
    tf_out_file = "%s-tfrecord.gz" % base
    if utils.file_exists(tf_out_file):
        return tf_out_file
    with file_transaction(data, tf_out_file) as tx_out_file:
        # Targeted panels use the exome-trained model; otherwise whole genome.
        is_targeted = strelka2.coverage_interval_from_bed(region_bed) == "targeted"
        model = "wes" if is_targeted else "wgs"
        cmd = ["dv_call_variants.py",
               "--cores", dd.get_num_cores(data),
               "--outfile", tx_out_file,
               "--examples", example_dir,
               "--sample", dd.get_sample_name(data),
               "--model", model]
        do.run(cmd, "DeepVariant call_variants %s" % dd.get_sample_name(data))
    return tf_out_file
def load(cls, filename, project=None):
    r"""Loads data onto the given network from an appropriately formatted
    'mat' file (i.e. MATLAB output).

    Parameters
    ----------
    filename : string (optional)
        The name of the file containing the data to import.  The
        formatting of this file is outlined below.
    project : OpenPNM Project object
        A GenericNetwork is created and added to the specified Project.
        If no Project object is supplied then one will be created and
        returned.

    Returns
    -------
    If no project object is supplied then one will be created and returned.
    """
    filename = cls._parse_filename(filename=filename, ext='mat')
    data = spio.loadmat(filename)
    # Reinsert the '.' separator into the array names
    for item in list(data.keys()):
        if item in ['__header__', '__version__', '__globals__']:
            data.pop(item)
            continue
        elif '_pore_' in item:
            path, prop = item.split('_pore_')
            new_key = path + '|pore.' + prop
        elif '_throat_' in item:
            path, prop = item.split('_throat_')
            new_key = path + '|throat.' + prop
        else:
            # FIX: keys without a pore/throat marker previously fell
            # through and reused a stale (or undefined) new_key; leave
            # such keys unchanged instead.
            continue
        data[new_key] = data.pop(item)
    if project is None:
        project = ws.new_project()
    project = Dict.from_dict(data, project=project, delim='|')
    project = cls._convert_data(project)
    return project
def error_log(self, msg='', level=20, traceback=False):
    """Write error message to log.

    Args:
        msg (str): error message
        level (int): logging level (accepted for interface compatibility;
            unused by this default stderr implementation)
        traceback (bool): add traceback to output or not
    """
    # Override this in subclasses as desired
    output = msg + '\n'
    if traceback:
        # `traceback_` is the traceback module, aliased to avoid clashing
        # with the boolean parameter above.
        output += traceback_.format_exc()
    sys.stderr.write(output)
    sys.stderr.flush()
def handle(self):
    """Handles a request ignoring dropped connections."""
    rv = None
    try:
        rv = BaseHTTPRequestHandler.handle(self)
    except (_ConnectionError, socket.timeout) as e:
        # The client went away mid-request; record it instead of crashing.
        self.connection_dropped(e)
    except Exception as e:
        # SSL handshake noise is tolerated when serving over SSL;
        # everything else propagates.
        if self.server.ssl_context is not None and is_ssl_error(e):
            pass
        else:
            raise
    if self.server.shutdown_signal:
        self.initiate_shutdown()
    return rv
def expand_includes(text, path='.'):
    """Recursively expands ``.. include:: <filename>`` directives in *text*.

    Each include line is replaced by the (recursively expanded) contents
    of the referenced file, resolved relative to *path*.
    """
    def read_and_expand(match):
        filename = match.group('filename')
        filename = join(path, filename)
        text = read(filename)
        # NOTE(review): `filename` has already been joined with `path`, so
        # `join(path, dirname(filename))` repeats `path` — presumably only
        # correct when `path` is '.'; verify with nested includes in
        # subdirectories.
        return expand_includes(text, path=join(path, dirname(filename)))
    return re.sub(r'^\.\. include:: (?P<filename>.*)$', read_and_expand,
                  text, flags=re.MULTILINE)
def is_pdf(document):
    """Check if a document is a PDF file and return True if it is.

    Uses the external ``file`` utility's output when available, falling
    back to a filename-extension check otherwise.
    """
    # NOTE(review): this checks for 'pdftotext', yet the warning and the
    # code below rely on the 'file' utility — confirm which executable is
    # actually required here.
    if not executable_exists('pdftotext'):
        current_app.logger.warning("GNU file was not found on the system. "
                                   "Switching to a weak file extension test.")
        if document.lower().endswith(".pdf"):
            return True
        return False
    # Tested with file version >= 4.10. First test is secure and works
    # with file version 4.25. Second condition is tested for file
    # version 4.10.
    file_output = os.popen('file ' + re.escape(document)).read()
    try:
        filetype = file_output.split(":")[-1]
    except IndexError:
        # NOTE(review): str.split always returns at least one element, so
        # this branch looks unreachable as written.
        current_app.logger.error("Your version of the 'file' utility seems to be unsupported.")
        raise IncompatiblePDF2Text('Incompatible pdftotext')
    pdf = filetype.find("PDF") > -1
    # This is how it should be done however this is incompatible with
    # file version 4.10.
    # os.popen('file -bi ' + document).read().find("application/pdf")
    return pdf
def get_crl(self, expires=86400, encoding=None, algorithm=None, password=None,
            scope=None, **kwargs):
    """Generate a Certificate Revocation List (CRL).

    The ``full_name`` and ``relative_name`` keyword parameters describe
    how to retrieve the CRL and are used in the Issuing Distribution Point
    extension (RFC 5280, section 5.2.5).  The former defaults to the
    ``crl_url`` field; pass ``None`` to not include the value.  At most
    one of the two may be set.

    Parameters
    ----------
    expires : int
        The time in seconds when this CRL expires.  Note that you should
        generate a new CRL until then.
    encoding : :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding` or str, optional
        The encoding format for the CRL, passed to
        :py:func:`~django_ca.utils.parse_encoding`.  The default value is
        ``"PEM"``.
    algorithm : :py:class:`~cg:cryptography.hazmat.primitives.hashes.Hash` or str, optional
        The hash algorithm to use, passed to
        :py:func:`~django_ca.utils.parse_hash_algorithm`.  The default is
        to use :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>`.
    password : bytes, optional
        Password used to load the private key of the certificate
        authority.  If not passed, the private key is assumed to be
        unencrypted.
    scope : {None, 'ca', 'user', 'attribute'}, optional
        What to include in the CRL: use ``"ca"`` to include only revoked
        certificate authorities and ``"user"`` to include only
        certificates, or ``None`` (the default) to include both.
        ``"attribute"`` is reserved for future use and always produces an
        empty CRL.
    full_name : list of str or :py:class:`~cg:cryptography.x509.GeneralName`, optional
        List of general names to use in the Issuing Distribution Point
        extension.  If not passed, use ``crl_url`` if set.
    relative_name : :py:class:`~cg:cryptography.x509.RelativeDistinguishedName`, optional
        Used in the Issuing Distribution Point extension; retrieve the CRL
        relative to the issuer.

    Returns
    -------
    bytes
        The CRL in the requested format.

    Raises
    ------
    ValueError
        If ``scope`` is not one of the accepted values.
    """
    if scope is not None and scope not in ['ca', 'user', 'attribute']:
        raise ValueError('Scope must be either None, "ca", "user" or "attribute"')
    encoding = parse_encoding(encoding)
    now = now_builder = timezone.now()
    algorithm = parse_hash_algorithm(algorithm)

    # The CRL builder requires naive datetimes.
    if timezone.is_aware(now_builder):
        now_builder = timezone.make_naive(now, pytz.utc)

    builder = x509.CertificateRevocationListBuilder()
    builder = builder.issuer_name(self.x509.subject)
    builder = builder.last_update(now_builder)
    builder = builder.next_update(now_builder + timedelta(seconds=expires))

    # Distribution point: an explicit full_name wins over crl_url.
    if 'full_name' in kwargs:
        full_name = kwargs['full_name']
        full_name = [parse_general_name(n) for n in full_name]
    elif self.crl_url:
        crl_url = [url.strip() for url in self.crl_url.split()]
        full_name = [x509.UniformResourceIdentifier(c) for c in crl_url]
    else:
        full_name = None

    # Keyword arguments for the IssuingDistributionPoint extension
    idp_kwargs = {
        'only_contains_ca_certs': False,
        'only_contains_user_certs': False,
        'indirect_crl': False,
        'only_contains_attribute_certs': False,
        'only_some_reasons': None,
        'full_name': full_name,
        'relative_name': kwargs.get('relative_name'),
    }

    # Only revoked certificates that have not yet expired need listing.
    ca_qs = self.children.filter(expires__gt=now).revoked()
    cert_qs = self.certificate_set.filter(expires__gt=now).revoked()
    if scope == 'ca':
        certs = ca_qs
        idp_kwargs['only_contains_ca_certs'] = True
    elif scope == 'user':
        certs = cert_qs
        idp_kwargs['only_contains_user_certs'] = True
    elif scope == 'attribute':
        # sorry, nothing we support right now
        certs = []
        idp_kwargs['only_contains_attribute_certs'] = True
    else:
        certs = itertools.chain(ca_qs, cert_qs)

    for cert in certs:
        builder = builder.add_revoked_certificate(cert.get_revocation())

    if ca_settings.CRYPTOGRAPHY_HAS_IDP:  # pragma: no branch, pragma: only cryptography>=2.5
        builder = builder.add_extension(x509.IssuingDistributionPoint(**idp_kwargs), critical=True)

    # TODO: Add CRLNumber extension
    # https://cryptography.io/en/latest/x509/reference/#cryptography.x509.CRLNumber
    crl = builder.sign(private_key=self.key(password), algorithm=algorithm, backend=default_backend())
    return crl.public_bytes(encoding)
def returnOneIndex(self, last=False):
    '''Return one origin index (integer) of the current list.

    That index refers to the entry's placement in the original list of
    dictionaries.  This is very useful when one wants to reference the
    original entry by index.

    Example of use:

        >>> test = [
        ...     {"name": "Jim",   "age": 18, "income": 93000, "order": 2},
        ...     {"name": "Larry", "age": 18,                  "order": 3},
        ...     {"name": "Joe",   "age": 20, "income": 15000, "order": 1},
        ...     {"name": "Bill",  "age": 19, "income": 29000, "order": 4},
        ... ]
        >>> print PLOD(test).returnOneIndex()
        >>> print PLOD(test).sort("name").returnOneIndex()

    :param last:
        The last origin of the current list is returned rather than the
        first.
    :return:
        An integer representing the original placement of the first (or
        last) item in the list.  Returns None if the list is currently
        empty.
    '''
    if len(self.table) == 0:
        return None
    if last:
        # FIX: previously used index_track.pop(), which *removed* the
        # entry as a side effect; a read-only accessor must not mutate.
        return self.index_track[-1]
    return self.index_track[0]
def rpm(state, host, source, present=True):
    '''Add/remove ``.rpm`` file packages.

    + source: filename or URL of the ``.rpm`` package
    + present: whether or not the package should exist on the system

    URL sources with ``present=False``:
        If the ``.rpm`` file isn't downloaded, pyinfra can't remove any
        existing package as the file won't exist until mid-deploy.
    '''
    # If source is a url
    if urlparse(source).scheme:
        # Generate a temp filename (with .rpm extension to please yum)
        temp_filename = '{0}.rpm'.format(state.get_temp_filename(source))
        # Ensure it's downloaded
        yield files.download(state, host, source, temp_filename)
        # Override the source with the downloaded file
        source = temp_filename
    # Check for file .rpm information
    info = host.fact.rpm_package(source)
    exists = False
    # We have info!
    if info:
        current_packages = host.fact.rpm_packages
        if (info['name'] in current_packages
                and info['version'] in current_packages[info['name']]):
            exists = True
    # Package does not exist and we want?
    if present and not exists:
        # If we had info, always install
        if info:
            yield 'rpm -U {0}'.format(source)
        # This happens if we download the package mid-deploy, so we have no info
        # but also don't know if it's installed. So check at runtime, otherwise
        # the install will fail.
        else:
            yield 'rpm -qa | grep `rpm -qp {0}` || rpm -U {0}'.format(source)
    # Package exists but we don't want?
    if exists and not present:
        yield 'yum remove -y {0}'.format(info['name'])
def _linear_seaborn_(self, label=None, style=None, opts=None):
    """Returns a Seaborn linear regression plot."""
    xticks, yticks = self._get_ticks(opts)
    try:
        chart = sns.lmplot(self.x, self.y, data=self.df)
        chart = self._set_with_height(chart, opts)
    except Exception as e:
        self.err(e, self.linear_, "Can not draw linear regression chart")
    else:
        return chart
def delete_objective(self, objective_id):
    """Deletes the ``Objective`` identified by the given ``Id``.

    arg:    objective_id (osid.id.Id): the ``Id`` of the ``Objective``
            to delete
    raise:  NotFound - an ``Objective`` was not found identified by the
            given ``Id``
    raise:  NullArgument - ``objective_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.learning.ObjectiveAdminSession.delete_objective_template
    if not isinstance(objective_id, ABCId):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    # Refuse deletion while dependent Activities still reference this
    # objective.
    collection = JSONClientValidated('learning', collection='Activity', runtime=self._runtime)
    if collection.find({'objectiveId': str(objective_id)}).count() != 0:
        raise errors.IllegalState('there are still Activitys associated with this Objective')
    collection = JSONClientValidated('learning', collection='Objective', runtime=self._runtime)
    collection.delete_one({'_id': ObjectId(objective_id.get_identifier())})
def get_new_term_doc_mat(self, doc_domains):
    '''Combines documents together that are in the same domain.

    Parameters
    ----------
    doc_domains : array-like
        One domain label per document.

    Returns
    -------
    scipy.sparse.csr_matrix
        One row per distinct domain (num_domains x num_terms).
    '''
    assert len(doc_domains) == self.term_doc_matrix.get_num_docs()
    # FIX: with a plain list, `doc_domains == domain` evaluated to a
    # single bool instead of an element-wise mask; coerce to an ndarray
    # so the comparison broadcasts.
    doc_domains = np.asarray(doc_domains)
    doc_domain_set = set(doc_domains)
    num_terms = self.term_doc_matrix.get_num_terms()
    num_domains = len(doc_domain_set)
    domain_mat = lil_matrix((num_domains, num_terms), dtype=int)
    X = self.term_doc_matrix.get_term_doc_mat()
    for i, domain in enumerate(doc_domain_set):
        # Sum the rows (documents) belonging to this domain.
        domain_mat[i, :] = X[doc_domains == domain].sum(axis=0)
    return domain_mat.tocsr()
def _cleanup(self):
    '''Cleanup all the local data.'''
    # Drop all pending-callback references so they can be collected.
    for attr in ('_declare_cb', '_bind_cb', '_unbind_cb',
                 '_delete_cb', '_purge_cb'):
        setattr(self, attr, None)
    super(QueueClass, self)._cleanup()
def max(self, axis=None, skipna=True, *args, **kwargs):
    """Return the maximum value of the Index or maximum along an axis.

    See Also
    --------
    numpy.ndarray.max
    Series.max : Return the maximum value in a Series.
    """
    nv.validate_max(args, kwargs)
    nv.validate_minmax_axis(axis)
    if not len(self):
        return self._na_value
    # Work on the underlying int64 representation of the timestamps.
    i8 = self.asi8
    try:
        # quick check
        if len(i8) and self.is_monotonic:
            # For a monotonic index the max is the last element, unless
            # that element is the NaT sentinel.
            if i8[-1] != iNaT:
                return self._box_func(i8[-1])
        if self.hasnans:
            if skipna:
                # Exclude NaT entries before taking the maximum.
                max_stamp = self[~self._isnan].asi8.max()
            else:
                return self._na_value
        else:
            max_stamp = i8.max()
        return self._box_func(max_stamp)
    except ValueError:
        return self._na_value
def read_input(self, filename, has_header=True):
    """Read CSV rows from *filename* into ``self.data``.

    `filename` is any filename, or something on which open() can be
    called.  If *has_header* is False, a synthetic header row of
    stringified column indices ('0', '1', ...) is prepended.

    For example:
        csv_input = CSVInput()
        csv_input.read_input("csvfile.csv")
    """
    csv_data = []
    # FIX: the stream was previously never closed; use a context manager
    # so the file handle is released promptly.
    with open(filename) as stream:
        reader = csv.reader(stream)
        for i, row in enumerate(reader):
            if i == 0 and not has_header:
                # `range` (not the Python-2-only `xrange`) keeps this
                # portable; behavior is identical.
                csv_data.append([str(col) for col in range(0, len(row))])
            csv_data.append(row)
    self.data = csv_data
def constant(self, val, ty):
    """Creates a constant as a VexValue.

    :param val: The value, as an integer
    :param ty: The type of the resulting VexValue
    :return: a VexValue
    """
    # NOTE(review): the guard rejects VexValue instances that are *not*
    # IRExprs, yet the message mentions both types — confirm whether
    # plain IRExpr arguments should also be rejected.
    if isinstance(val, VexValue) and not isinstance(val, IRExpr):
        raise Exception('Constant cannot be made from VexValue or IRExpr')
    # Build the IR constant and wrap it for expression composition.
    rdt = self.irsb_c.mkconst(val, ty)
    return VexValue(self.irsb_c, rdt)
def bind(self, study, **kwargs):  # @UnusedVariable
    """Returns a copy of the Spec bound to the given study.

    Parameters
    ----------
    study : Study
        A study to bind the fileset spec to (should happen in the
        study __init__)
    """
    if self._study is not None:
        # Avoid rebinding specs in sub-studies that have already been
        # bound to MultiStudy.
        return self
    bound = copy(self)
    bound._study = study
    if not hasattr(study, self.pipeline_getter):
        raise ArcanaError(
            "{} does not have a method named '{}' required to "
            "derive {}".format(study, self.pipeline_getter, self))
    bound._bind_tree(study.tree)
    return bound
def runner():
    '''Return all inline documentation for runner modules.

    CLI Example:

    .. code-block:: bash

        salt-run doc.runner
    '''
    return salt.runner.RunnerClient(__opts__).get_docs()
def serialize_op(cls, opcode, opdata, opfields, verbose=True):
    """Given an opcode (byte), associated data (dict), and the operation
    fields to serialize (opfields), convert it into its canonical
    serialized form (i.e. in order to generate a consensus hash).

    opdata is allowed to have extra fields.  They will be ignored.

    Return the canonical form on success.
    Return None on error.
    """
    fields = opfields.get(opcode, None)
    if fields is None:
        # (typo "unrecongnized" fixed)
        log.error("BUG: unrecognized opcode '%s'" % opcode)
        return None
    all_values = []
    debug_all_values = []
    missing = []
    for field in fields:
        # FIX: `in` instead of the Python-2-only dict.has_key().
        if field not in opdata:
            missing.append(field)
        field_value = opdata.get(field, None)
        if field_value is None:
            field_value = ""
        # netstring format
        debug_all_values.append(str(field) + "=" + str(len(str(field_value))) + ":" + str(field_value))
        all_values.append(str(len(str(field_value))) + ":" + str(field_value))
    if len(missing) > 0:
        log.error("Missing fields; dump follows:\n{}".format(
            simplejson.dumps(opdata, indent=4, sort_keys=True)))
        raise Exception("BUG: missing fields '{}'".format(",".join(missing)))
    if verbose:
        log.debug("SERIALIZE: {}:{}".format(opcode, ",".join(debug_all_values)))
    field_values = ",".join(all_values)
    return opcode + ":" + field_values
def to_bytes(s):
    """Convert string `s` to an integer number of bytes.

    Suffixes like 'KB', 'MB', 'GB' (up to 'YB'), with or without the
    trailing 'B', are allowed and properly accounted for.  Case is
    ignored in suffixes.

    Examples::

      >>> to_bytes('12')
      12
      >>> to_bytes('12B')
      12
      >>> to_bytes('12KB')
      12000
      >>> to_bytes('1G')
      1000000000

    Binary units 'KiB', 'MiB' etc. are also accepted:

      >>> to_bytes('1KiB')
      1024
      >>> to_bytes('1MiB')
      1048576
    """
    # (The original docstring claimed to_bytes('1G') == 100000; the code
    # has always returned 10**9 — the example above is corrected.)
    last = -1
    unit = s[last].lower()
    if unit.isdigit():
        # `s` is an integral number with no suffix
        return int(s)
    if unit == 'b':
        # ignore the 'b' or 'B' suffix
        last -= 1
        unit = s[last].lower()
    if unit == 'i':
        # binary prefix: powers of 1024
        k = 1024
        last -= 1
        unit = s[last].lower()
    else:
        # decimal prefix: powers of 1000
        k = 1000
    if unit.isdigit():
        # no magnitude letter after stripping the 'B'
        return int(s[0:(last + 1)])
    # FIX: the long ladder of ifs is collapsed to a lookup table; the
    # historical behavior of returning None for an unknown suffix is
    # preserved (explicitly, instead of by fall-through).
    exponents = {'k': 1, 'm': 2, 'g': 3, 't': 4,
                 'p': 5, 'e': 6, 'z': 7, 'y': 8}
    exponent = exponents.get(unit)
    if exponent is None:
        return None
    return int(float(s[0:last]) * (k ** exponent))
def handle_block(
        mediator_state: MediatorTransferState,
        state_change: Block,
        channelidentifiers_to_channels: ChannelMap,
        pseudo_random_generator: random.Random,
) -> TransitionResult[MediatorTransferState]:
    """After Raiden learns about a new block this function must be called
    to handle expiration of the hash time locks.

    Args:
        state: The current state.

    Return:
        TransitionResult: The resulting iteration
    """
    block_number = state_change.block_number
    expired_locks_events = events_to_remove_expired_locks(
        mediator_state,
        channelidentifiers_to_channels,
        block_number,
        pseudo_random_generator,
    )
    secret_reveal_events = events_for_onchain_secretreveal_if_dangerzone(
        channelmap=channelidentifiers_to_channels,
        secrethash=mediator_state.secrethash,
        transfers_pair=mediator_state.transfers_pair,
        block_number=block_number,
        block_hash=state_change.block_hash,
    )
    unlock_fail_events = events_for_expired_pairs(
        channelidentifiers_to_channels=channelidentifiers_to_channels,
        transfers_pair=mediator_state.transfers_pair,
        waiting_transfer=mediator_state.waiting_transfer,
        block_number=block_number,
    )
    all_events = unlock_fail_events + secret_reveal_events + expired_locks_events
    return TransitionResult(mediator_state, all_events)
def ordered_load(self, stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    """Allows you to use `pyyaml` to load as OrderedDict.

    Falls back to parsing the stream as JSON when YAML scanning fails.
    Taken from https://stackoverflow.com/a/21912744/1927102
    """
    class OrderedLoader(Loader):
        pass

    def construct_mapping(loader, node):
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    # Route every YAML mapping through the OrderedDict constructor.
    OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
    try:
        try:
            result = yaml.load(stream, OrderedLoader)
        except yaml.scanner.ScannerError:
            # Not valid YAML; retry the content as JSON.
            if type(stream) == str:
                result = json.loads(stream, object_pairs_hook=object_pairs_hook)
            else:
                # yaml.load already consumed the stream; rewind first.
                stream.seek(0)
                result = json.load(stream, object_pairs_hook=object_pairs_hook)
    except Exception as e:
        # Any other parse failure is reported and yields an empty result.
        self.error(e)
        result = {}
    return result
def create_alarm(panel_json, abode, area='1'):
    """Create a new alarm device from a panel response."""
    # Annotate the raw panel response with the device fields Abode expects.
    panel_json.update({
        'name': CONST.ALARM_NAME,
        'id': CONST.ALARM_DEVICE_ID + area,
        'type': CONST.ALARM_TYPE,
        'type_tag': CONST.DEVICE_ALARM,
        'generic_type': CONST.TYPE_ALARM,
    })
    return AbodeAlarm(panel_json, abode, area)
def matchesOneOf(cntxt: Context, T: RDFGraph, expr: ShExJ.OneOf, _: DebugContext) -> bool:
    """expr is a OneOf; T matches if there is some shape expression se2 in
    shapeExprs such that matches(T, se2, m)."""
    for shape_expr in expr.expressions:
        if matches(cntxt, T, shape_expr):
            return True
    return False
def serviceManifest(self, fileType="json"):
    r"""The service manifest resource documents the data and other
    resources that define the service origins and power the service.
    This resource will tell you underlying databases and their location
    along with other supplementary files that make up the service.

    Inputs:
        fileType - this can be json or xml. json returns the
            manifest.json file. xml returns the manifest.xml file. These
            files are stored under the server's arcgisinput extracted
            folder for the service.

    Outputs:
        Python dictionary if fileType is json, and an
        xml.etree.ElementTree.ElementTree object if fileType is xml.
    """
    url = "{}/iteminfo/manifest/manifest.{}".format(self._url, fileType)
    f = self._get(url=url,
                  param_dict={},
                  securityHandler=self._securityHandler,
                  proxy_url=self._proxy_url,
                  proxy_port=self._proxy_port,
                  out_folder=tempfile.gettempdir(),
                  file_name=os.path.basename(url))
    if fileType == 'json':
        return f
    if fileType == 'xml':
        return ET.ElementTree(ET.fromstring(f))
def removeRef(self, doc):
    """Remove the given attribute from the Ref table maintained
    internally."""
    # Unwrap the underlying C object, tolerating a missing document.
    doc__o = None if doc is None else doc._o
    return libxml2mod.xmlRemoveRef(doc__o, self._o)
def delete_value(hive, key, vname=None, use_32bit_registry=False):
    '''Delete a registry value entry or the default value for a key.

    Args:

        hive (str):
            The name of the hive. Can be one of the following

                - HKEY_LOCAL_MACHINE or HKLM
                - HKEY_CURRENT_USER or HKCU
                - HKEY_USER or HKU
                - HKEY_CLASSES_ROOT or HKCR
                - HKEY_CURRENT_CONFIG or HKCC

        key (str):
            The key (looks like a path) to the value name.

        vname (str):
            The value name. These are the individual name/data pairs under
            the key. If not passed, the key (Default) value will be
            deleted.

        use_32bit_registry (bool):
            Deletes the 32bit portion of the registry on 64bit
            installations. On 32bit machines this is ignored.

    Return:
        bool: True if successful, otherwise False. None is returned when
        the value did not exist (winerror 2).

    Usage:

        .. code-block:: python

            import salt.utils.win_reg
            winreg.delete_value(hive='HKLM', key='SOFTWARE\\SaltTest', vname='version')
    '''
    local_hive = _to_unicode(hive)
    local_key = _to_unicode(key)
    local_vname = _to_unicode(vname)
    registry = Registry()
    try:
        hkey = registry.hkeys[local_hive]
    except KeyError:
        raise CommandExecutionError('Invalid Hive: {0}'.format(local_hive))
    access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS
    handle = None
    try:
        handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
        win32api.RegDeleteValue(handle, local_vname)
        # Notify other applications that the registry changed.
        broadcast_change()
        return True
    except Exception as exc:  # pylint: disable=E0602
        # NOTE(review): assumes every exception raised here is a pywin32
        # error carrying a `winerror` attribute — confirm.
        if exc.winerror == 2:
            # The value was not found: nothing to delete.
            return None
        else:
            log.error(exc, exc_info=True)
            log.error('Hive: %s', local_hive)
            log.error('Key: %s', local_key)
            log.error('ValueName: %s', local_vname)
            log.error('32bit Reg: %s', use_32bit_registry)
            return False
    finally:
        # Always release the registry handle.
        if handle:
            win32api.RegCloseKey(handle)
def get_patient_vcf(job, patient_dict):
    """Convenience function to get the vcf from the patient dict.

    :param dict patient_dict: dict of patient info
    :return: The vcf
    :rtype: toil.fileStore.FileID
    """
    vcf_id = patient_dict['mutation_vcf']
    local_copy = job.fileStore.readGlobalFile(
        vcf_id, os.path.join(os.getcwd(), 'temp.gz'))
    if not is_gzipfile(local_copy):
        return vcf_id
    # Store the decompressed copy and drop the gzipped original.
    unzipped_id = job.fileStore.writeGlobalFile(gunzip(local_copy))
    job.fileStore.deleteGlobalFile(vcf_id)
    return unzipped_id
def describe_features(self, traj):
    """Return a list of dictionaries describing the dihedral features.

    Parameters
    ----------
    traj : mdtraj.Trajectory
        The trajectory to describe

    Returns
    -------
    feature_descs : list of dict
        Dictionary describing each feature with the following information
        about the atoms participating in each dihedral
            - resnames: unique names of residues
            - atominds: the four atom indices
            - resseqs: unique residue sequence ids (not necessarily
              0-indexed)
            - resids: unique residue ids (0-indexed)
            - featurizer: Dihedral
            - featuregroup: the bin index (0..n_bins-1) and dihedral type
              (phi/psi/chi1 etc)
    """
    feature_descs = []
    for dihed_type in self.types:
        # TODO: Don't recompute dihedrals, just get the indices
        func = getattr(md, 'compute_%s' % dihed_type)
        # aind_tuples is a list of four-tuples of atoms participating
        # in each dihedral
        aind_tuples, _ = func(traj)
        top = traj.topology
        bin_info = []
        resseqs = []
        resids = []
        resnames = []
        all_aind = []
        # Feature ordering: bin0 over all dihedrals, then bin1 over all
        # dihedrals, etc.
        for bin_index in range(self.n_bins):
            for ainds in aind_tuples:
                resid = set(top.atom(ai).residue.index for ai in ainds)
                all_aind.append(ainds)
                bin_info += ["bin-%d" % bin_index]
                resids += [list(resid)]
                reseq = set(top.atom(ai).residue.resSeq for ai in ainds)
                resseqs += [list(reseq)]
                resname = set(top.atom(ai).residue.name for ai in ainds)
                resnames += [list(resname)]
        zippy = zip(all_aind, resseqs, resids, resnames)
        # fast check to make sure we have the right number of features
        assert len(bin_info) == len(aind_tuples) * self.n_bins
        zippy = zip(["VonMises"] * len(bin_info),
                    [dihed_type] * len(bin_info),
                    bin_info,
                    zippy)
        feature_descs.extend(dict_maker(zippy))
    return feature_descs
def clean_out_dir(directory):
    """Remove every file and subdirectory contained in *directory*.

    The directory itself is kept; only its contents are deleted.
    Accepts either a ``path`` object or a plain string path.
    """
    target = directory if isinstance(directory, path) else path(directory)
    for entry in target.files():
        entry.remove()
    for subdir in target.dirs():
        subdir.rmtree()
def _l_cv_weight ( self , donor_catchment ) :
"""Return L - CV weighting for a donor catchment .
Methodology source : Science Report SC050050 , eqn . 6.18 and 6.22a""" | try :
dist = donor_catchment . similarity_dist
except AttributeError :
dist = self . _similarity_distance ( self . catchment , donor_catchment )
b = 0.0047 * sqrt ( dist ) + 0.0023 / 2
c = 0.02609 / ( donor_catchment . record_length - 1 )
return 1 / ( b + c ) |
def _seconds_or_timedelta ( duration ) :
"""Returns ` datetime . timedelta ` object for the passed duration .
Keyword Arguments :
duration - - ` datetime . timedelta ` object or seconds in ` int ` format .""" | if isinstance ( duration , int ) :
dt_timedelta = timedelta ( seconds = duration )
elif isinstance ( duration , timedelta ) :
dt_timedelta = duration
else :
raise TypeError ( 'Expects argument as `datetime.timedelta` object ' 'or seconds in `int` format' )
return dt_timedelta |
def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
          initialWeights=None, regParam=0.0, regType=None, intercept=False,
          validateData=True, convergenceTol=0.001):
    """Train a linear regression model using Stochastic Gradient
    Descent (SGD). This solves the least squares regression formulation

        f(weights) = 1/(2n) ||A weights - y||^2

    which is the mean squared error. Here the data matrix has n rows,
    and the input RDD holds the set of rows of A, each with its
    corresponding right hand side label y.
    See also the documentation for the precise formulation.

    :param data:
      The training data, an RDD of LabeledPoint.
    :param iterations:
      The number of iterations.
      (default: 100)
    :param step:
      The step parameter used in SGD.
      (default: 1.0)
    :param miniBatchFraction:
      Fraction of data to be used for each SGD iteration.
      (default: 1.0)
    :param initialWeights:
      The initial weights.
      (default: None)
    :param regParam:
      The regularizer parameter.
      (default: 0.0)
    :param regType:
      The type of regularizer used for training our model.
      Supported values:
        - "l1" for using L1 regularization
        - "l2" for using L2 regularization
        - None for no regularization (default)
    :param intercept:
      Boolean parameter which indicates the use or not of the
      augmented representation for training data (i.e., whether bias
      features are activated or not).
      (default: False)
    :param validateData:
      Boolean parameter which indicates if the algorithm should
      validate data before training.
      (default: True)
    :param convergenceTol:
      A condition which decides iteration termination.
      (default: 0.001)
    """
    warnings.warn(
        "Deprecated in 2.0.0. Use ml.regression.LinearRegression.", DeprecationWarning)

    # Adapter handed to the generic wrapper: invoked with the prepared RDD
    # and the initial weights vector; forwards all hyperparameters to the
    # JVM-side trainer.
    def train(rdd, i):
        return callMLlibFunc("trainLinearRegressionModelWithSGD", rdd, int(iterations),
                             float(step), float(miniBatchFraction), i, float(regParam),
                             regType, bool(intercept), bool(validateData),
                             float(convergenceTol))
    return _regression_train_wrapper(train, LinearRegressionModel, data, initialWeights)
def watch_crc(params, ctxt, scope, stream, coord):
    """WatchCrc32 - Watch the total crc32 of the params.

    Example:
        The code below uses the ``WatchCrc32`` update function to update
        the ``crc`` field to the crc of the ``length`` and ``data`` fields::

            char length;
            char data[length];
            int crc<watch=length;data, update=WatchCrc32>;

    :raises errors.InvalidArguments: when fewer than two params are given
        (one field to update plus at least one watched field).
    """
    if len(params) <= 1:
        raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least two arguments")
    # params[0] is the field being updated; params[1:] are the watched fields.
    to_update = params[0]
    # Concatenate the serialized bytes of every watched field...
    total_data = utils.binary("")
    for param in params[1:]:
        total_data += param._pfp__build()
    # ...and store their combined CRC32 into the watching field.
    to_update._pfp__set_value(binascii.crc32(total_data))
def gf_poly_div(dividend, divisor):
    '''Fast polynomial division by using Extended Synthetic Division and
    optimized for GF(2^p) computations (doesn't work with standard
    polynomials outside of this galois field).

    Returns (quotient, remainder) as two bytearrays.
    '''
    # CAUTION: this function expects polynomials to follow the opposite
    # convention at decoding: the terms must go from the biggest to lowest
    # degree (while most other functions here expect a list from lowest to
    # biggest degree). eg: 1 + 2x + 5x^2 = [5, 2, 1], NOT [1, 2, 5]
    # Copy the dividend list and pad with 0 where the ecc bytes will be computed
    msg_out = bytearray(dividend)
    # normalizer = divisor[0] # precomputing for performance
    for i in xrange(len(dividend) - (len(divisor) - 1)):
        # msg_out[i] /= normalizer # for general polynomial division (when
        # polynomials are non-monic), the usual way of using synthetic
        # division is to divide the divisor g(x) with its leading coefficient
        # (call it a). In this implementation, this means: we need to compute:
        # coef = msg_out[i] / gen[0]. For more infos, see
        # http://en.wikipedia.org/wiki/Synthetic_division
        coef = msg_out[i]  # precaching
        if coef != 0:
            # log(0) is undefined, so we need to avoid that case explicitly
            # (and it's also a good optimization). In fact if you remove it,
            # it should still work because gf_mul() will take care of the
            # condition. But it's still a good practice to put the condition
            # here.
            for j in xrange(1, len(divisor)):
                # in synthetic division, we always skip the first coefficient
                # of the divisior, because it's only used to normalize the
                # dividend coefficient
                if divisor[j] != 0:  # log(0) is undefined
                    msg_out[i + j] ^= gf_mul(divisor[j], coef)
                    # equivalent to the more mathematically correct (but
                    # xoring directly is faster): msg_out[i+j] += -divisor[j] * coef
    # The resulting msg_out contains both the quotient and the remainder, the
    # remainder being the size of the divisor (the remainder has necessarily
    # the same degree as the divisor -- not length but degree == length-1 --
    # since it's what we couldn't divide from the dividend), so we compute the
    # index where this separation is, and return the quotient and remainder.
    separator = -(len(divisor) - 1)
    return msg_out[:separator], msg_out[separator:]
def with_vtk(plot=True):
    """Tests VTK interface and mesh repair of Stanford Bunny Mesh.

    When *plot* is true, shows the mesh before and after the repair.
    Returns the repaired mesh.
    """
    bunny = vtki.PolyData(bunny_scan)
    fixer = pymeshfix.MeshFix(bunny)
    if plot:
        print('Plotting input mesh')
        fixer.plot()
    fixer.repair()
    if plot:
        print('Plotting repaired mesh')
        fixer.plot()
    return fixer.mesh
def get_qword_from_data(self, data, offset):
    """Convert eight bytes of data to a quad word (little endian).

    'offset' is assumed to index into a qword array. So setting it to
    N will return a qword out of the data starting at offset N*8.

    Returns None if the data can't be turned into a quad word.
    """
    start = offset * 8
    end = start + 8
    if end > len(data):
        return None
    return struct.unpack('<Q', data[start:end])[0]
def _PrintEventLabelsCounter(self, event_labels_counter, session_identifier=None):
    """Prints the event labels counter.

    Args:
      event_labels_counter (collections.Counter): number of event tags per
          label.
      session_identifier (Optional[str]): session identifier.
    """
    if not event_labels_counter:
        return
    title = 'Event tags generated per label'
    if session_identifier:
        title = '{0:s}: {1:s}'.format(title, session_identifier)
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, column_names=['Label', 'Number of event tags'], title=title)
    # The 'total' pseudo-label is reported separately as the last row.
    for key, value in sorted(event_labels_counter.items()):
        if key == 'total':
            continue
        table_view.AddRow([key, value])
    try:
        total = event_labels_counter['total']
    except KeyError:
        # NOTE(review): a collections.Counter returns 0 (not KeyError) for a
        # missing key, so this fallback only fires for plain-dict inputs —
        # confirm whether 'N/A' is reachable with Counter input.
        total = 'N/A'
    table_view.AddRow(['Total', total])
    table_view.Write(self._output_writer)
def highlight_canvas(self, highlight):
    """Set a colored frame around the FigureCanvas if highlight is True."""
    # Use the palette's highlight color so the frame matches the theme.
    colorname = self.canvas.palette().highlight().color().name()
    if highlight:
        stylesheet = "FigureCanvas{border: 1px solid %s;}" % colorname
    else:
        stylesheet = "FigureCanvas{}"
    self.canvas.setStyleSheet(stylesheet)
def project_delete_event(self, proj_info):
    """Process project delete event.

    Looks up the project name for the deleted project ID, asks DCNM to
    delete the matching project, and records the outcome in the local
    project-info cache so a failed delete can be retried later.
    """
    LOG.debug("Processing project_delete_event...")
    proj_id = proj_info.get('resource_info')
    proj_name = self.get_project_name(proj_id)
    if proj_name:
        try:
            self.dcnm_client.delete_project(proj_name, self.cfg.dcnm.default_partition_name)
        except dexc.DfaClientRequestFailed:
            # Failed to delete project in DCNM.
            # Save the info and mark it as failure and retry it later.
            # (Fixed: this message previously said "create" in the delete path.)
            LOG.error("Failed to delete project %s on DCNM.", proj_name)
            self.update_project_info_cache(proj_id, name=proj_name, opcode='delete',
                                           result=constants.DELETE_FAIL)
        else:
            self.update_project_info_cache(proj_id, opcode='delete')
            LOG.debug('Deleted project:%s', proj_name)
            self.project_delete_notif(proj_id, proj_name)
def _find_parent ( self , path_elements ) :
"""Recurse up the tree of FileSetStates until we find a parent , i . e .
one whose path _ elements member is the start of the path _ element
argument""" | if not self . path_elements : # Automatically terminate on root
return self
elif self . path_elements == path_elements [ 0 : len ( self . path_elements ) ] :
return self
else :
return self . parent . _find_parent ( path_elements ) |
def backward(self, loss):
    """Backward propagation with loss.

    Scales the loss (or each loss in a tuple/list) by the loss-scaler
    factor before running autograd, recording the scaling operation.
    """
    with mx.autograd.record():
        scale = self._scaler.loss_scale
        if isinstance(loss, (tuple, list)):
            scaled = [single_loss * scale for single_loss in loss]
        else:
            scaled = loss * scale
        mx.autograd.backward(scaled)
def _setter ( self , attr , value , bottom , top , to_step ) :
"""Set a value .
: param attr : Attribute to set .
: param value : Value to use .
: param bottom : Get to bottom value .
: param top : Get to top value .
: param to _ step : Get to intermediary value .""" | if value < 0 or value > 1 :
raise ValueError ( "out of range" )
if value == 0.0 :
bottom ( )
elif value == 1.0 :
top ( )
else :
to_step ( value )
setattr ( self , attr , value ) |
def parse_cgn_postag(rawtag, raisefeatureexceptions=False):
    """Decode PoS features like "N(soort,ev,basis,onz,stan)" into a
    PosAnnotation data structure, based on the CGN tag overview compiled
    by Matje van de Camp.

    :raises InvalidTagException: when *rawtag* is not of the form ``head(...)``.
    :raises InvalidFeatureException: for unknown feature values, when
        *raisefeatureexceptions* is true (otherwise they are only reported
        on stderr).
    """
    # Fix: the docstring previously appeared *after* the ``global`` statement,
    # making it a no-op string expression instead of the function docstring.
    global subsets, constraints
    begin = rawtag.find('(')
    if rawtag[-1] == ')' and begin > 0:
        tag = folia.PosAnnotation(None, cls=rawtag, set='http://ilk.uvt.nl/folia/sets/cgn')
        head = rawtag[0:begin]
        tag.append(folia.Feature, subset='head', cls=head)
        rawfeatures = rawtag[begin + 1:-1].split(',')
        for rawfeature in rawfeatures:
            if rawfeature:
                found = False
                for subset, classes in subsets.items():
                    if rawfeature in classes:
                        if subset in constraints:
                            if not head in constraints[subset]:
                                # constraint not met!
                                continue
                        found = True
                        tag.append(folia.Feature, subset=subset, cls=rawfeature)
                        break
                if not found:
                    print("\t\tUnknown feature value: " + rawfeature + " in " + rawtag, file=stderr)
                    if raisefeatureexceptions:
                        raise InvalidFeatureException("Unknown feature value: " + rawfeature + " in " + rawtag)
                    else:
                        continue
        return tag
    else:
        raise InvalidTagException("Not a valid CGN tag")
def init_config(cls):
    """Initialize Gandi CLI configuration.

    Create global configuration directory with API credentials.
    Interactively prompts for the XML-RPC key, environment, SSH keyfile
    and REST key; aborts cleanly (exit code 1) on Ctrl-C.
    """
    try:
        # first load current conf and only overwrite needed params
        # we don't want to reset everything
        config_file = os.path.expanduser(cls.home_config)
        config = cls.load(config_file, 'global')
        cls._del('global', 'api.env')
        # Only the first 6 characters of stored keys are shown at the prompt.
        hidden_apikey = '%s...' % cls.get('api.key', '')[:6]
        apikey = click.prompt('Api key (xmlrpc)', default=hidden_apikey)
        if apikey == hidden_apikey:
            # if default value then use actual value not hidden one
            apikey = cls.get('api.key')
        env_choice = click.Choice(list(cls.apienvs.keys()))
        # NOTE(review): "Environnment" is a typo in the user-facing prompt;
        # left unchanged here since this edit only touches documentation.
        apienv = click.prompt('Environnment [production]/ote', default=cls.default_apienv,
                              type=env_choice, show_default=False)
        sshkey = click.prompt('SSH keyfile', default='~/.ssh/id_rsa.pub')
        hidden_apikeyrest = '%s...' % cls.get('apirest.key', '')[:6]
        apikeyrest = click.prompt('Api key (REST)', default=hidden_apikeyrest)
        if apikeyrest == hidden_apikeyrest:
            # if default value then use actual value not hidden one
            apikeyrest = cls.get('apirest.key')
        config.update({'api': {'key': apikey, 'host': cls.apienvs[apienv]}, })
        if apikeyrest:
            config.update({'apirest': {'key': apikeyrest}, })
        if sshkey is not None:
            sshkey_file = os.path.expanduser(sshkey)
            if os.path.exists(sshkey_file):
                config['sshkey'] = [sshkey_file]
        # Ensure the config directory exists with owner-only permissions.
        directory = os.path.expanduser(os.path.dirname(config_file))
        if not os.path.exists(directory):
            mkpath(directory, 0o700)
        # save to disk
        cls.save(config_file, config)
        # load in memory
        cls.load(config_file, 'global')
    except (KeyboardInterrupt, click.exceptions.Abort):
        cls.echo('Aborted.')
        sys.exit(1)
def all(ctx, fetcher_num, processor_num, result_worker_num, run_in):
    """Run all the components in subprocess or thread.

    Starts phantomjs/puppeteer (if no external proxy is configured), the
    result workers, processors, fetchers and the scheduler in the requested
    run mode, then runs the webui in the main thread so the whole group can
    be exited.  On exit, every started component is shut down.
    """
    ctx.obj['debug'] = False
    g = ctx.obj
    # FIXME: py34 cannot run components with threads
    if run_in == 'subprocess' and os.name != 'nt':
        run_in = utils.run_in_subprocess
    else:
        run_in = utils.run_in_thread
    threads = []
    try:
        # phantomjs
        if not g.get('phantomjs_proxy'):
            phantomjs_config = g.config.get('phantomjs', {})
            phantomjs_config.setdefault('auto_restart', True)
            threads.append(run_in(ctx.invoke, phantomjs, **phantomjs_config))
            # Give the process time to come up before advertising its proxy.
            time.sleep(2)
            if threads[-1].is_alive() and not g.get('phantomjs_proxy'):
                g['phantomjs_proxy'] = '127.0.0.1:%s' % phantomjs_config.get('port', 25555)
        # puppeteer
        if not g.get('puppeteer_proxy'):
            puppeteer_config = g.config.get('puppeteer', {})
            puppeteer_config.setdefault('auto_restart', True)
            threads.append(run_in(ctx.invoke, puppeteer, **puppeteer_config))
            time.sleep(2)
            if threads[-1].is_alive() and not g.get('puppeteer_proxy'):
                g['puppeteer_proxy'] = '127.0.0.1:%s' % puppeteer_config.get('port', 22222)
        # result worker
        result_worker_config = g.config.get('result_worker', {})
        for i in range(result_worker_num):
            threads.append(run_in(ctx.invoke, result_worker, **result_worker_config))
        # processor
        processor_config = g.config.get('processor', {})
        for i in range(processor_num):
            threads.append(run_in(ctx.invoke, processor, **processor_config))
        # fetcher
        fetcher_config = g.config.get('fetcher', {})
        fetcher_config.setdefault('xmlrpc_host', '127.0.0.1')
        for i in range(fetcher_num):
            threads.append(run_in(ctx.invoke, fetcher, **fetcher_config))
        # scheduler
        scheduler_config = g.config.get('scheduler', {})
        scheduler_config.setdefault('xmlrpc_host', '127.0.0.1')
        threads.append(run_in(ctx.invoke, scheduler, **scheduler_config))
        # running webui in main thread to make it exitable
        webui_config = g.config.get('webui', {})
        webui_config.setdefault('scheduler_rpc', 'http://127.0.0.1:%s/' % g.config.get('scheduler', {}).get('xmlrpc_port', 23333))
        ctx.invoke(webui, **webui_config)
    finally:
        # exit components run in threading
        for each in g.instances:
            each.quit()
        # exit components run in subprocess
        for each in threads:
            if not each.is_alive():
                continue
            if hasattr(each, 'terminate'):
                each.terminate()
            each.join()
def switch_format(self, gsr):
    """Convert the Wharton GSR format into the studyspaces API format.

    Error payloads are passed through unchanged.  Each Wharton entry is
    reshaped in place into a room record the first time its room is seen;
    subsequent slots for the same room are appended to that room's times.
    """
    if "error" in gsr:
        return gsr
    categories = {"cid": 1, "name": "Huntsman Hall", "rooms": []}
    for time in gsr["times"]:
        for entry in time:
            # NOTE(review): ``time`` (the outer loop variable) is rebound to a
            # dict below; the inner loop's iterator is unaffected, but the
            # shadowing is easy to misread.
            entry["name"] = entry["room_number"]
            del entry["room_number"]
            start_time_str = entry["start_time"]
            # Slots are fixed 30-minute bookings; the last 6 characters are
            # assumed to be a "-HH:MM"-style offset suffix — TODO confirm.
            end_time = datetime.datetime.strptime(start_time_str[:-6], '%Y-%m-%dT%H:%M:%S') + datetime.timedelta(minutes=30)
            end_time_str = end_time.strftime("%Y-%m-%dT%H:%M:%S") + "-{}".format(self.get_dst_gmt_timezone())
            time = {"available": not entry["reserved"], "start": entry["start_time"], "end": end_time_str, }
            exists = False
            for room in categories["rooms"]:
                if room["name"] == entry["name"]:
                    room["times"].append(time)
                    exists = True
            if not exists:
                # First slot for this room: strip Wharton-only fields and
                # reshape the entry itself into a studyspaces room record.
                del entry["booked_by_user"]
                del entry["building"]
                if "reservation_id" in entry:
                    del entry["reservation_id"]
                entry["lid"] = 1
                entry["gid"] = 1
                entry["capacity"] = 5
                entry["room_id"] = int(entry["id"])
                del entry["id"]
                entry["times"] = [time]
                del entry["reserved"]
                del entry["end_time"]
                del entry["start_time"]
                categories["rooms"].append(entry)
    return {"categories": [categories], "rooms": categories["rooms"]}
def commit_msg_hook(argv):
    """Hook: for checking commit message (prevent commit).

    :param argv: hook arguments; ``argv[1]`` is the commit-message file path.
    :return: 0 on success.
    :raises click.Abort: when the message fails validation.
    """
    # Fix: built-in open() takes *buffering* as its third positional argument,
    # so the encoding must be passed by keyword (also valid for codecs.open).
    with open(argv[1], "r", encoding="utf-8") as fh:
        # NOTE(review): readlines() keeps trailing newlines, so joining with
        # "\n" doubles them — confirm _check_message tolerates this.
        message = "\n".join(filter(lambda x: not x.startswith("#"), fh.readlines()))
    options = {"allow_empty": True}
    if not _check_message(message, options):
        click.echo(
            "Aborting commit due to commit message errors (override with "
            "'git commit --no-verify').",
            file=sys.stderr)
        raise click.Abort
    return 0
def get_addon_name(addonxml):
    '''Parses an addon name from the given addon.xml filename.'''
    # The first <addon> element carries the name attribute.
    document = parse(addonxml)
    root = document.getElementsByTagName('addon')[0]
    return root.getAttribute('name')
def newComic(self, comic):
    """Start new comic list in HTML.

    Closes any list item / list left open by the previous comic, then
    opens a new list headed by this comic's name.
    """
    write = self.html.write
    if self.lastUrl is not None:
        write(u'</li>\n')
    if self.lastComic is not None:
        write(u'</ul>\n')
    write(u'<li>%s</li>\n' % comic.name)
    write(u'<ul>\n')
def new_points(factory: IterationPointFactory, solution,
               weights: List[List[float]] = None) -> List[Tuple[np.ndarray, List[float]]]:
    """Generate approximate set of points.

    Generate set of Pareto optimal solutions projecting from the Pareto
    optimal solution using weights to determine the direction.

    Parameters
    ----------
    factory:
        IterationPointFactory with suitable optimization problem
    solution:
        Current solution from which new solutions are projected
    weights:
        Direction of the projection, if not given generate with
        :func:`random_weights`
    """
    # Local import — presumably to avoid a circular import at module load
    # time; TODO confirm.
    from desdeo.preference.direct import DirectSpecification
    points = []
    nof = factory.optimization_method.optimization_problem.problem.nof_objectives()
    if not weights:
        # Default: 50 random directions per objective.
        weights = random_weights(nof, 50 * nof)
    for pref in map(lambda w: DirectSpecification(factory.optimization_method, np.array(w)), weights):
        points.append(factory.result(pref, solution))
    return points
def parse_template(template_str):
    """Parse the SAM template.

    :param template_str: A packaged YAML or json CloudFormation template
    :type template_str: str
    :return: Dictionary with keys defined in the template
    :rtype: dict
    """
    try:
        # PyYAML doesn't support json as well as it should, so if the input
        # is actually just json it is better to parse it with the standard
        # json parser.
        return json.loads(template_str, object_pairs_hook=OrderedDict)
    except ValueError:
        # Not JSON: fall back to YAML, teaching the safe loader to keep
        # mappings ordered and to understand CloudFormation '!' intrinsics.
        yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _dict_constructor)
        yaml.SafeLoader.add_multi_constructor('!', intrinsics_multi_constructor)
        return yaml.safe_load(template_str)
def make_close_message(code=1000, message=b''):
    """Close the websocket, sending the specified code and message.

    The close-frame payload is a 2-byte big-endian status code followed by
    the reason bytes.
    """
    return _make_frame(struct.pack('!H%ds' % len(message), code, message), opcode=OPCODE_CLOSE)
def main():
    """The main function.

    These are the steps performed for the data clean up:

    1. Prints the version number.
    2. Reads the configuration file (:py:func:`read_config_file`).
    3. Creates a new directory with ``data_clean_up`` as prefix and the date
       and time as suffix.
    4. Check the input file type (``bfile``, ``tfile`` or ``file``).
    5. Creates an intermediate directory with the section as prefix and the
       script name as suffix (inside the previous directory).
    6. Runs the required script in order (according to the configuration file
       section).

    .. note::
        The main function is not responsible to check if the required files
        exist. This should be done in the ``run`` functions.
    """
    # Getting and checking the options
    args = parse_args()
    check_args(args)
    # The directory name
    dirname = "data_clean_up."
    dirname += datetime.datetime.today().strftime("%Y-%m-%d_%H.%M.%S")
    while os.path.isdir(dirname):
        # Directory for this second already exists: wait for a new timestamp.
        time.sleep(1)
        dirname = "data_clean_up."
        dirname += datetime.datetime.today().strftime("%Y-%m-%d_%H.%M.%S")
    # Creating the output directory
    os.mkdir(dirname)
    # Configuring the root logger
    add_file_handler_to_root(os.path.join(dirname, "pyGenClean.log"))
    logger.info("pyGenClean version {}".format(__version__))
    plink_version = get_plink_version()
    logger.info("Using Plink version {}".format(plink_version))
    # Reading the configuration file
    logger.info("Reading configuration file [ {} ]".format(args.conf))
    order, conf = read_config_file(args.conf)
    # Executing the data clean up
    current_in = None
    current_in_type = None
    suffixes = None
    if args.tfile is not None:
        current_in = args.tfile
        current_in_type = "tfile"
        suffixes = (".tped", ".tfam")
    elif args.bfile is not None:
        current_in = args.bfile
        current_in_type = "bfile"
        suffixes = (".bed", ".bim", ".fam")
    else:
        current_in = args.file
        current_in_type = "file"
        suffixes = (".ped", ".map")
    # Creating the excluded files (empty placeholders) and the list of
    # initial input files.
    try:
        with open(os.path.join(dirname, "excluded_markers.txt"), "w") as o_f:
            pass
        with open(os.path.join(dirname, "excluded_samples.txt"), "w") as o_f:
            pass
        with open(os.path.join(dirname, "initial_files.txt"), "w") as o_file:
            for s in suffixes:
                print >> o_file, current_in + s
    except IOError:
        msg = "{}: cannot write summary".format(dirname)
        raise ProgramError(msg)
    # Counting the number of markers and samples in the datafile
    logger.info("Counting initial number of samples and markers")
    nb_markers, nb_samples = count_markers_samples(current_in, current_in_type)
    logger.info(" - {:,d} samples".format(nb_samples))
    logger.info(" - {:,d} markers".format(nb_markers))
    # Creating the result summary file containing the initial numbers
    try:
        with open(os.path.join(dirname, "results_summary.txt"), "w") as o_file:
            print >> o_file, "# initial"
            print >> o_file, ("Initial number of markers\t" "{:,d}".format(nb_markers))
            print >> o_file, ("Initial number of samples\t" "{:,d}".format(nb_samples))
            print >> o_file, "---"
    except IOError:
        msg = "{}: cannot write summary".format(dirname)
        raise ProgramError(msg)
    latex_summaries = []
    steps = []
    descriptions = []
    long_descriptions = []
    graphic_paths = set()
    # Run each configured step in order; each step's output becomes the
    # next step's input.
    for number in order:
        # Getting the script name and its options
        script_name, options = conf[number]
        # Getting the output prefix
        output_prefix = os.path.join(dirname, "{}_{}".format(number, script_name))
        # Getting the function to use
        function_to_use = available_functions[script_name]
        # Executing the function
        logger.info("Running {} {}".format(number, script_name))
        logger.info(" - Using {} as prefix for input " "files".format(current_in))
        logger.info(" - Results will be in [ {} ]".format(output_prefix))
        # Executing the function
        step_results = function_to_use(in_prefix=current_in, in_type=current_in_type,
                                       out_prefix=output_prefix, base_dir=dirname,
                                       options=options, )
        # Updating the input files and input file types
        current_in = step_results.next_file
        current_in_type = step_results.next_file_type
        # Saving what's necessary for the LaTeX report
        latex_summaries.append(step_results.latex_summary)
        steps.append(script_name)
        descriptions.append(step_results.description)
        long_descriptions.append(step_results.long_description)
        if step_results.graph_path is not None:
            graphic_paths.update(step_results.graph_path)
    # Counting the final number of samples and markers
    logger.info("Counting final number of samples and markers")
    nb_markers, nb_samples = count_markers_samples(current_in, current_in_type)
    logger.info(" - {:,d} samples".format(nb_samples))
    logger.info(" - {:,d} markers".format(nb_markers))
    # Getting the final suffixes (with the count applicable to each file)
    suffixes = None
    if current_in_type == "tfile":
        suffixes = ((".tped", nb_markers), (".tfam", nb_samples))
    elif current_in_type == "bfile":
        suffixes = ((".bed", None), (".bim", nb_markers), (".fam", nb_samples))
    else:
        suffixes = ((".ped", nb_samples), (".map", nb_markers))
    with open(os.path.join(dirname, "final_files.txt"), "w") as o_file:
        for s, nb in suffixes:
            if nb:
                print >> o_file, current_in + s + "\t{:,d}".format(nb)
            else:
                print >> o_file, current_in + s
    # Generating the graphics paths file
    graphic_paths_fn = None
    if len(graphic_paths) > 0:
        try:
            graphic_paths_fn = os.path.join(dirname, "graphic_paths.txt")
            with open(graphic_paths_fn, "w") as o_file:
                for path in sorted(graphic_paths):
                    print >> o_file, path
        except IOError:
            msg = "{}: cannot write summary".format(dirname)
            raise ProgramError(msg)
    # We create the automatic report
    logger.info("Generating automatic report")
    report_name = os.path.join(dirname, "automatic_report.tex")
    auto_report.create_report(dirname, report_name, project_name=args.report_number,
                              steps=steps, descriptions=descriptions,
                              graphic_paths_fn=graphic_paths_fn,
                              long_descriptions=long_descriptions,
                              summaries=latex_summaries, background=args.report_background,
                              summary_fn=os.path.join(dirname, "results_summary.txt"),
                              report_title=args.report_title,
                              report_author=args.report_author,
                              initial_files=os.path.join(dirname, "initial_files.txt"),
                              final_files=os.path.join(dirname, "final_files.txt"),
                              final_nb_markers="{:,d}".format(nb_markers),
                              final_nb_samples="{:,d}".format(nb_samples),
                              plink_version=plink_version, )
def display(self, *arg):
    """For simple demo: display style used for testing.

    Prints the stock's latest quote plus, for each period in *arg*, the
    moving average, its change and trend serial; then volume, the MAO(3,6)
    oscillator and the RABC value.
    """
    print self.stock_name, self.stock_no
    print '%s %s %s(%+.2f%%)' % (self.data_date[-1], self.raw_data[-1], self.stock_range[-1], self.range_per)
    for i in arg:
        print ' - MA%02s %.2f %s(%s)' % (i, self.MA(i), self.MAC(i), self.MA_serial(i)[0])
    # Volume is reported in thousands.
    print ' - Volume: %s %s(%s)' % (self.MAVOL(1) / 1000, self.MACVOL(1), self.MAVOL_serial(1)[0])
    MAO = self.MAO(3, 6)
    print ' - MAO(3-6): %.2f %s(%s)' % (MAO[0][1][-1], MAO[1], MAO[0][0])
    print ' - RABC: %s' % self.RABC
def get_country(_, data):
    """Parse an 802.11 country information element.

    http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n267.

    Positional arguments:
    data -- bytearray data to read.

    Returns:
    Dict.
    """
    # Byte 2 is the environment indicator; bytes 0-1 (country code) are
    # consumed by the caller — TODO confirm.
    answers = {'Environment': country_env_str(chr(data[2]))}
    data = data[3:]
    # The remainder is a sequence of 3-byte triplets: either a regulatory
    # extension or a (first channel, num channels, max power) entry.
    while len(data) >= 3:
        triplet = ieee80211_country_ie_triplet(data)
        if triplet.ext.reg_extension_id >= IEEE80211_COUNTRY_EXTENSION_ID:
            answers['Extension ID'] = triplet.ext.reg_extension_id
            answers['Regulatory Class'] = triplet.ext.reg_class
            answers['Coverage class'] = triplet.ext.coverage_class
            answers['up to dm'] = triplet.ext.coverage_class * 450
            data = data[3:]
            continue
        if triplet.chans.first_channel <= 14:
            # 2 GHz: channels are numbered consecutively.
            end_channel = triplet.chans.first_channel + (triplet.chans.num_channels - 1)
        else:
            # 5 GHz: channel numbers step by 4.
            end_channel = triplet.chans.first_channel + (4 * (triplet.chans.num_channels - 1))
        answers['Channels dBm'] = triplet.chans.max_power
        answers['Channels'] = (triplet.chans.first_channel, end_channel)
        data = data[3:]
    return answers
def get_list(client, list_id):
    '''Retrieve the list with the given ID via an authenticated request.'''
    endpoint = '{}/{}'.format(client.api.Endpoints.LISTS, list_id)
    return client.authenticated_request(endpoint).json()
def _name(self):
    """Define object name: camera, creation timestamp and media duration."""
    parts = (self._camera.name,
             pretty_timestamp(self.created_at),
             self._attrs.get('mediaDuration'))
    return "{0} {1} {2}".format(*parts)
def md_report(self, file_path):
    """Generate a Markdown report from ZAP and save it to *file_path*."""
    self.logger.debug('Generating MD report')
    self._write_report(self.zap.core.mdreport(), file_path)
def ufo2glyphs(options):
    """Convert one designspace file or one or more UFOs to a Glyphs.app source file.

    Returns 1 (without converting) when the sources are neither a single
    .designspace file nor a set of .ufo directories.
    """
    import fontTools.designspaceLib
    import defcon
    sources = options.designspace_file_or_UFOs
    designspace_file = None
    if (len(sources) == 1 and sources[0].endswith(".designspace") and os.path.isfile(sources[0])):
        designspace_file = sources[0]
        designspace = fontTools.designspaceLib.DesignSpaceDocument()
        designspace.read(designspace_file)
        object_to_read = designspace
    elif all(source.endswith(".ufo") and os.path.isdir(source) for source in sources):
        ufos = [defcon.Font(source) for source in sources]
        ufos.sort(key=lambda ufo: [  # Order the masters by weight and width
            ufo.info.openTypeOS2WeightClass or 400, ufo.info.openTypeOS2WidthClass or 5, ])
        object_to_read = ufos
    else:
        print("Please specify just one designspace file *or* one or more "
              "UFOs. They must end in '.designspace' or '.ufo', respectively.",
              file=sys.stderr, )
        return 1
    font = glyphsLib.to_glyphs(object_to_read, minimize_ufo_diffs=options.no_preserve_glyphsapp_metadata)
    # Make the Glyphs file more suitable for roundtrip:
    font.customParameters["Disable Last Change"] = options.enable_last_change
    font.disablesAutomaticAlignment = options.enable_automatic_alignment
    if options.output_path:
        font.save(options.output_path)
    else:
        # Derive an output name from the designspace file or the family name.
        if designspace_file:
            filename_to_write = os.path.splitext(designspace_file)[0] + ".glyphs"
        else:
            filename_to_write = os.path.join(os.path.dirname(sources[0]),
                                             font.familyName.replace(" ", "") + ".glyphs", )
        font.save(filename_to_write)
def get_available_devices(self):
    """Gets available devices using mbedls and self.available_edbg_ports.

    :return: List of connected devices as dictionaries.
    """
    devices = self.mbeds.list_mbeds() if self.mbeds else []
    # Check non mbedOS supported devices.
    # Just for backward compatible reason - is obsolete..
    for port in self.available_edbg_ports():
        devices.append({
            "platform_name": "SAM4E",
            "serial_port": port,
            "mount_point": None,
            "target_id": None,
            "baud_rate": 460800,
        })
    for device in devices:
        device['state'] = "unknown"
    return devices
def docx_docx_gen_text(doc: DOCX_DOCUMENT_TYPE,
                       config: TextProcessingConfig) -> Iterator[str]:
    # only called if docx loaded
    """Iterate through a DOCX file and yield text.

    Args:
        doc: DOCX document to process
        config: :class:`TextProcessingConfig` control object

    Yields:
        pieces of text (paragraphs)
    """
    # NOTE(review): ``in_order`` is not defined in this function; it is
    # presumably a module-level flag -- confirm against the full module.
    if in_order:
        # Walk paragraphs and tables interleaved, in document order.
        for item in docx_docx_iter_block_items(doc):
            if isinstance(item, docx.text.paragraph.Paragraph):
                yield docx_process_simple_text(item.text, config.width)
            elif isinstance(item, docx.table.Table):
                yield docx_process_table(item, config)
    else:
        # Fallback: all paragraphs first, then all tables.
        for para in doc.paragraphs:
            yield docx_process_simple_text(para.text, config.width)
        for tbl in doc.tables:
            yield docx_process_table(tbl, config)
def detectRamPorts(stm: IfContainer, current_en: RtlSignalBase):
    """Detect RAM ports in If statement.

    Yields tuples describing each discovered port:
    ``(RAM_WRITE, mem, addr, en, data)`` or ``(RAM_READ, mem, addr, en, dst)``.

    :param stm: statement to detect the ram ports in
    :param current_en: curent en/clk signal
    """
    # Only a plain ``if`` body can describe a RAM port here; any
    # else/elif branch disqualifies the whole statement.
    if stm.ifFalse or stm.elIfs:
        return
    for _stm in stm.ifTrue:
        if isinstance(_stm, IfContainer):
            # Nested condition: AND it into the enable signal and recurse.
            yield from detectRamPorts(_stm, _stm.cond & current_en)
        elif isinstance(_stm, Assignment):
            if isinstance(_stm.dst._dtype, HArray):
                # Assignment into an array destination -> write port.
                assert len(_stm.indexes) == 1, "one address per RAM port"
                w_addr = _stm.indexes[0]
                mem = _stm.dst
                yield (RAM_WRITE, mem, w_addr, current_en, _stm.src)
            elif _stm.src.hidden and len(_stm.src.drivers) == 1:
                # Source is a hidden signal with a single driving operator;
                # an INDEX into an array source -> read port.
                op = _stm.src.drivers[0]
                mem = op.operands[0]
                if isinstance(mem._dtype, HArray) and op.operator == AllOps.INDEX:
                    r_addr = op.operands[1]
                    if _stm.indexes:
                        # Indexed destination on a read is not supported.
                        raise NotImplementedError()
                    yield (RAM_READ, mem, r_addr, current_en, _stm.dst)
def _enforce_instance ( model_or_class ) :
"""It ' s a common mistake to not initialize a
schematics class . We should handle that by just
calling the default constructor .""" | if isinstance ( model_or_class , type ) and issubclass ( model_or_class , BaseType ) :
return model_or_class ( )
return model_or_class |
def prepare_mosaic(self, image, fov_deg, name=None):
    """Prepare a new (blank) mosaic image based on the pointing of
    the parameter image.

    Either creates a fresh blank canvas sized to ``fov_deg``, or -- when
    the ``reuse_image`` setting is on and a previous mosaic exists --
    recycles that image.  Returns the mosaic image object.
    """
    header = image.get_header()
    ra_deg, dec_deg = header['CRVAL1'], header['CRVAL2']
    data_np = image.get_data()
    # dtype = data_np.dtype
    dtype = None
    # Use the median of the seed image as the background reference level.
    self.bg_ref = iqcalc.get_median(data_np)
    # TODO: handle skew (differing rotation for each axis)?
    skew_limit = self.settings.get('skew_limit', 0.1)
    (rot_deg, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header, skew_threshold=skew_limit)
    self.logger.debug("image0 rot=%f cdelt1=%f cdelt2=%f" % (rot_deg, cdelt1, cdelt2))
    # Prepare pixel scale for each axis
    px_scale = (math.fabs(cdelt1), math.fabs(cdelt2))
    cdbase = [np.sign(cdelt1), np.sign(cdelt2)]
    reuse_image = self.settings.get('reuse_image', False)
    if (not reuse_image) or (self.img_mosaic is None):
        self.logger.debug("creating blank image to hold mosaic")
        self.fv.gui_do(self._prepare_mosaic1, "Creating blank image...")
        # GC old mosaic
        self.img_mosaic = None
        img_mosaic = dp.create_blank_image(ra_deg, dec_deg, fov_deg, px_scale, rot_deg, cdbase=cdbase, logger=self.logger, pfx='mosaic', dtype=dtype)
        if name is not None:
            img_mosaic.set(name=name)
        imname = img_mosaic.get('name', image.get('name', "NoName"))
        self.logger.debug("mosaic name is '%s'" % (imname))
        # avoid making a thumbnail of this if seed image is also that way
        nothumb = not self.settings.get('make_thumbs', False)
        if nothumb:
            img_mosaic.set(nothumb=True)
        # image is not on disk, set indication for other plugins
        img_mosaic.set(path=None)
        # TODO: fill in interesting/select object headers from seed image
        self.img_mosaic = img_mosaic
        self.logger.info("adding mosaic image '%s' to channel" % (imname))
        self.fv.gui_call(self.fv.add_image, imname, img_mosaic, chname=self.mosaic_chname)
    else:  # <-- reuse image (faster)
        self.logger.debug("Reusing previous mosaic image")
        self.fv.gui_do(self._prepare_mosaic1, "Reusing previous mosaic image...")
        img_mosaic = dp.recycle_image(self.img_mosaic, ra_deg, dec_deg, fov_deg, px_scale, rot_deg, cdbase=cdbase, logger=self.logger, pfx='mosaic')
    # Log the final rotation/scale actually used by the mosaic canvas.
    header = img_mosaic.get_header()
    (rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header, skew_threshold=skew_limit)
    self.logger.debug("mosaic rot=%f cdelt1=%f cdelt2=%f" % (rot, cdelt1, cdelt2))
    return img_mosaic
def destroy_server(server_id):
    '''Given a UUID id of a div removed or replaced in the Jupyter
    notebook, destroy the corresponding server sessions and stop it.'''
    state = curstate()
    server = state.uuid_to_server.get(server_id, None)
    if server is None:
        log.debug("No server instance found for uuid: %r" % server_id)
        return
    try:
        # Tear down every live session before stopping the server itself.
        for session in server.get_sessions():
            session.destroy()
        server.stop()
        del curstate().uuid_to_server[server_id]
    except Exception as e:
        # Best-effort cleanup: failures are logged, never raised.
        log.debug("Could not destroy server for id %r: %s" % (server_id, e))
def get_header(uri):
    """Pull a FITS header from observation at the given URI.

    Results are memoized in the module-level ``astheaders`` cache so each
    URI is fetched at most once.

    @param uri: The URI of the image in VOSpace.
    """
    try:
        return astheaders[uri]
    except KeyError:
        # Cache miss: fetch a minimal 1x1 cutout just to get the header.
        header = get_hdu(uri, cutout="[1:1,1:1]")[0].header
        astheaders[uri] = header
        return header
def to_ip(self, values, from_unit):
    """Return values in IP and the units to which the values have been converted."""
    if from_unit in self.ip_units:
        # Already an IP unit -- nothing to convert.
        return values, from_unit
    # Metric tonnes map onto tons; every other unit falls back to pounds.
    target = 'ton' if from_unit == 'tonne' else 'lb'
    return self.to_unit(values, target, from_unit), target
def shift(self, amount):
    """Shift the position by *amount*.

    Both endpoints are moved when present; either may be ``None``
    (unset), in which case it is left untouched.
    """
    if self.left is not None:
        self.left += amount
    # BUG FIX: the original re-tested ``self.left`` here, so ``right``
    # was never shifted when ``left`` was None.
    if self.right is not None:
        self.right += amount
def com_google_fonts_check_fontdata_namecheck(ttFont, familyname):
    """Familyname must be unique according to namecheck.fontdata.com

    Queries the namecheck service and yields a (status, message) pair:
    INFO when the name appears taken, PASS when it looks unique, and
    ERROR when the service could not be reached.
    """
    FB_ISSUE_TRACKER = "https://github.com/googlefonts/fontbakery/issues"
    import requests
    url = f"http://namecheck.fontdata.com/?q={familyname}"
    try:
        response = requests.get(url, timeout=10)
        data = response.content.decode("utf-8")
        if "fonts by that exact name" in data:
            yield INFO, ("The family name '{}' seem to be already in use.\n"
                         "Please visit {} for more info.").format(familyname, url)
        else:
            yield PASS, "Font familyname seems to be unique."
    except Exception:
        # FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; network/decoding failures are
        # the only errors we want to report here.
        yield ERROR, ("Failed to access: '{}'.\n"
                      "Please report this issue at:\n{}").format(url, FB_ISSUE_TRACKER)
def handle_CR(self, value):
    """Parses cited references.

    Extracts author, date, journal, volume, start page and DOI from a
    single cited-reference field value and returns a populated
    ``self.entry_class`` instance, or ``None`` when the entry cannot be
    parsed at all.
    """
    citation = self.entry_class()
    value = strip_tags(value)
    # First-author name and publication date.
    ptn = '([\w\s\W]+),\s([0-9]{4}),\s([\w\s]+)'
    ny_match = re.match(ptn, value, flags=re.U)
    nj_match = re.match('([\w\s\W]+),\s([\w\s]+)', value, flags=re.U)
    if ny_match is not None:
        # "Name, 1999, Journal" form.
        name_raw, date, journal = ny_match.groups()
    elif nj_match is not None:
        # "Name, Journal" form (no date).
        name_raw, journal = nj_match.groups()
        date = None
    else:
        return
    # Entries that open with the year carry no author name.
    datematch = re.match('([0-9]{4})', value)
    if datematch:
        date = datematch.group(1)
        name_raw = None
    if name_raw:
        name_tokens = [t.replace('.', '') for t in name_raw.split(' ')]
        if len(name_tokens) > 4 or value.startswith('*'):  # Probably not a person.
            proc = lambda x: _strip_punctuation(x)
            aulast = ' '.join([proc(n) for n in name_tokens]).upper()
            auinit = ''
        elif len(name_tokens) > 0:
            aulast = name_tokens[0].upper()
            proc = lambda x: _space_sep(_strip_punctuation(x))
            auinit = ' '.join([proc(n) for n in name_tokens[1:]]).upper()
        else:
            # NOTE(review): unreachable -- str.split(' ') on a truthy
            # string always yields at least one token, and indexing an
            # empty list here would raise IndexError anyway.
            aulast = name_tokens[0].upper()
            auinit = ''
        setattr(citation, 'authors_init', [(aulast, auinit)])
    if date:
        date = int(date)
    setattr(citation, 'date', date)
    setattr(citation, 'journal', journal)
    # Volume.
    v_match = re.search('\,\s+V([0-9A-Za-z]+)', value)
    if v_match is not None:
        volume = v_match.group(1)
    else:
        volume = None
    setattr(citation, 'volume', volume)
    # Start page.
    p_match = re.search('\,\s+[Pp]([0-9A-Za-z]+)', value)
    if p_match is not None:
        page = p_match.group(1)
    else:
        page = None
    setattr(citation, 'pageStart', page)
    # DOI.
    doi_match = re.search('DOI\s(.*)', value)
    if doi_match is not None:
        doi = doi_match.group(1)
    else:
        doi = None
    setattr(citation, 'doi', doi)
    return citation
def do_refresh(self, args):
    """Refresh the view of the log group.

    Reloads the IAM role behind this stack resource and its attached
    policies, printing each for inspection.  (Python 2 ``print``
    statement syntax.)
    """
    print "stackResource: {}".format(self.stackResource)
    # Look up the role named by the stack resource's physical id.
    self.roleDetails = AwsConnectionFactory.getIamClient().get_role(RoleName=self.stackResource.physical_resource_id)
    print "== role details =="
    pprint(self.roleDetails)
    self.rolePolicies = self.loadRolePolicies()
    print "== attached policies =="
    pprint(self.rolePolicies)
def parse_xml_report(cls, conf, path):
    """Parse the ivy xml report corresponding to the name passed to ivy.

    :API: public

    :param string conf: the ivy conf name (e.g. "default")
    :param string path: The path to the ivy report file.
    :returns: The info in the xml report.
    :rtype: :class:`IvyInfo`
    :raises: :class:`IvyResolveReportError` if no report exists.
    """
    if not os.path.exists(path):
        raise cls.IvyResolveReportError('Missing expected ivy output file {}'.format(path))
    logger.debug("Parsing ivy report {}".format(path))
    ret = IvyInfo(conf)
    etree = ET.parse(path)
    doc = etree.getroot()
    # Each <module> groups the resolved revisions of one org/name coordinate.
    for module in doc.findall('dependencies/module'):
        org = module.get('organisation')
        name = module.get('name')
        for revision in module.findall('revision'):
            rev = revision.get('name')
            # Record which modules pulled this revision in.
            callers = []
            for caller in revision.findall('caller'):
                callers.append(IvyModuleRef(caller.get('organisation'), caller.get('name'), caller.get('callerrev')))
            # One IvyModule per resolved artifact of this revision.
            for artifact in revision.findall('artifacts/artifact'):
                classifier = artifact.get('extra-classifier')
                ext = artifact.get('ext')
                ivy_module_ref = IvyModuleRef(org=org, name=name, rev=rev, classifier=classifier, ext=ext)
                artifact_cache_path = artifact.get('location')
                ivy_module = IvyModule(ivy_module_ref, artifact_cache_path, tuple(callers))
                ret.add_module(ivy_module)
    return ret
def _replace_bm(self):
    """Replace ``_block_matcher`` with current values."""
    # Gather the current tuning parameters, then rebuild the matcher.
    sgbm_params = dict(
        minDisparity=self._min_disparity,
        numDisparities=self._num_disp,
        SADWindowSize=self._sad_window_size,
        uniquenessRatio=self._uniqueness,
        speckleWindowSize=self._speckle_window_size,
        speckleRange=self._speckle_range,
        disp12MaxDiff=self._max_disparity,
        P1=self._P1,
        P2=self._P2,
        fullDP=self._full_dp,
    )
    self._block_matcher = cv2.StereoSGBM(**sgbm_params)
def from_bytes(cls, bitstream):
    '''Parse the given packet and update properties accordingly.

    Accepts a ``ConstBitStream``, ``Bits`` or raw bytes, and returns a
    new, sanitized packet instance.

    :raises ValueError: if the type nibble does not match this class.
    '''
    packet = cls()
    # Convert to ConstBitStream (if not already provided)
    if not isinstance(bitstream, ConstBitStream):
        if isinstance(bitstream, Bits):
            bitstream = ConstBitStream(auto=bitstream)
        else:
            bitstream = ConstBitStream(bytes=bitstream)
    # Read the type
    type_nr = bitstream.read('uint:4')
    if type_nr != packet.message_type:
        msg = 'Invalid bitstream for a {0} packet'
        class_name = packet.__class__.__name__
        raise ValueError(msg.format(class_name))
    # Read the flags
    (packet.probe, packet.enlra_enabled, packet.security) = bitstream.readlist('3*bool')
    # Skip reserved bits
    packet._reserved1 = bitstream.read(17)
    # Store the record count until we need it
    record_count = bitstream.read('uint:8')
    # Read the nonce
    packet.nonce = bitstream.read('bytes:8')
    # Read the records
    for dummy in range(record_count):
        record = MapReplyRecord.from_bytes(bitstream)
        packet.records.append(record)
    # If the security flag is set then there should be security data left
    # TODO: deal with security flag [LISP-Security]
    if packet.security:
        raise NotImplementedError('Handling security data is not ' + 'implemented yet')
    # Verify that the properties make sense
    packet.sanitize()
    return packet
def calcMz(self, specfiles=None, guessCharge=True, obsMzKey='obsMz'):
    """Calculate the exact mass for ``Sii`` elements from the
    ``Sii.peptide`` sequence.

    :param specfiles: the name of an ms-run file or a list of names. If None
        all specfiles are selected.
    :param guessCharge: bool, True if the charge should be guessed if the
        attribute ``charge`` is missing from ``Sii``. Uses the calculated
        peptide mass and the observed m/z value to calculate the charge.
    :param obsMzKey: attribute name of the observed m/z value in ``Sii``.
    """
    # TODO: important to test function, since changes were made
    _calcMass = maspy.peptidemethods.calcPeptideMass
    _calcMzFromMass = maspy.peptidemethods.calcMzFromMass
    _massProton = maspy.constants.atomicMassProton
    _guessCharge = lambda mass, mz: round(mass / (mz - _massProton), 0)
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    # Cache per-peptide masses so repeated sequences are computed once.
    tempMasses = dict()
    for specfile in specfiles:
        if specfile not in self.info:
            warntext = 'Error while calling "SiiContainer.calcMz()": ' '"%s" is not present in "SiiContainer.info"!' % (specfile, )
            warnings.warn(warntext)
        else:
            for sii in self.getItems(specfiles=specfile):
                peptide = sii.peptide
                if peptide not in tempMasses:
                    if hasattr(sii, 'diPeptide'):
                        # Cross-linked di-peptide: mass is the sum of both chains.
                        tempMasses[peptide] = (_calcMass(sii.peptide1) + _calcMass(sii.peptide2))
                    else:
                        tempMasses[peptide] = _calcMass(peptide)
                peptideMass = tempMasses[peptide]
                if sii.charge is not None:
                    sii.excMz = _calcMzFromMass(peptideMass, sii.charge)
                elif guessCharge:
                    # Derive charge from the mass / observed m/z ratio.
                    guessedCharge = _guessCharge(peptideMass, getattr(sii, obsMzKey))
                    sii.excMz = _calcMzFromMass(peptideMass, guessedCharge)
                    sii.charge = guessedCharge
                else:
                    sii.excMz = None
    del(tempMasses)
def read_electrostatic_potential(self):
    """Parses the eletrostatic potential for the last ionic step.

    Populates ``self.ngf`` (FFT grid dimensions), ``self.sampling_radii``
    (test-charge radii) and ``self.electrostatic_potential`` (per-site
    average potentials) from the output text.
    """
    pattern = {"ngf": r"\s+dimension x,y,z NGXF=\s+([\.\-\d]+)\sNGYF=\s+([\.\-\d]+)\sNGZF=\s+([\.\-\d]+)"}
    self.read_pattern(pattern, postprocess=int)
    self.ngf = self.data.get("ngf", [[]])[0]
    pattern = {"radii": r"the test charge radii are((?:\s+[\.\-\d]+)+)"}
    # Search from the end of the file and stop at the first hit so only
    # the last ionic step is considered.
    self.read_pattern(pattern, reverse=True, terminate_on_match=True, postprocess=str)
    self.sampling_radii = [float(f) for f in self.data["radii"][0][0].split()]
    header_pattern = r"\(the norm of the test charge is\s+[\.\-\d]+\)"
    table_pattern = r"((?:\s+\d+\s*[\.\-\d]+)+)"
    footer_pattern = r"\s+E-fermi :"
    pots = self.read_table_pattern(header_pattern, table_pattern, footer_pattern)
    # Flatten the table rows into one string, then pull out the values.
    pots = "".join(itertools.chain.from_iterable(pots))
    pots = re.findall(r"\s+\d+\s?([\.\-\d]+)+", pots)
    pots = [float(f) for f in pots]
    self.electrostatic_potential = pots
def is_ignored(resource):
    '''Check of the resource's URL is part of LINKCHECKING_IGNORE_DOMAINS'''
    ignored_domains = current_app.config['LINKCHECKING_IGNORE_DOMAINS']
    url = resource.url
    if not url:
        # Resources without a URL cannot be link-checked at all.
        return True
    return urlparse(url).netloc in ignored_domains
def delmod_cli(argv, alter_logger=True):
    """Command-line access to ``delmod`` functionality.

    The ``delmod`` task deletes "on-the-fly" model information from a
    Measurement Set. It is so easy to implement that a standalone
    function is essentially unnecessary. Just write::

        from pwkit.environments.casa import util
        cb = util.tools.calibrater()
        cb.open('dataset.ms', addcorr=False, addmodel=False)
        cb.delmod(otf=True, scr=False)
        cb.close()

    If you want to delete the scratch columns, use :func:`delcal`. If you
    want to clear the scratch columns, use :func:`clearcal`.
    """
    check_usage(delmod_doc, argv, usageifnoargs=True)
    if alter_logger:
        util.logger()
    calibrater = util.tools.calibrater()
    # Every remaining argument is a Measurement Set path.
    for mspath in argv[1:]:
        calibrater.open(b(mspath), addcorr=False, addmodel=False)
        calibrater.delmod(otf=True, scr=False)
        calibrater.close()
def show(context, id):
    """show(context, id)
    Show a Feeder.

    >>> dcictl feeder-show [OPTIONS]

    :param string id: ID of the feeder to show [required]
    """
    # Fetch the feeder and render it in the caller's requested format.
    utils.format_output(feeder.get(context, id=id), context.format)
def _validate_object_can_be_tagged_with_redactor(self, annotated_object):
    """Validates that the object type can be annotated and object does not have
    conflicting annotations.

    :raises InvalidSpec: when a redactor is already defined on the
        underlying alias/nullable chain, or when the redacted value type
        resolves to a user-defined or void type.
    """
    data_type = annotated_object.data_type
    name = annotated_object.name
    loc = annotated_object._ast_node.lineno, annotated_object._ast_node.path
    curr_data_type = data_type
    # Unwrap alias/nullable layers; aliases have redactors associated
    # with the type itself, so a redactor anywhere in the chain conflicts.
    while isinstance(curr_data_type, Alias) or isinstance(curr_data_type, Nullable):
        if hasattr(curr_data_type, 'redactor') and curr_data_type.redactor:
            raise InvalidSpec("A redactor has already been defined for '%s' by '%s'." % (str(name), str(curr_data_type.name)), *loc)
        curr_data_type = curr_data_type.data_type
    if hasattr(annotated_object, 'redactor') and annotated_object.redactor:
        # Drill through nested maps/lists/nullables down to the element type.
        if is_map_type(curr_data_type) or is_list_type(curr_data_type):
            while True:
                if is_map_type(curr_data_type):
                    curr_data_type = curr_data_type.value_data_type
                else:
                    curr_data_type = curr_data_type.data_type
                should_continue = (is_map_type(curr_data_type) or is_list_type(curr_data_type) or is_nullable_type(curr_data_type))
                if should_continue is False:
                    break
        if is_user_defined_type(curr_data_type) or is_void_type(curr_data_type):
            raise InvalidSpec("Redactors can't be applied to user-defined or void types.", *loc)
def add_directory_digests_for_jars(self, targets_and_jars):
    """For each target, get DirectoryDigests for its jars and return them zipped with the jars.

    :param targets_and_jars: List of tuples of the form
        (Target, [pants.java.jar.jar_dependency_utils.ResolveJar])
    :return: list[tuple[(Target, list[pants.java.jar.jar_dependency_utils.ResolveJar])]
    """
    targets_and_jars = list(targets_and_jars)
    # Nothing to do when there is no work or snapshotting is disabled.
    if not targets_and_jars or not self.get_options().capture_snapshots:
        return targets_and_jars
    jar_paths = []
    for target, jars_to_snapshot in targets_and_jars:
        for jar in jars_to_snapshot:
            jar_paths.append(fast_relpath(jar.pants_path, get_buildroot()))
    # Capture one snapshot per jar path in a single scheduler call.
    snapshots = self.context._scheduler.capture_snapshots(tuple(PathGlobsAndRoot(PathGlobs([jar]), get_buildroot()) for jar in jar_paths))
    # We want to map back the list[Snapshot] to targets_and_jars.
    # We assume that (1) jars_to_snapshot has the same number of ResolveJars as snapshots does Snapshots,
    # and that (2) capture_snapshots preserves ordering.
    digests = [snapshot.directory_digest for snapshot in snapshots]
    digest_iterator = iter(digests)
    snapshotted_targets_and_jars = []
    for target, jars_to_snapshot in targets_and_jars:
        snapshotted_jars = [ResolvedJar(coordinate=jar.coordinate, cache_path=jar.cache_path, pants_path=jar.pants_path, directory_digest=next(digest_iterator)) for jar in jars_to_snapshot]
        snapshotted_targets_and_jars.append((target, snapshotted_jars))
    return snapshotted_targets_and_jars
def _read_opt_calipso(self, code, *, desc):
    """Read HOPOPT CALIPSO option [RFC 5570].

    Wire layout: Option Type | Option Length | CALIPSO Domain of
    Interpretation (4 bytes) | Cmpt Length | Sens Level |
    Checksum (CRC-16) | Compartment Bitmap (optional, variable length).

    Returns a dict with the decoded fields (``desc``, ``type``,
    ``length``, ``domain``, ``cmpt_len``, ``level``, ``chksum`` and,
    when present, ``bitmap``).
    """
    _type = self._read_opt_type(code)
    _size = self._read_unpack(1)
    # NOTE(review): RFC 5570 requires the option length to be >= 8, but
    # this condition only rejects sizes that are both < 8 *and* not a
    # multiple of 8 -- confirm whether ``or`` was intended.
    if _size < 8 and _size % 8 != 0:
        raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format')
    _cmpt = self._read_unpack(4)
    _clen = self._read_unpack(1)
    # Compartment length is counted in 32-bit words, always an even count.
    if _clen % 2 != 0:
        raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format')
    _sens = self._read_unpack(1)
    _csum = self._read_fileng(2)
    opt = dict(desc=desc, type=_type, length=_size + 2, domain=_cmpt, cmpt_len=_clen * 4, level=_sens, chksum=_csum, )
    if _clen:
        # Read the compartment bitmap, 8 bytes (two 32-bit words) at a time.
        _bmap = list()
        for _ in range(_clen // 2):
            _bmap.append(self._read_binary(8))
        opt['bitmap'] = tuple(_bmap)
    # Skip any padding remaining after the bitmap.
    _plen = _size - _clen * 4 - 8
    if _plen:
        self._read_fileng(_plen)
    return opt
def insert_line(self, line):
    """Insert a new line.

    The line goes into the current block when one is open; otherwise it
    is treated as part of the header.
    """
    target = self.header if self.current_block is None else self.current_block
    target.append(line)
def fit(self):
    r"""Loop over distributions and find best parameter to fit the data for each.

    When a distribution is fitted onto the data, we populate a set of
    dataframes:

    - :attr:`df_errors`: sum of the square errors between the data and the
      fitted distribution i.e., :math:`\sum_i \left( Y_i - pdf(X_i) \right)^2`
    - :attr:`fitted_param`: the parameters that best fit the data
    - :attr:`fitted_pdf`: the PDF generated with the parameters that best fit the data

    Indices of the dataframes contains the name of the distribution.
    """
    for distribution in self.distributions:
        try:
            # need a subprocess to check time it takes. If too long, skip it
            dist = eval("scipy.stats." + distribution)
            # TODO here, dist.fit may take a while or just hang forever
            # with some distributions. So, I thought to use signal module
            # to catch the error when signal takes too long. It did not work
            # presumably because another try/exception is inside the
            # fit function, so I used threading with a recipe from stackoverflow
            # See timed_run function above
            param = self._timed_run(dist.fit, distribution, args=self._data)
            # with signal, does not work. maybe because another exception is caught
            pdf_fitted = dist.pdf(self.x, *param)
            # hoping the order returned by fit is the same as in pdf
            self.fitted_param[distribution] = param[:]
            self.fitted_pdf[distribution] = pdf_fitted
            sq_error = pylab.sum((self.fitted_pdf[distribution] - self.y) ** 2)
            if self.verbose:
                print("Fitted {} distribution with error={})".format(distribution, sq_error))
            # compute some errors now
            self._fitted_errors[distribution] = sq_error
        except Exception as err:
            # NOTE(review): any failure lands here, not only timeouts,
            # but the message only mentions the timeout case.
            if self.verbose:
                print("SKIPPED {} distribution (taking more than {} seconds)".format(distribution, self.timeout))
            # if we cannot compute the error, set it to large values
            # FIXME use inf
            self._fitted_errors[distribution] = 1e6
    self.df_errors = pd.DataFrame({'sumsquare_error': self._fitted_errors})
def _get_object_parser(self, json):
    """Parses a json document into a pandas object."""
    shared_kwargs = {
        "orient": self.orient,
        "dtype": self.dtype,
        "convert_axes": self.convert_axes,
        "convert_dates": self.convert_dates,
        "keep_default_dates": self.keep_default_dates,
        "numpy": self.numpy,
        "precise_float": self.precise_float,
        "date_unit": self.date_unit,
    }
    obj = None
    if self.typ == 'frame':
        obj = FrameParser(json, **shared_kwargs).parse()
    # Fall through to a Series parse when requested explicitly, or when
    # the frame parse above produced nothing.
    if self.typ == 'series' or obj is None:
        if not isinstance(self.dtype, bool):
            shared_kwargs['dtype'] = self.dtype
        obj = SeriesParser(json, **shared_kwargs).parse()
    return obj
def advance(self, myDateTime):
    """Advances to the next value and returns an appropriate value for the given
    time.

    :param myDateTime: (datetime) when to fetch the value for
    :return: (float | int) value for given time
    """
    if self.getTime() == myDateTime:
        out = self.next()
        # Sometimes, the stream has no value for this field and returns
        # None; in this case we'll use the last value as well.
        if out is None:
            out = self.last()
    else:
        out = self.last()
    # If there's no more data, we must fetch more.
    # FIX: ``== 0`` instead of ``is 0`` -- identity comparison with int
    # literals is a CPython implementation detail (SyntaxWarning on 3.8+).
    if len(self) == 0:
        self._fetchNextData()
    self._updateMinMax(out)
    if isinstance(out, float):
        self._dataType = "float"
    # Convert to proper data type.
    # FIX: string comparison with ``==`` instead of ``is``.
    if self._dataType == "float":
        out = float(out)
    else:
        out = int(out)
    return out
def receive(self, sequence, args):
    """Receive one packet.

    If the sequence number is one we've already seen before, it is dropped.
    If it is not the next expected sequence number, it is put into the
    _out_of_order queue to be processed once the holes in sequence number
    are filled in.

    Args:
        sequence (int): The sequence number of the received packet
        args (list): The list of packet contents that will be passed to callback
            as callback(*args)
    """
    # If we are told to ignore sequence numbers, just pass the packet on
    if not self._reorder:
        self._callback(*args)
        return
    # If this packet is in the past, drop it
    if self._next_expected is not None and sequence < self._next_expected:
        print("Dropping out of order packet, seq=%d" % sequence)
        return
    # Queue the packet and keep the queue sorted by sequence number.
    self._out_of_order.append((sequence, args))
    self._out_of_order.sort(key=lambda x: x[0])
    # If we have received packets, attempt to process them in order
    while len(self._out_of_order) > 0:
        seq, args = self._out_of_order[0]
        # Stop at the first gap; later packets wait until it is filled.
        if self._next_expected is not None and seq != self._next_expected:
            return
        self._callback(*args)
        self._out_of_order.pop(0)
        self._next_expected = seq + 1
def downsampled_mesh(self, step):
    """Returns a downsampled copy of this mesh.

    Args:
        step: the step size for the sampling

    Returns:
        a new, downsampled Mesh object.

    Raises:
        ValueError if this Mesh has faces.
    """
    from lace.mesh import Mesh
    if self.f is not None:
        raise ValueError('Function `downsampled_mesh` does not support faces.')
    result = Mesh()
    # Take every ``step``-th vertex (and the matching colors, if any).
    if self.v is not None:
        result.v = self.v[::step]
    if self.vc is not None:
        result.vc = self.vc[::step]
    return result
def edit(self, physicalPath, cleanupMode, maxFileAge, description):
    """The server directory's edit operation allows you to change the path
    and clean up properties of the directory.

    This operation updates the GIS service configurations (and points
    them to the new path) that are using this directory, causing them to
    restart, so any edit to the server directories is best performed when
    the server is not under load.  It is mostly used when growing a
    single machine site to a multiple machine site configuration, which
    requires that the server directories and configuration store be put
    on a network-accessible file share.

    Inputs:
        physicalPath - The absolute physical path of the server directory.
        cleanupMode - Defines if files in the server directory needs to
            be cleaned up. The default is NONE.
        maxFileAge - Defines how long a file in the directory needs to
            be kept before it is deleted.
        description - An optional description for the server directory
    """
    params = {
        "f": "json",
        "physicalPath": physicalPath,
        "cleanupMode": cleanupMode,
        "maxFileAge": maxFileAge,
        "description": description,
    }
    return self._post(
        url=self._url + "/edit",
        param_dict=params,
        securityHandler=self._securityHandler,
        proxy_url=self._proxy_url,
        proxy_port=self._proxy_port,
    )
def versions(self):
    """Announce Versions of CLI and Server.

    Args: None
    Returns:
        The running versions of both the CLI and the Workbench Server
    (Note: Python 2 ``print`` statement syntax.)
    """
    print '%s<<< Workbench CLI Version %s >>>%s' % (color.LightBlue, self.version, color.Normal)
    print self.workbench.help('version')
def commit(self):
    """Commit dirty records to the server.

    This method is automatically called when the `auto_commit` option is
    set to `True` (default). It can be useful to set the former option to
    `False` to get better performance by reducing the number of RPC
    requests generated.

    With `auto_commit` set to `True` (default behaviour), each time a value
    is set on a record field a RPC request is sent to the server to update
    the record:

    .. doctest::

        >>> user = odoo.env.user
        >>> user.name = "Joe"               # write({'name': "Joe"})
        >>> user.email = "joe@odoo.net"     # write({'email': "joe@odoo.net"})

    With `auto_commit` set to `False`, changes on a record are sent all at
    once when calling the :func:`commit` method:

    .. doctest::

        >>> odoo.config['auto_commit'] = False
        >>> user = odoo.env.user
        >>> user.name = "Joe"
        >>> user.email = "joe@odoo.net"
        >>> user in odoo.env.dirty
        True
        >>> odoo.env.commit()   # write({'name': "Joe", 'email': "joe@odoo.net"})
        >>> user in odoo.env.dirty
        False

    Only one RPC request is generated in the last case.
    """
    # Iterate on a new set, as we remove record during iteration from the
    # original one
    for record in set(self.dirty):
        values = {}
        for field in record._values_to_write:
            if record.id in record._values_to_write[field]:
                value = record._values_to_write[field].pop(record.id)
                values[field] = value
                # Store the value in the '_values' dictionary. This
                # operation is delegated to each field descriptor as some
                # values can not be stored "as is" (e.g. magic tuples of
                # 2many fields need to be converted)
                record.__class__.__dict__[field].store(record, value)
        # One write() per record with all of its pending field values.
        record.write(values)
        self.dirty.remove(record)
def cropped(self, T0, T1):
    """Returns a cropped copy of the path.

    ``T0`` and ``T1`` are global path parameters in [0, 1].  When
    ``T1 < T0`` the crop wraps across the start/end discontinuity, which
    is only valid for a closed path.
    """
    assert 0 <= T0 <= 1 and 0 <= T1 <= 1
    assert T0 != T1
    assert not (T0 == 1 and T1 == 0)
    # A crop starting exactly at the end of a closed path restarts at 0.
    if T0 == 1 and 0 < T1 < 1 and self.isclosed():
        return self.cropped(0, T1)
    # Locate the segment and local parameter for the end point T1.
    if T1 == 1:
        seg1 = self[-1]
        t_seg1 = 1
        i1 = len(self) - 1
    else:
        seg1_idx, t_seg1 = self.T2t(T1)
        seg1 = self[seg1_idx]
        if np.isclose(t_seg1, 0):
            # T1 falls on a segment boundary: end with the previous segment.
            i1 = (self.index(seg1) - 1) % len(self)
            seg1 = self[i1]
            t_seg1 = 1
        else:
            i1 = self.index(seg1)
    # Locate the segment and local parameter for the start point T0.
    if T0 == 0:
        seg0 = self[0]
        t_seg0 = 0
        i0 = 0
    else:
        seg0_idx, t_seg0 = self.T2t(T0)
        seg0 = self[seg0_idx]
        if np.isclose(t_seg0, 1):
            # T0 falls on a segment boundary: start with the next segment.
            i0 = (self.index(seg0) + 1) % len(self)
            seg0 = self[i0]
            t_seg0 = 0
        else:
            i0 = self.index(seg0)
    if T0 < T1 and i0 == i1:
        # Both endpoints fall inside the same segment.
        new_path = Path(seg0.cropped(t_seg0, t_seg1))
    else:
        new_path = Path(seg0.cropped(t_seg0, 1))
        # T1 < T0 must cross discontinuity case
        if T1 < T0:
            if not self.isclosed():
                raise ValueError("This path is not closed, thus T0 must "
                                 "be less than T1.")
            else:
                for i in range(i0 + 1, len(self)):
                    new_path.append(self[i])
                for i in range(0, i1):
                    new_path.append(self[i])
        # T0 < T1 straight-forward case
        else:
            for i in range(i0 + 1, i1):
                new_path.append(self[i])
        if t_seg1 != 0:
            new_path.append(seg1.cropped(0, t_seg1))
    return new_path
def _uninstall ( cls ) :
"""uninstall the hook if installed""" | if cls . _hook :
sys . meta_path . remove ( cls . _hook )
cls . _hook = None |
def set_window_geometry(geometry):
    """Set window geometry.

    Parameters
    ----------
    geometry : tuple (4 integers) or None
        x, y, dx, dy values employed to set the Qt backend geometry.
    """
    if geometry is None:
        return
    x_geom, y_geom, dx_geom, dy_geom = geometry
    mngr = plt.get_current_fig_manager()
    # Only window-based backends (e.g. Qt) expose a ``window`` attribute.
    if 'window' in dir(mngr):
        try:
            mngr.window.setGeometry(x_geom, y_geom, dx_geom, dy_geom)
        except AttributeError:
            # Backend window exists but does not support setGeometry.
            pass
async def restart(request: web.Request) -> web.Response:
    """Restart the robot.

    Blocks while the restart lock is held, then schedules the actual
    restart one second out so this HTTP response can be delivered first.
    """
    async with request.app[RESTART_LOCK_NAME]:
        # Delay gives the response below time to reach the client.
        asyncio.get_event_loop().call_later(1, _do_restart)
        return web.json_response({'message': 'Restarting in 1s'}, status=200)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.