signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def NoSuchEntityOk(f):
    """Decorator that swallows boto ``NoSuchEntity`` errors.

    Calls ``f``; if it raises a ``BotoServerError`` whose ``error_code`` is
    ``'NoSuchEntity'``, the error is suppressed and ``False`` is returned.
    Any other exception propagates unchanged.  (The original also had a
    redundant bare ``except: raise`` clause, removed here — re-raising is
    the default behavior for unhandled exceptions.)
    """
    def ExceptionFilter(*args):
        try:
            return f(*args)
        except boto.exception.BotoServerError as e:
            # Only "the entity does not exist" is acceptable here.
            if e.error_code != 'NoSuchEntity':
                raise
        return False
    return ExceptionFilter
def save(self):
    """Write the thematic map to ``self.filename`` as a FITS file.

    The primary HDU holds the thematic map image (``self.thmap``) plus
    metadata copied from ``self.ref_hdr``; a BinTable extension lists the
    map value / feature-name pairs.  (Modified from SUVI code by vhsu.)
    """
    pri_hdu = fits.PrimaryHDU(data=self.thmap)

    # Temporal Information
    date_fmt = '%Y-%m-%dT%H:%M:%S.%f'
    date_beg = self.start_time.strftime(date_fmt)
    date_end = self.end_time.strftime(date_fmt)
    date_now = datetime.utcnow().strftime(date_fmt)
    self.set_fits_header("TIMESYS", self.ref_hdr, pri_hdu)
    pri_hdu.header.append(("DATE-BEG", date_beg, "sun observation start time on sat"))
    pri_hdu.header.append(("DATE-END", date_end, "sun observation end time on sat"))
    pri_hdu.header.append(("DATE", date_now, "file generation time"))
    pri_hdu.header.append(("EXPERT", self.config.expert, "person who labeled image"))
    pri_hdu.header.append(("DATE-LAB", date_now, "date of labeling for the image"))

    # Instrument & Spacecraft State during Observation
    pri_hdu.header.append(("EXPTIME", 1., "[s] effective imaging exposure time"))
    self.set_fits_header("YAW_FLIP", self.ref_hdr, pri_hdu)
    self.set_fits_header("ECLIPSE", self.ref_hdr, pri_hdu)

    # Pointing & Projection: copy WCS keywords from the reference header,
    # in the same order the original code listed them.
    for keyword in ("WCSNAME", "CTYPE1", "CTYPE2", "CUNIT1", "CUNIT2",
                    "PC1_1", "PC1_2", "PC2_1", "PC2_2",
                    "CDELT1", "CDELT2", "CRVAL1", "CRVAL2",
                    "CRPIX1", "CRPIX2", "DIAM_SUN", "LONPOLE",
                    "CROTA", "SOLAR_B0"):
        self.set_fits_header(keyword, self.ref_hdr, pri_hdu)

    # File Provenance
    pri_hdu.header.append(("TITLE", "Expert Labeled Thematic Map Image", "image title"))
    pri_hdu.header.append(("MAP_MTHD", "human", "thematic map classifier method"))

    # Add COMMENT section separators after the anchor keywords.  A missing
    # anchor (degraded map) raises from header.index(); keep best-effort
    # behavior but catch Exception instead of the original bare except.
    rule = '------------------------------------------------------------------------'
    comment_sections = [
        ("TITLE", [rule,
                   'USING SUVI THEMATIC MAP FILES',
                   rule,
                   'Map labels are described in the FITS extension.',
                   'Example:',
                   'from astropy.io import fits as pyfits',
                   'img = pyfits.open(<filename>)',
                   'map_labels = img[1].data',
                   rule,
                   'TEMPORAL INFORMATION',
                   rule]),
        ("DATE", [rule, 'INSTRUMENT & SPACECRAFT STATE DURING OBSERVATION', rule]),
        ("ECLIPSE", [rule, 'POINTING & PROJECTION', rule]),
        ("SOLAR_B0", [rule, 'FILE PROVENANCE', rule]),
    ]
    try:
        for anchor, comments in comment_sections:
            for offset, text in enumerate(comments, start=1):
                pri_hdu.header.insert(pri_hdu.header.index(anchor) + offset,
                                      ("COMMENT", text))
    except Exception:
        print("This thematic map may be degraded and missing many keywords.")

    # Thematic map feature list (Secondary HDU extension)
    map_val = []
    map_label = []
    for key, value in self.config.solar_class_index.items():
        map_label.append(key)
        map_val.append(value)
    c1 = fits.Column(name="Thematic Map Value", format="B", array=np.array(map_val))
    c2 = fits.Column(name="Feature Name", format="22A", array=np.array(map_label))
    bintbl_hdr = fits.Header([("XTENSION", "BINTABLE")])
    sec_hdu = fits.BinTableHDU.from_columns([c1, c2], header=bintbl_hdr)

    # Output thematic map as the primary HDU and the list of map features
    # as an extension BinTable HDU.
    hdu = fits.HDUList([pri_hdu, sec_hdu])
    hdu.writeto(self.filename, overwrite=True, checksum=True)
def _export_section(sections, pc):
    """Switch chron data to index-by-number.

    :param dict sections: Metadata
    :param pc: paleo/chron tag used only for log messages
    :return list _sections: Metadata
    """
    logger_jsons.info("enter export_data: {}".format(pc))
    exported = []
    for name, section in sections.items():
        # Convert chron models, when present.
        if "model" in section:
            section["model"] = _export_model(section["model"])
        # Re-index the measurement table by number.
        if "measurementTable" in section:
            section["measurementTable"] = _idx_table_by_num(section["measurementTable"])
        # Only the (updated) section itself goes to the output list.
        exported.append(section)
    logger_jsons.info("exit export_data: {}".format(pc))
    return exported
def watch_pending_transactions(self, callback):
    '''Callback will receive one argument: the transaction object just observed.
    This is equivalent to `eth.filter('pending')`.'''
    self.pending_tx_watchers.append(callback)
    # Install the underlying filter only for the very first watcher.
    is_first_watcher = len(self.pending_tx_watchers) == 1
    if is_first_watcher:
        eth.filter('pending').watch(self._new_pending_tx)
def get_ngrams(self, minimum, maximum, filter_ngrams):
    """Returns a generator supplying the n-grams (`minimum` <= n <= `maximum`)
    for this text.

    Each iteration of the generator supplies a tuple consisting of the size
    of the n-grams and a `collections.Counter` of the n-grams.

    :param minimum: minimum n-gram size
    :type minimum: `int`
    :param maximum: maximum n-gram size
    :type maximum: `int`
    :param filter_ngrams: n-grams that must be contained by the generated n-grams
    :type filter_ngrams: `list`
    :rtype: `generator`
    """
    tokens = self.get_tokens()
    pattern = self.get_filter_ngrams_pattern(filter_ngrams)
    size = minimum
    while size <= maximum:
        counts = collections.Counter(self._ngrams(tokens, size, pattern))
        yield (size, counts)
        size += 1
def main():
    """Executes the given command. Returns error_message if command is not valid.

    Returns:
        Output of the given command, or error message if command is not valid.
    """
    # No subcommand supplied at all.
    if len(sys.argv) < 2:
        return error_message()
    command = sys.argv[1]
    try:
        module = importlib.import_module('i18n.%s' % command)
        # Hand the remaining CLI arguments to the subcommand.
        module.main.args = sys.argv[2:]
    except (ImportError, AttributeError):
        return error_message()
    return module.main()
def dict_to_object(source):
    """Returns an object with the key-value pairs in source as attributes."""
    target = inspectable_class.InspectableClass()
    for key in source:
        setattr(target, key, source[key])
    return target
def offer(self, requestType, *args):
    """Public interface to the reactor.

    Enqueues ``(requestType, [arg, ...])`` on the work queue when a handler
    is registered for ``requestType``; otherwise logs and ignores the request.

    :param requestType: key identifying the registered handler
    :param args: positional arguments for the handler
    :return: None
    """
    if self._funcsByRequest.get(requestType) is not None:
        # BUG FIX: was list(*args), which unpacks the varargs tuple into
        # list() and raises TypeError unless exactly one iterable was
        # passed.  list(args) enqueues the arguments themselves.
        self._workQueue.put((requestType, list(args)))
    else:
        logger.error("Ignoring unknown request on reactor " + self._name + " " + requestType)
def document(self, wrapper):
    """Get the document root. For I{document/literal}, this is the name of the
    wrapper element qualified by the schema's target namespace.

    @param wrapper: The method name.
    @type wrapper: L{xsd.sxbase.SchemaObject}
    @return: A root element.
    @rtype: L{Element}
    """
    schema_object = wrapper[1]
    return Element(schema_object.name, ns=schema_object.namespace("ns0"))
def check_lazy_load_wegsegment(f):
    '''Decorator function to lazy load a :class:`Wegsegment`.'''
    def wrapper(*args):
        wegsegment = args[0]
        # Any missing field triggers a full reload from the gateway.
        needs_load = (wegsegment._methode_id is None
                      or wegsegment._geometrie is None
                      or wegsegment._metadata is None)
        if needs_load:
            log.debug('Lazy loading Wegsegment %d', wegsegment.id)
            wegsegment.check_gateway()
            loaded = wegsegment.gateway.get_wegsegment_by_id(wegsegment.id)
            wegsegment._methode_id = loaded._methode_id
            wegsegment._geometrie = loaded._geometrie
            wegsegment._metadata = loaded._metadata
        return f(*args)
    return wrapper
def mask_and_mean_loss(input_tensor, binary_tensor, axis=None):
    """Mask a loss with a 0/1 tensor and average only over the kept entries.

    :param input_tensor: A float tensor of shape [batch_size, ...]
        representing the loss / cross-entropy.
    :param binary_tensor: A float tensor of shape [batch_size, ...]
        representing the mask.
    :param axis: The dimensions to reduce.  If None (the default), reduces
        all dimensions.  Must be in the range
        [-rank(input_tensor), rank(input_tensor)).
    :return: A float tensor representing the masked, mean-reduced loss.
    """
    masked = mask_loss(input_tensor, binary_tensor)
    return mean_on_masked(masked, binary_tensor, axis=axis)
def simxSetObjectSelection(clientID, objectHandles, operationMode):
    '''Please have a look at the function description/documentation in the V-REP user manual'''
    count = len(objectHandles)
    # Marshal the Python handle list into a C int array for the native call.
    handle_array = (ct.c_int * count)(*objectHandles)
    return c_SetObjectSelection(clientID, handle_array, count, operationMode)
def _flatten_up_to_token(self, token):
    """Yields all tokens up to token but excluding current."""
    target = token
    if target.is_group:
        # For a group, stop at its first flattened (leaf) token instead.
        target = next(target.flatten())
    for candidate in self._curr_stmt.flatten():
        if candidate == target:
            return
        yield candidate
def isvalid(self):
    """Checks whether contents of repo are consistent with standard set."""
    tracked = {entry.rstrip('\n') for entry in self.repo.bake('ls-files')()}
    on_disk = os.listdir(self.repopath)
    # Every standard file must be both git-tracked and present on disk.
    return all(name in tracked and name in on_disk for name in std_files)
def combine(self, *rnf_profiles):
    """Combine more profiles into this one, keeping the maximum of each width.

    Args:
        *rnf_profiles (rnftools.rnfformat.RnfProfile): RNF profiles to merge in.
    """
    # BUG FIX: the original signature omitted ``self`` even though the body
    # reads and writes ``self.*`` attributes, so calling it as a method
    # raised NameError (the instance landed in *rnf_profiles instead).
    for rnf_profile in rnf_profiles:
        self.prefix_width = max(self.prefix_width, rnf_profile.prefix_width)
        self.read_tuple_id_width = max(self.read_tuple_id_width, rnf_profile.read_tuple_id_width)
        self.genome_id_width = max(self.genome_id_width, rnf_profile.genome_id_width)
        self.chr_id_width = max(self.chr_id_width, rnf_profile.chr_id_width)
        self.coor_width = max(self.coor_width, rnf_profile.coor_width)
def send_frame(self, frame):
    '''Send a single frame. If there is no transport or we're not connected
    yet, append to the output buffer, else send immediately to the socket.
    This is called from within the MethodFrames.'''
    if self._closed:
        info = self._close_info
        if info and len(info['reply_text']) > 0:
            raise ConnectionClosed(
                "connection is closed: %s : %s" %
                (info['reply_code'], info['reply_text']))
        raise ConnectionClosed("connection is closed")

    # Not ready to write yet: buffer everything except channel-0 frames,
    # which must flow during connection negotiation.
    if self._transport is None or (not self._connected and frame.channel_id != 0):
        self._output_frame_buffer.append(frame)
        return

    if self._debug > 1:
        self.logger.debug("WRITE: %s", frame)

    payload = bytearray()
    frame.write_frame(payload)
    if len(payload) > self._frame_max:
        # Oversized frame: close the connection, then raise with the
        # close info that close() recorded.
        self.close(
            reply_code=501,
            reply_text='attempted to send frame of %d bytes, frame max %d' %
                       (len(payload), self._frame_max),
            class_id=0, method_id=0, disconnect=True)
        raise ConnectionClosed(
            "connection is closed: %s : %s" %
            (self._close_info['reply_code'], self._close_info['reply_text']))
    self._transport.write(payload)
    self._frames_written += 1
def _GetNextInterval(self):
    """Returns the next Range of the file that is to be hashed.

    For all fingers, inspect their next expected range, and return the
    lowest uninterrupted range of interest.  If the range is larger than
    BLOCK_SIZE, truncate it.

    Returns:
        Next range of interest in a Range namedtuple, or None when no
        finger has a pending range.
    """
    current = [finger.CurrentRange() for finger in self.fingers]
    starts = {r.start for r in current if r}
    ends = {r.end for r in current if r}
    if not starts:
        return None
    min_start = min(starts)
    starts.discard(min_start)
    # A later range's start interrupts the current interval, so the
    # remaining starts also bound the interval's end.
    ends |= starts
    min_end = min(min(ends), min_start + self.BLOCK_SIZE)
    return Range(min_start, min_end)
def date_from_isoformat(isoformat_date):
    """Convert an ISO-8601 date into a `datetime.date` object.

    Argument:
        isoformat_date (str): a date in ISO-8601 format (YYYY-MM-DD)
    Returns:
        ~datetime.date: the object corresponding to the given ISO date.
    Raises:
        ValueError: when the date could not be converted successfully.
    See Also:
        `ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
    """
    parts = isoformat_date.split('-')
    year, month, day = (int(part) for part in parts)
    return datetime.date(year, month, day)
def get_crimes_location(self, location_id, date=None):
    """Get crimes at a particular snap-point location.  Uses the
    crimes-at-location_ API call.

    .. _crimes-at-location:
        https://data.police.uk/docs/method/crimes-at-location/

    :rtype: list
    :param int location_id: The ID of the location to get crimes for.
    :param date: The month in which the crimes were reported in the format
        ``YYYY-MM`` (the latest date is used if ``None``).
    :type date: str or None
    :return: A ``list`` of :class:`Crime` objects which were snapped to the
        :class:`Location` with the specified ID in the given month.
    """
    kwargs = {'location_id': location_id}
    if date is not None:
        kwargs['date'] = date
    return [Crime(self, data=payload)
            for payload in self.service.request('GET', 'crimes-at-location', **kwargs)]
def importProteinDatabase(filePath, proteindb=None, decoyTag='[decoy]',
                          contaminationTag='[cont]', headerParser=None,
                          forceId=False, cleavageRule='[KR]', minLength=5,
                          maxLength=40, missedCleavage=2,
                          ignoreIsoleucine=False, removeNtermM=True):
    """Generates a :class:`ProteinDatabase` by in silico digestion of proteins
    from a fasta file.

    :param filePath: File path
    :param proteindb: optional, an existing :class:`ProteinDatabase` can be
        specified, otherwise a new instance is generated and returned
    :param decoyTag: If a fasta file contains decoy protein entries, they
        should be specified with a sequence tag
    :param contaminationTag: If a fasta file contains contamination protein
        entries, they should be specified with a sequence tag
    :param headerParser: optional, a headerParser can be specified
    :param forceId: bool, if True and no id can be extracted from the fasta
        header the whole header sequence is used as a protein id instead of
        raising an exception.
    :param cleavageRule: cleavage rule expressed in a regular expression, see
        :attr:`maspy.constants.expasy_rules`
    :param missedCleavage: number of allowed missed cleavage sites
    :param removeNtermM: bool, True to consider also peptides with the
        N-terminal Methionine of the protein removed
    :param minLength: int, only yield peptides with length >= minLength
    :param maxLength: int, only yield peptides with length <= maxLength
    :param ignoreIsoleucine: bool, if True treat Isoleucine and Leucine in
        peptide sequences as indistinguishable

    See also :func:`maspy.peptidemethods.digestInSilico`.
    """
    proteindb = ProteinDatabase() if proteindb is None else proteindb
    for header, sequence in _readFastaFile(filePath):
        proteinTags = list()
        # Strip decoy / contamination tags from the header, remembering them
        # so they can be prepended to the protein id and name.
        if header.startswith(decoyTag):
            isDecoy = True
            header = header.replace(decoyTag, '')
            proteinTags.append(decoyTag)
        else:
            isDecoy = False
        if header.startswith(contaminationTag):
            isCont = True
            header = header.replace(contaminationTag, '')
            proteinTags.append(contaminationTag)
        else:
            isCont = False
        headerInfo = _extractFastaHeader(header, headerParser, forceId)
        proteinId = ''.join(itertools.chain(proteinTags, [headerInfo['id']]))
        if 'name' in headerInfo:
            proteinName = ''.join(itertools.chain(proteinTags, [headerInfo['name']]))
        else:
            proteinName = proteinId
        if proteinId not in proteindb.proteins:
            protein = ProteinSequence(proteinId, sequence)
            protein.name = proteinName
            protein.fastaHeader = header
            protein.fastaInfo = headerInfo
            proteindb.proteins[protein.id] = protein
        # Perform the in silico digestion
        _digestion = maspy.peptidemethods.digestInSilico(
            sequence, cleavageRule, missedCleavage, removeNtermM,
            minLength, maxLength)
        # Add peptides to the protein database
        for unmodPeptide, info in _digestion:
            if ignoreIsoleucine:
                # Key peptides on their I->L normalised sequence, but keep
                # the original sequence as an alias to the same entry.
                unmodPeptideNoIsoleucine = unmodPeptide.replace('I', 'L')
                if unmodPeptideNoIsoleucine in proteindb.peptides:
                    currPeptide = proteindb.peptides[unmodPeptideNoIsoleucine]
                else:
                    currPeptide = PeptideSequence(unmodPeptideNoIsoleucine,
                                                  mc=info['missedCleavage'])
                    proteindb.peptides[unmodPeptideNoIsoleucine] = currPeptide
                if unmodPeptide not in proteindb.peptides:
                    proteindb.peptides[unmodPeptide] = currPeptide
            else:
                if unmodPeptide in proteindb.peptides:
                    currPeptide = proteindb.peptides[unmodPeptide]
                else:
                    currPeptide = PeptideSequence(unmodPeptide,
                                                  mc=info['missedCleavage'])
                    proteindb.peptides[unmodPeptide] = currPeptide
            if proteinId not in currPeptide.proteins:
                currPeptide.proteins.add(proteinId)
                # TODO: change that a peptide can appear multiple times in a
                # protein sequence.
                currPeptide.proteinPositions[proteinId] = (info['startPos'],
                                                           info['endPos'])
    # Add peptide entries to the protein entries, define whether a peptide
    # can be uniquely assigned to a single protein (.isUnique = True).
    for peptide, peptideEntry in viewitems(proteindb.peptides):
        numProteinMatches = len(peptideEntry.proteins)
        if numProteinMatches == 1:
            peptideEntry.isUnique = True
        elif numProteinMatches > 1:
            peptideEntry.isUnique = False
        else:
            # BUG FIX: the original literals concatenated to
            # "...peptidesequence: "; the missing space is added here.
            raise Exception('No protein matches in proteindb for peptide '
                            'sequence: ' + peptide)
        for proteinId in peptideEntry.proteins:
            if peptideEntry.isUnique:
                proteindb.proteins[proteinId].uniquePeptides.add(peptide)
            else:
                proteindb.proteins[proteinId].sharedPeptides.add(peptide)
    # Check protein entries if the digestion generated at least one peptide
    # that is uniquely assigned to the protein (.isUnique = True).
    for proteinEntry in viewvalues(proteindb.proteins):
        proteinEntry.isUnique = len(proteinEntry.uniquePeptides) > 0
    # NOTE: TODO, although isoleucine is ignored, the protein entry should
    # only show the actually present ILE/LEU occurrence, not any possibilities.
    return proteindb
def plural_verb(self, text, count=None):
    """Return the plural of text, where text is a verb.

    If count supplied, then return text if count is one of:
        1, a, an, one, each, every, this, that
    otherwise return the plural.

    Whitespace at the start and end is preserved.
    """
    pre, word, post = self.partition_word(text)
    if not word:
        return text
    # Special-verb handling wins; fall back to the general verb rules.
    plural_form = (self._pl_special_verb(word, count)
                   or self._pl_general_verb(word, count))
    return "{}{}{}".format(pre, self.postprocess(word, plural_form), post)
def get_project_versions(self, key, expand=None):
    """Contains a full representation of the specified project's versions.

    :param key: project key
    :param expand: the parameters to expand
    :return: response of the versions endpoint
    """
    params = {} if expand is None else {'expand': expand}
    return self.get('rest/api/2/project/{}/versions'.format(key), params=params)
def new_dataset(self, *args, **kwargs):
    """Creates a new dataset.

    :param args: Positional args passed to the Dataset constructor.
    :param kwargs: Keyword args passed to the Dataset constructor.
    :return: :class:`ambry.orm.Dataset`
    :raises: :class:`ambry.orm.ConflictError` if a Dataset record already
        exists with the given vid
    """
    dataset = Dataset(*args, **kwargs)
    try:
        self.session.add(dataset)
        self.session.commit()
    except IntegrityError as e:
        # Roll back the failed insert and surface a domain-level error.
        self.session.rollback()
        raise ConflictError("Can't create dataset '{}'; one probably already exists: {} ".format(str(dataset), e))
    dataset._database = self
    return dataset
def _get_from_registry(self):
    """Retrieves the path to the default Java installation stored in the
    Windows registry (read through the ``/proc/registry`` filesystem).

    :return: The path found in the registry, or None
    """
    from ._windows import reg_keys
    for location in reg_keys:
        base = "/proc/registry/HKEY_LOCAL_MACHINE/{}".format(location.replace('\\', '/'))
        try:
            with open(base + "/CurrentVersion") as f:
                current_version = f.read().split('\x00')[0]
            # NOTE(review): key name "RunTimeLib" is kept as-is from the
            # original; verify the capitalisation against the registry.
            with open(base + "/" + current_version + "/RunTimeLib") as f:
                return f.read().split('\x00')[0]
        except OSError:
            # This candidate key is absent; try the next location.
            pass
    return None
def mapred(self, transport, inputs, query, timeout):
    """mapred(inputs, query, timeout)

    Executes a MapReduce query.

    .. note:: This request is automatically retried :attr:`retries`
       times if it fails due to network error.

    :param inputs: the input list/structure
    :type inputs: list, dict
    :param query: the list of query phases
    :type query: list
    :param timeout: the query timeout
    :type timeout: integer, None
    :rtype: mixed
    """
    _validate_timeout(timeout)
    result = transport.mapred(inputs, query, timeout)
    return result
def get_upregulated_genes(self) -> VertexSeq:
    """Get genes that are up-regulated.

    :return: Up-regulated genes.
    """
    up_regulated = self.graph.vs.select(self._is_upregulated_gene)
    count = len(up_regulated)
    logger.info(f"No. of up-regulated genes after laying on network: {count}")
    return up_regulated
def __request(self, method, endpoint, data, params=None, **kwargs):
    """Build auth/headers and perform an HTTP request against the API."""
    if params is None:
        params = {}
    url = self.__get_url(endpoint)
    auth = None
    headers = {
        "user-agent": "WooCommerce API Client-Python/%s" % __version__,
        "accept": "application/json",
    }
    if self.is_ssl is True and self.query_string_auth is False:
        # TLS: use HTTP basic auth with the consumer credentials.
        auth = (self.consumer_key, self.consumer_secret)
    elif self.is_ssl is True and self.query_string_auth is True:
        # TLS with credentials passed in the query string.
        params.update({"consumer_key": self.consumer_key,
                       "consumer_secret": self.consumer_secret})
    else:
        # Plain HTTP: sign the full URL with OAuth.
        url = "%s?%s" % (url, urlencode(params))
        url = self.__get_oauth_url(url, method, **kwargs)
    if data is not None:
        data = jsonencode(data, ensure_ascii=False).encode('utf-8')
        headers["content-type"] = "application/json;charset=utf-8"
    return request(method=method, url=url, verify=self.verify_ssl, auth=auth,
                   params=params, data=data, timeout=self.timeout,
                   headers=headers, **kwargs)
def Database_executeSQL(self, databaseId, query):
    """Function path: Database.executeSQL

    Domain: Database
    Method name: executeSQL

    Parameters:
        Required arguments:
            'databaseId' (type: DatabaseId) -> No description
            'query' (type: string) -> No description
    Returns:
        'columnNames' (type: array) -> No description
        'values' (type: array) -> No description
        'sqlError' (type: Error) -> No description
    """
    assert isinstance(query, (str,)), \
        "Argument 'query' must be of type '['str']'. Received type: '%s'" % type(query)
    return self.synchronous_command('Database.executeSQL',
                                    databaseId=databaseId, query=query)
def select_elements(self, json_string, expr):
    """Return list of elements from _json_string_, matching
    [http://jsonselect.org/|JSONSelect] expression.

    *DEPRECATED* JSON Select query language is outdated and not supported
    any more.  Use other keywords of this library to query JSON.

    *Args:*\n
        _json_string_ - JSON string;\n
        _expr_ - JSONSelect expression;

    *Returns:*\n
        List of found elements or ``None`` if no elements were found
    """
    parsed = self.string_to_json(json_string)
    # Evaluate the JSONSelect expression and materialize the matches.
    matches = list(jsonselect.match(sel=expr, obj=parsed))
    return matches or None
def stop(self):
    """all links also need stop() to stop their runloops"""
    self.keep_listening = False
    # A threaded link exposes join(); wait for its thread before reporting.
    if hasattr(self, 'join'):
        self.join()
    self.log("Went down.")
    return True
def __start(self):
    """Start a new thread to process Cron."""
    worker = Thread(target=self.__loop, args=())
    # Daemonize so the loop thread never blocks interpreter shutdown.
    worker.daemon = True
    worker.start()
    self.__enabled = True
def get_ocv_old(self, cycle_number=None, ocv_type='ocv', dataset_number=None):
    """Find ocv data in DataSet (voltage vs time).

    Args:
        cycle_number (int): find for all cycles if None.
        ocv_type ("ocv", "ocvrlx_up", "ocvrlx_down"):
            ocv - get up and down (default)
            ocvrlx_up - get up
            ocvrlx_down - get down
        dataset_number (int): test number (default first)
            (usually not used).

    Returns:
        if cycle_number is not None:
            ocv or [ocv_up, ocv_down]; ocv (and ocv_up and ocv_down) are
            lists containing [time, voltage] (that are Series).
        if cycle_number is None:
            [ocv1, ocv2, ... ocvN, ...] N = cycle; ocvN = pandas DataFrame
            containing the columns cycle index, step time, step index,
            data point, datetime, voltage.
        (TODO: check if copy or reference of dfdata is returned)
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return
    if ocv_type not in ('ocvrlx_up', 'ocvrlx_down'):
        # Default: fetch both relaxation directions.
        ocv_up = self._get_ocv(dataset_number=None, ocv_type='ocvrlx_up',
                               select_last=True, select_columns=True,
                               cycle_number=cycle_number)
        ocv_down = self._get_ocv(dataset_number=None, ocv_type='ocvrlx_down',
                                 select_last=True, select_columns=True,
                                 cycle_number=cycle_number)
        return ocv_up, ocv_down
    return self._get_ocv(dataset_number=None, ocv_type=ocv_type,
                         select_last=True, select_columns=True,
                         cycle_number=cycle_number)
def profile(message=None, verbose=False):
    """Decorator for profiling a function.

    TODO: Support `@profile` syntax (without parens).  This would involve
    inspecting the args.  In this case `profile` would receive a single
    argument, which is the function to be decorated.
    """
    import functools
    from harrison.registered_timer import RegisteredTimer

    class DecoratorTimer(RegisteredTimer):
        # Adjust the call stack index so the Timer use is attributed to
        # profile()'s caller rather than this wrapper.
        _CALLER_STACK_INDEX = 2

    def decorator(fn):
        label = message or fn.__name__

        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            with DecoratorTimer(desc=label, verbose=verbose):
                return fn(*args, **kwargs)
        return wrapped
    return decorator
def describe_unique_1d(series):
    """Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    return pd.Series(data=[base.S_TYPE_UNIQUE], index=['type'], name=series.name)
def emit(self, prob_min=0.0, prob_max=1.0):
    """Consider the motif as a generative model and have it emit a sequence.

    One letter is drawn per position by inverting the cached cumulative
    distribution with a single uniform draw in [prob_min, prob_max).
    """
    if not self.cumP:
        # Build and cache a cumulative distribution per position.
        for logcol in self.logP:
            weighted = sorted((math.pow(2, logcol[letter]), letter)
                              for letter in ACGT)
            cumu = []
            total = 0
            for p, letter in weighted:
                total += p
                cumu.append((total, letter))
            self.cumP.append(cumu)
    out = []
    draw = (prob_max - prob_min) * random() + prob_min
    for cumu in self.cumP:
        last = 0
        for p, letter in cumu:
            if last < draw <= p:
                break
            last = p
        # If no bin matched (draw beyond the cumulative total), the last
        # letter remains bound and is used — same fallback as the original.
        out.append(letter)
    return ''.join(out)
def cli_form(self, *args):
    """Display a schemata's form definition."""
    target = args[0]
    if target == '*':
        # Dump the form of every known schema.
        for schema in schemastore:
            self.log(schema, ':', schemastore[schema]['form'], pretty=True)
    else:
        self.log(schemastore[target]['form'], pretty=True)
def delete_if_not_in_zsets(self, key, member, set_list, client=None):
    """Removes ``key`` only if ``member`` is not a member of any sets in the
    ``set_list``.  Returns the number of removed elements (0 or 1)."""
    all_keys = [key] + set_list
    return self._delete_if_not_in_zsets(keys=all_keys, args=[member], client=client)
def getDefaultApplicationForMimeType(self, pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen):
    """return the app key that will open this mime type"""
    # Thin pass-through to the native function-table entry.
    return self.function_table.getDefaultApplicationForMimeType(
        pchMimeType, pchAppKeyBuffer, unAppKeyBufferLen)
def all_groupings(partition):
    """Return all possible groupings of states for a particular coarse graining
    (partition) of a network.

    Args:
        partition (tuple[tuple]): A partition of micro-elements into macro
            elements.

    Yields:
        tuple[tuple[tuple]]: A grouping of micro-states into macro states of
            system.

    TODO: document exactly how to interpret the grouping.
    """
    if not all(partition):
        raise ValueError('Each part of the partition must have at least one '
                         'element.')
    # Singleton parts only ever allow the trivial state grouping [[0], [1]];
    # larger parts enumerate every partition of their state count.
    micro_groupings = []
    for part in partition:
        if len(part) > 1:
            micro_groupings.append(_partitions_list(len(part) + 1))
        else:
            micro_groupings.append([[[0], [1]]])
    for grouping in itertools.product(*micro_groupings):
        # Keep only groupings in which every element has fewer than 3 states.
        if all(len(element) < 3 for element in grouping):
            yield tuple(tuple(tuple(tuple(state) for state in states)
                              for states in grouping))
def list_upgrades(refresh=True, backtrack=3, **kwargs):  # pylint: disable=W0613
    '''
    List all available package upgrades.

    refresh
        Whether or not to sync the portage tree before checking for upgrades.

    backtrack
        Specifies an integer number of times to backtrack if dependency
        calculation fails due to a conflict or an unsatisfied dependency
        (default: 3).

        .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_upgrades
    '''
    # Optionally sync the portage tree first, then delegate the actual check.
    do_refresh = salt.utils.data.is_true(refresh)
    if do_refresh:
        refresh_db()
    return _get_upgradable(backtrack)
def get_publickey(keydata):
    """Load the public key from a PEM encoded string."""
    backend = default_backend()
    try:
        # First try to interpret the data as a PEM public key.
        return serialization.load_pem_public_key(keydata, backend=backend)
    except ValueError:
        # Fall back: the PEM may hold an unencrypted private key; derive its
        # public half instead.
        private = serialization.load_pem_private_key(
            keydata, password=None, backend=backend)
        return private.public_key()
def log_in(self):
    """Perform the `log_in` task to setup the API session for future data requests."""
    if not self.password:
        # Password wasn't given; ask for it interactively now.
        self.password = getpass.getpass('Password: ')
    utils.pending_message('Performing login...')
    result = self.client.login(account=self.account, password=self.password)
    if 'error' in result:
        self.handle_failed_login(result)
    utils.info_message('Login successful')
def get_AV_infinity(ra, dec, frame='icrs'):
    """Gets the A_V extinction at infinity for a given line of sight.

    Queries the NED database.

    :param ra, dec:
        Desired coordinates, in degrees.
    :param frame: (optional)
        Frame of input coordinates (e.g., ``'icrs', 'galactic'``)

    :raises RuntimeError: if the NED response contains no A_V value.
    """
    coords = SkyCoord(ra, dec, unit='deg', frame=frame).transform_to('icrs')
    rah, ram, ras = coords.ra.hms
    decd, decm, decs = coords.dec.dms
    # BUGFIX: decide the sign from the full declination, not from the degrees
    # field alone -- for 0 <= dec < 1 the old check (decd > 0) produced a
    # wrong minus sign because decd == 0 there.
    if coords.dec.deg >= 0:
        decsign = '%2B'  # URL-encoded '+'
    else:
        decsign = '%2D'  # URL-encoded '-'
    url = ('http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial'
           '&in_equinox=J2000.0&obs_epoch=2010&lon=' + '%i' % rah +
           '%3A' + '%i' % ram + '%3A' + '%05.2f' % ras +
           '&lat=%s' % decsign + '%i' % abs(decd) + '%3A' + '%i' % abs(decm) +
           '%3A' + '%05.2f' % abs(decs) +
           '&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0')
    AV = None
    for line in urllib.request.urlopen(url).readlines():
        # BUGFIX: raw bytes literal -- the escapes were previously emitted as
        # invalid escape sequences in a plain bytes string.
        m = re.search(rb'^Landolt V \(0.54\)\s+(\d+\.\d+)', line)
        if m:
            AV = float(m.group(1))
            break
    if AV is None:
        raise RuntimeError('AV query fails! URL is {}'.format(url))
    return AV
def reshape_bar_plot(df, x, y, bars):
    """Reshape data from long form to "bar plot form".

    Bar plot form has x value as the index with one column for bar grouping.
    Table values come from y values.
    """
    key_cols = [bars, x]
    # Duplicate (bars, x) pairs cannot be unstacked; keep only the last one.
    if df.duplicated(key_cols).any():
        warnings.warn('Duplicated index found.')
        df = df.drop_duplicates(key_cols, keep='last')
    return df.set_index(key_cols)[y].unstack(x).T
def get_new_command(command):
    """Attempt to rebuild the path string by spellchecking the directories.
    If it fails (i.e. no directories are a close enough match), then it
    defaults to the rules of cd_mkdir.
    Change sensitivity by changing MAX_ALLOWED_DIFF. Default value is 0.6
    """
    # command.script_parts[1] is the path argument that was given to `cd`.
    dest = command.script_parts[1].split(os.sep)
    if dest[-1] == '':
        # A trailing separator ("foo/bar/") leaves an empty final component.
        dest = dest[:-1]
    if dest[0] == '':
        # Absolute path: start the walk from the filesystem root.
        cwd = os.sep
        dest = dest[1:]
    elif six.PY2:
        # Python 2 needs the unicode variant of getcwd.
        cwd = os.getcwdu()
    else:
        cwd = os.getcwd()
    for directory in dest:
        if directory == ".":
            continue
        elif directory == "..":
            # Go up one level.
            cwd = os.path.split(cwd)[0]
            continue
        # Fuzzy-match the component against the real subdirectories of cwd.
        best_matches = get_close_matches(directory, _get_sub_dirs(cwd),
                                         cutoff=MAX_ALLOWED_DIFF)
        if best_matches:
            cwd = os.path.join(cwd, best_matches[0])
        else:
            # No plausible correction for this component: fall back to cd_mkdir.
            return cd_mkdir.get_new_command(command)
    return u'cd "{0}"'.format(cwd)
def find_matching(self) -> Dict[TLeft, TRight]:
    """Finds a matching in the bipartite graph.

    This is done using the Hopcroft-Karp algorithm with an implementation from
    the `hopcroftkarp` package.

    Returns:
        A dictionary where each edge of the matching is represented by a
        key-value pair with the key being from the left part of the graph and
        the value from the right part.
    """
    # Nodes are tagged with the partition they came from (LEFT/RIGHT) so the
    # same value occurring in both halves cannot collide. Only one direction
    # of each undirected edge is needed by the HopcroftKarp class.
    directed_graph = {}  # type: Dict[Tuple[int, TLeft], Set[Tuple[int, TRight]]]
    for left, right in self._edges:
        directed_graph.setdefault((LEFT, left), set()).add((RIGHT, right))
    matching = HopcroftKarp(directed_graph).maximum_matching()
    # Strip the partition tags and keep only the LEFT -> RIGHT edges.
    return {
        tail[1]: head[1]
        for tail, head in matching.items()
        if tail[0] == LEFT
    }
def init_file(self, filename, lines, expected, line_offset):
    """Signal a new file."""
    self.filename = filename
    self.lines = lines
    # Normalise a falsy `expected` to an empty tuple.
    self.expected = expected or ()
    self.line_offset = line_offset
    # Reset per-file state and update the global run counters.
    self.file_errors = 0
    self.counters['physical lines'] += len(lines)
    self.counters['files'] += 1
def get_archive_format(filename):
    """Detect filename archive format and optional compression."""
    mime, compression = util.guess_mime(filename)
    if not (mime or compression):
        raise util.PatoolError("unknown archive format for file `%s'" % filename)
    try:
        format = ArchiveMimetypes[mime]
    except KeyError:
        raise util.PatoolError(
            "unknown archive format for file `%s' (mime-type is `%s')"
            % (filename, mime))
    if format == compression:
        # file cannot be in same format compressed
        compression = None
    return format, compression
def simplify(all_points, tilewidth, tileheight):
    """Given a list of points, return list of rects that represent them.

    kludge:
        "A kludge (or kluge) is a workaround, a quick-and-dirty solution,
        a clumsy or inelegant, yet effective, solution to a problem,
        typically using parts that are cobbled together." -- wikipedia

    turn a list of points into rects; adjacent rects will be combined.

    plain english: the input list must be a list of tuples that represent
    the areas to be combined into rects; the rects will be blended together
    over solid groups.

    so if data is something like:

        0 1 1 1 0 0 0
        0 1 1 0 0 0 0
        0 0 0 0 0 4 0
        0 0 0 0 0 4 0
        0 0 0 0 0 0 0
        0 0 1 1 1 1 1

    you'll have the 4 rects that mask the area like this:
    pretty cool, right?

    there may be cases where the number of rectangles is not as low as
    possible, but I haven't found that it is excessively bad. certainly much
    better than making a list of rects, one for each tile on the map!
    """
    def pick_rect(points, rects):
        # Greedily grow one rect, starting at the point closest to the
        # origin (smallest x+y), then remove the covered points and recurse.
        ox, oy = sorted([(sum(p), p) for p in points])[0][1]
        x = ox
        y = oy
        ex = None  # inclusive right edge of the rect, fixed by the first row
        while 1:
            x += 1
            if not (x, y) in points:
                if ex is None:
                    ex = x - 1  # first row determines the rect width
                if (ox, y + 1) in points:
                    if x == ex + 1:
                        # Full row matched: grow the rect one row downward.
                        y += 1
                        x = ox
                    else:
                        # Row is shorter than the rect width: stop above it.
                        y -= 1
                        break
                else:
                    if x <= ex:
                        # Current row is incomplete; do not include it.
                        y -= 1
                    break
        # Pixel-space rect for the caller; tile-space rect for point removal.
        c_rect = pygame.Rect(ox * tilewidth, oy * tileheight,
                             (ex - ox + 1) * tilewidth,
                             (y - oy + 1) * tileheight)
        rects.append(c_rect)
        rect = pygame.Rect(ox, oy, ex - ox + 1, y - oy + 1)
        kill = [p for p in points if rect.collidepoint(p)]
        [points.remove(i) for i in kill]
        if points:
            pick_rect(points, rects)

    rect_list = []
    while all_points:
        pick_rect(all_points, rect_list)
    return rect_list
def _update_keywords(self, **update_props):
    """Update operation for ISO type-specific Keywords metadata: Theme or Place"""
    tree_to_update = update_props['tree_to_update']
    prop = update_props['prop']
    values = update_props['values']
    keywords = []
    if prop in KEYWORD_PROPS:
        # XPaths describing where keywords of this type live in the ISO tree.
        xpath_root = self._data_map['_keywords_root']
        xpath_map = self._data_structures[prop]
        xtype = xpath_map['keyword_type']
        xroot = xpath_map['keyword_root']
        xpath = xpath_map['keyword']
        ktype = KEYWORD_TYPES[prop]
        # Remove descriptiveKeyword nodes according to type
        for element in get_elements(tree_to_update, xpath_root):
            if get_element_text(element, xtype).lower() == ktype.lower():
                remove_element(tree_to_update, xpath_root)
        # Re-insert a fresh keywords root and tag it with the keyword type.
        element = insert_element(tree_to_update, 0, xpath_root)
        insert_element(element, 0, xtype, ktype)  # Add the type node
        # Write the new keyword values beneath the freshly created root.
        keywords.extend(update_property(element, xroot, xpath, prop, values))
    return keywords
def mergecsv(args):
    """
    %prog mergecsv *.tsv

    Merge a set of tsv files.
    """
    p = OptionParser(mergecsv.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    tsvfiles = args
    outfile = opts.outfile
    # Remove a stale output file so must_open() starts fresh.
    if op.exists(outfile):
        os.remove(outfile)

    fw = must_open(outfile, "w")
    for i, tsvfile in enumerate(tsvfiles):
        # BUGFIX: close each input file -- the old code leaked one open file
        # handle per input file.
        with open(tsvfile) as fp:
            if i > 0:
                next(fp)  # skip the header row of every file after the first
            for row in fp:
                fw.write(row)
    fw.close()
def flatten_(structure):
    """Combine all leaves of a nested structure into a tuple.

    The nested structure can consist of any combination of tuples, lists, and
    dicts. Dictionary keys will be discarded but values will be ordered by the
    sorting of the keys.

    Args:
        structure: Nested structure.

    Returns:
        Flat tuple.
    """
    if isinstance(structure, dict):
        # BUGFIX: the old code did `zip(*sorted(...))[1]`, which raises
        # TypeError on Python 3 because zip returns an iterator. Collect the
        # values (ordered by key) explicitly; this also handles the empty
        # dict without a special case.
        structure = tuple(value for _, value in sorted(structure.items()))
    if isinstance(structure, (tuple, list)):
        result = []
        for element in structure:
            result += flatten_(element)
        return tuple(result)
    # A leaf: wrap it in a one-element tuple.
    return (structure,)
def _op ( self , line , op = None , offset = 0 ) :
"""Returns the gate name for placing a gate on a line .
: param int line : Line number .
: param int op : Operation number or , by default , uses the current op count .
: return : Gate name .
: rtype : string""" | if op is None :
op = self . op_count [ line ]
return "line{}_gate{}" . format ( line , op + offset ) |
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
    """Gets the maximum padding lengths from all ``Instances`` in this batch.

    Each ``Instance`` has multiple ``Fields``, and each ``Field`` could have
    multiple things that need padding. We look at all fields in all instances,
    and find the max values for each (field_name, padding_key) pair, returning
    them in a dictionary.

    This can then be used to convert this batch into arrays of consistent
    length, or to set model parameters, etc.
    """
    padding_lengths: Dict[str, Dict[str, int]] = defaultdict(dict)
    instance_lengths = [instance.get_padding_lengths()
                        for instance in self.instances]
    if not instance_lengths:
        return {**padding_lengths}
    # Group the per-instance length dicts by field name.
    field_lengths: Dict[str, List[Dict[str, int]]] = defaultdict(list)
    for lengths in instance_lengths:
        for field_name, per_field in lengths.items():
            field_lengths[field_name].append(per_field)
    # For every (field, padding_key) pair take the maximum over all
    # instances, treating a missing key as 0.
    for field_name, lengths_list in field_lengths.items():
        for padding_key in lengths_list[0].keys():
            padding_lengths[field_name][padding_key] = max(
                entry.get(padding_key, 0) for entry in lengths_list)
    return {**padding_lengths}
def _getAbsoluteTime ( self , start , delay ) :
"""Adds the delay in seconds to the start time .
: param start :
: param delay :
: return : a datetimem for the specified point in time .""" | return start + datetime . timedelta ( days = 0 , seconds = delay ) |
def enabled(self):
    """bool: ``True`` if BGP is enabled; ``False`` if BGP is disabled."""
    base_ns = 'urn:ietf:params:xml:ns:netconf:base:1.0'
    bgp_filter = 'rbridge-id/router/bgp'
    # Build a <get-config> request that selects only the BGP subtree of the
    # running configuration.
    request = ET.Element('get-config', xmlns='%s' % base_ns)
    source = ET.SubElement(request, 'source')
    ET.SubElement(source, 'running')
    ET.SubElement(request, 'filter', type='xpath', select='%s' % bgp_filter)
    reply = self._callback(request, handler='get')
    brocade_ns = 'urn:brocade.com:mgmt:brocade-bgp'
    # BGP is enabled iff the reply contains a <bgp> node in that namespace.
    return reply.find('.//*{%s}bgp' % brocade_ns) is not None
def of_file(self, abspath, nbytes=0):
    """Use default hash method to return hash value of a piece of a file.

    :param abspath: the absolute path to the file
    :param nbytes: only hash first N bytes of the file. if 0, hash all file

    Estimate processing time on:
    CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB
    1 second can process 0.25GB data

    - 0.59G - 2.43 sec
    - 1.3G - 5.68 sec
    - 1.9G - 7.72 sec
    - 2.5G - 10.32 sec
    - 3.9G - 16.0 sec

    ATTENTION:
        if you change the meta data (for example, the title, years
        information in audio, video) of a multi-media file, then the hash
        value gonna also change.
    """
    if not os.path.exists(abspath):
        raise FileNotFoundError(
            "[Errno 2] No such file or directory: '%s'" % abspath)
    hasher = self.default_hash_method()
    with open(abspath, "rb") as f:
        if nbytes:
            # Hash only the first `nbytes` bytes of the file.
            head = f.read(nbytes)
            if head:
                hasher.update(head)
        else:
            # Hash the whole file, streaming it chunk by chunk.
            for chunk in iter(lambda: f.read(self._chunk_size), b""):
                hasher.update(chunk)
    return int(hasher.hexdigest(), 16) if self.return_int else hasher.hexdigest()
def addItemTag(self, item, tag):
    """Add a tag to an individal item.

    tag string must be in form "user/-/label/[tag]"
    """
    if not self.inItemTagTransaction:
        # Outside a transaction: apply the tag change immediately.
        return self._modifyItemTag(item.id, 'a', tag)
    # Inside a transaction: queue the change for later instead.
    # XXX: what if item's parent is not a feed?
    self.addTagBacklog.setdefault(tag, []).append(
        {'i': item.id, 's': item.parent.id})
    return "OK"
def new_value(self, key, value):
    """Create new value in data"""
    # Fetch the model's data mapping, add the entry, and push it back
    # through set_data.
    current = self.model.get_data()
    current[key] = value
    self.set_data(current)
def recipe(recipe):
    """Apply the given recipe to a node.

    Sets the run_list to the given recipe.
    If no nodes/hostname.json file exists, it creates one.
    """
    # Resolve which host we are operating on (fabric env).
    env.host_string = lib.get_env_host_string()
    lib.print_header("Applying recipe '{0}' on node {1}".format(
        recipe, env.host_string))
    # Create configuration and sync node
    data = lib.get_node(env.host_string)
    data["run_list"] = ["recipe[{0}]".format(recipe)]
    if not __testing__:
        # Bootstrap chef on the node first if autodeploy is enabled and chef
        # is not yet present there.
        if env.autodeploy_chef and not chef.chef_test():
            deploy_chef(ask="no")
        chef.sync_node(data)
def feature_assert(*feas):
    """Takes some feature patterns (like in `feature_needs`).

    Raises a fuse.FuseError if your underlying FUSE lib fails to have some of
    the matching features.

    (Note: use a ``has_foo`` type feature assertion only if lib support for
    method ``foo`` is *necessary* for your fs. Don't use this assertion just
    because your fs implements ``foo``. The usefulness of ``has_foo`` is
    limited by the fact that we can't guarantee that your FUSE kernel module
    also supports ``foo``.)
    """
    available = APIVersion()
    for fea in feas:
        required = feature_needs(fea)
        if available < required:
            raise FuseError(
                "FUSE API version %d is required for feature `%s' but only %d is available"
                % (required, str(fea), available))
def fourier_map_2d(uSin, angles, res, nm, lD=0, semi_coverage=False,
                   coords=None, count=None, max_count=None, verbose=0):
    r"""2D Fourier mapping with the Fourier diffraction theorem

    Two-dimensional diffraction tomography reconstruction algorithm for
    scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,z)` by a
    dielectric object with refractive index :math:`n(x,z)`.

    This function implements the solution by interpolation in Fourier space.

    Parameters
    ----------
    uSin: (A, N) ndarray
        Two-dimensional sinogram of line recordings
        :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
        divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
        measured at the detector.
    angles: (A,) ndarray
        Angular positions :math:`\phi_j` of `uSin` in radians.
    res: float
        Vacuum wavelength of the light :math:`\lambda` in pixels.
    nm: float
        Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
    lD: float
        Distance from center of rotation to detector plane
        :math:`l_\mathrm{D}` in pixels.
    semi_coverage: bool
        If set to `True`, it is assumed that the sinogram does not
        necessarily cover the full angular range from 0 to 2π, but an
        equidistant coverage over 2π can be achieved by inferring point
        (anti)symmetry of the (imaginary) real parts of the Fourier
        transform of f. Valid for any set of angles {X} that result in
        a 2π coverage with the union set {X}U{X+π}.
    coords: None [(2,M) ndarray]
        Computes only the output image at these coordinates. This
        keyword is reserved for future versions and is not implemented yet.
    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented by the
        total number of steps. At each step, the value of `count.value`
        is incremented.
    verbose: int
        Increment to increase verbosity.

    Returns
    -------
    f: ndarray of shape (N,N), complex if `onlyreal` is `False`
        Reconstructed object function :math:`f(\mathbf{r})` as defined
        by the Helmholtz equation.
        :math:`f(x,z) =
        k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`

    See Also
    --------
    backpropagate_2d: implementation by backpropagation
    odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
        to refractive index :math:`n(\mathbf{r})`

    Notes
    -----
    Do not use the parameter `lD` in combination with the Rytov
    approximation - the propagation is not correctly described.
    Instead, numerically refocus the sinogram prior to converting it to
    Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`) with a
    numerical focusing algorithm (available in the Python package
    :py:mod:`nrefocus`).

    The interpolation in Fourier space (which is done with
    :func:`scipy.interpolate.griddata`) may be unstable and lead to
    artifacts if the data to interpolate contains sharp spikes. This
    issue is not handled at all by this method (in fact, a test has been
    removed in version 0.2.6 because ``griddata`` gave different results
    on Windows and Linux).
    """
    # TODO:
    # - zero-padding as for backpropagate_2D - However this is not
    #   necessary as Fourier interpolation is not parallelizable with
    #   multiprocessing and thus unattractive. Could be interesting for
    #   specific environments without the Python GIL.
    # - Deal with oversampled data. Maybe issue a warning.
    A = angles.shape[0]
    if max_count is not None:
        max_count.value += 4
    # Check input data
    assert len(uSin.shape) == 2, "Input data `uSin` must have shape (A,N)!"
    assert len(uSin) == A, "`len(angles)` must be equal to `len(uSin)`!"
    if coords is not None:
        # BUGFIX: the two halves of this message were previously concatenated
        # without a separating space, and "backpropagation" was misspelled.
        raise NotImplementedError("Output coordinates cannot yet be set "
                                  "for the 2D backpropagation algorithm.")
    # Cut-off frequency km [1/px]
    km = (2 * np.pi * nm) / res
    # Fourier transform of all uB's.
    # In the script we used the unitary angular frequency (uaf) Fourier
    # transform; the discrete Fourier transform is equivalent to the unitary
    # ordinary frequency (uof) Fourier transform:
    #   uof: f1(ξ) = int f(x) exp(-2πi xξ)
    #   uaf: f3(ω) = (2π)^(-n/2) int f(x) exp(-i ωx)
    #   f1(ω/(2π)) = (2π)^(n/2) f3(ω),  ω = 2πξ
    # The backpropagation formula in uaf convention is
    #   F(k) = 1/sqrt(2π) U(kD)
    # which in uof convention becomes F(k) = U(kD); hence UB is multiplied by
    # sqrt(2π) and everything is computed in uof.
    # Sign convention: physics convention u0(x) = exp(-ikx) is used (to be
    # consistent with e.g. Meep); this only flips the sign of the imaginary
    # part of the scattered wave.
    UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1)) * np.sqrt(2 * np.pi)
    # Corresponding sample frequencies
    fx = np.fft.fftfreq(len(uSin[0]))  # 1D array
    kx = 2 * np.pi * fx  # 1D array
    if count is not None:
        count.value += 1
    # Undersampling/oversampling?
    # Determine if the resolution of the image is too low by looking at the
    # maximum value for kx. This is no comparison between Nyquist and
    # Rayleigh frequency.
    # BUGFIX: the old code folded `verbose` into only the first branch's
    # condition, so "...oversampled." was printed even when verbose was 0.
    if verbose:
        if np.max(kx**2) <= km**2:
            # Detector is not set up properly. Higher resolution
            # can be achieved.
            print("......Measurement data is undersampled.")
        else:
            print("......Measurement data is oversampled.")
            # raise NotImplementedError("Oversampled data not yet supported." +
            #        " Please rescale xD-axis of the input data.")
    # F(kD-kms0) = -i km sqrt(2/π) / a0 * M exp(-i km M lD) * UB(kD)
    # kmM = sqrt(km² - kx²);  s0 = (-sin(φ0), cos(φ0))
    # We create the 2D interpolation object F:
    # - we compute the real coordinates (krx, kry) = kD - km*s0
    # - we set as grid points the right side of the equation
    # The interpolated griddata may go up to sqrt(2)·km for kx and ky.
    kx = kx.reshape(1, -1)
    filter_klp = (kx**2 < km**2)
    M = 1. / km * np.sqrt(km**2 - kx**2)
    # Multiply by the factor (M - 1) instead of just (M) to take into account
    # that we have a scattered wave that is normalized by u0 (since 0.1.4).
    Fsin = -1j * km * np.sqrt(2 / np.pi) * M * np.exp(-1j * km * (M - 1) * lD)
    # UB has same shape (len(angles), len(kx))
    Fsin = Fsin * UB * filter_klp
    ang = angles.reshape(-1, 1)
    if semi_coverage:
        # Exploit point (anti)symmetry: every projection also provides the
        # conjugate data at angle + π.
        Fsin = np.vstack((Fsin, np.conj(Fsin)))
        ang = np.vstack((ang, ang + np.pi))
    if count is not None:
        count.value += 1
    # Compute kxl and kyl (in rotated system φ0)
    kxl = kx
    kyl = np.sqrt((km**2 - kx**2) * filter_klp) - km
    # rotate kxl and kyl to where they belong
    krx = np.cos(ang) * kxl + np.sin(ang) * kyl
    kry = -np.sin(ang) * kxl + np.cos(ang) * kyl
    Xf = krx.flatten()
    Yf = kry.flatten()
    Zf = Fsin.flatten()
    # Interpolation on grid with same resolution as input data
    kintp = np.fft.fftshift(kx.reshape(-1))
    Fcomp = intp.griddata((Xf, Yf), Zf, (kintp[None, :], kintp[:, None]))
    if count is not None:
        count.value += 1
    # Remove NaNs (points outside the convex hull of the scattered data).
    Fcomp[np.where(np.isnan(Fcomp))] = 0
    # Filter data
    # NOTE(review): this compares k² against sqrt(2)·km (not 2·km²); kept
    # exactly as in the original -- verify against the odtbrain reference.
    kinx, kiny = np.meshgrid(np.fft.fftshift(kx), np.fft.fftshift(kx))
    Fcomp[np.where((kinx**2 + kiny**2) > np.sqrt(2) * km)] = 0
    # Fcomp is centered at K=0 due to the way we chose kintp/coords
    f = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(Fcomp)))
    if count is not None:
        count.value += 1
    return f[::-1]
def delete(name, timeout=90):
    '''
    Delete the named service

    Args:
        name (str): The name of the service to delete

        timeout (int):
            The time in seconds to wait for the service to be deleted before
            returning. This is necessary because a service must be stopped
            before it can be deleted. Default is 90 seconds

        .. versionadded:: 2017.7.9,2018.3.4

    Returns:
        bool: ``True`` if successful, otherwise ``False``. Also returns ``True``
        if the service is not present

    CLI Example:

    .. code-block:: bash

        salt '*' service.delete <service name>
    '''
    handle_scm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_CONNECT)
    try:
        handle_svc = win32service.OpenService(
            handle_scm, name, win32service.SERVICE_ALL_ACCESS)
    except pywintypes.error as exc:
        # Could not open the service: release the SCM handle before deciding.
        win32service.CloseServiceHandle(handle_scm)
        if exc.winerror != 1060:
            raise CommandExecutionError(
                'Failed to open {0}. {1}'.format(name, exc.strerror))
        # Error 1060: the service does not exist -- treat as already deleted.
        log.debug('Service "%s" is not present', name)
        return True
    try:
        win32service.DeleteService(handle_svc)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed to delete {0}. {1}'.format(name, exc.strerror))
    finally:
        log.debug('Cleaning up')
        win32service.CloseServiceHandle(handle_scm)
        win32service.CloseServiceHandle(handle_svc)
    # Deletion is asynchronous: poll until the service disappears or the
    # timeout expires.
    end_time = time.time() + int(timeout)
    while name in get_all() and time.time() < end_time:
        time.sleep(1)
    return name not in get_all()
def _compute_sparse_average_correct(input_, labels, per_example_weights, topk=1):
    """Returns the numerator and denominator of classifier accuracy."""
    labels = tf.to_int64(labels)
    # Labels must be (batch_size, num_labels); batch size must match input_.
    labels.get_shape().assert_is_compatible_with([input_.get_shape()[0], None])
    if topk == 1:
        # Single prediction: argmax over classes, matched against any label.
        predictions = tf.reshape(tf.argmax(input_, 1), [-1, 1])
        in_topk = tf.reduce_any(tf.equal(labels, predictions),
                                reduction_indices=[1])
    else:
        # Use broadcasting to check if ANY of the predictions are in the top k.
        # TODO(eiderman): For a multi-label top k, what does accuracy mean?
        predictions = tf.reshape(tf.nn.top_k(input_, topk)[1], [-1, 1, topk])
        labels = tf.expand_dims(labels, [-1])
        in_topk = tf.reduce_any(
            tf.equal(tf.cast(labels, predictions.dtype), predictions),
            reduction_indices=[1, 2])
    correct_predictions = tf.to_float(in_topk)
    # If individual examples are weighted, then we want to normalize by that.
    if per_example_weights is not None:
        per_example_weights = _convert_and_assert_per_example_weights_compatible(
            input_, per_example_weights, dtype=None)
        float_weights = tf.to_float(per_example_weights)
        # TODO(eiderman): This should use an op that doesn't support
        # broadcasting.
        correct_predictions *= float_weights
        num_examples = tf.reduce_sum(float_weights)
    else:
        # shape only holds ints, but we want to always return the same type
        # for num_examples to make everything compatible.
        num_examples = tf.to_float(tf.gather(tf.shape(input_), 0))
    return tf.reduce_sum(correct_predictions), num_examples
def roundClosestValid(val, res, decimals=None):
    """Round `val` to the closest multiple of the resolution `res`.

    If `decimals` is not given, it is inferred from the number of decimal
    places of `res` (e.g. res=0.25 -> 2 decimals).
    """
    if decimals is None and "." in str(res):
        decimals = len(str(res).split('.')[1])
    # Snap to the nearest multiple of res, then clean up float noise.
    snapped = round(val / res) * res
    return round(snapped, decimals)
def find_eigen(hint=None):
    r'''Try to find the Eigen library. If successful the include directory is
    returned.

    :param hint: optional list of extra include directories to search first.
    :return: the include directory containing ``Eigen/Dense``, or ``None``.
    '''
    # Search with pkgconfig first (optional dependency, best-effort).
    try:
        import pkgconfig
        if pkgconfig.installed('eigen3', '>3.0.0'):
            return pkgconfig.parse('eigen3')['include_dirs'][0]
    except Exception:
        # BUGFIX: narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
    # Manual search.
    # BUGFIX: copy the hint list -- the old code extended the caller's list
    # in place via `search_dirs += [...]`.
    search_dirs = [] if hint is None else list(hint)
    search_dirs += [
        "/usr/local/include/eigen3",
        "/usr/local/homebrew/include/eigen3",
        "/opt/local/var/macports/software/eigen3",
        "/opt/local/include/eigen3",
        "/usr/include/eigen3",
        "/usr/include/local",
        "/usr/include",
    ]
    for d in search_dirs:
        if not os.path.exists(os.path.join(d, "Eigen", "Dense")):
            continue
        vf = os.path.join(d, "Eigen", "src", "Core", "util", "Macros.h")
        if not os.path.exists(vf):
            continue
        # BUGFIX: close the version-header file handle (it was leaked).
        with open(vf, "r") as fh:
            src = fh.read()
        v1 = re.findall("#define EIGEN_WORLD_VERSION (.+)", src)
        v2 = re.findall("#define EIGEN_MAJOR_VERSION (.+)", src)
        v3 = re.findall("#define EIGEN_MINOR_VERSION (.+)", src)
        if not len(v1) or not len(v2) or not len(v3):
            continue
        v = "{0}.{1}.{2}".format(v1[0], v2[0], v3[0])
        print("Found Eigen version {0} in: {1}".format(v, d))
        return d
    return None
def transform_e1e2(x, y, e1, e2, center_x=0, center_y=0):
    """Maps the coordinates x, y with eccentricities e1, e2 into a new
    elliptical coordinate system.

    :param x: x-coordinate(s)
    :param y: y-coordinate(s)
    :param e1: eccentricity component
    :param e2: eccentricity component
    :param center_x: x-coordinate of the center
    :param center_y: y-coordinate of the center
    :return: transformed (x, y) coordinates
    """
    dx = x - center_x
    dy = y - center_y
    # Apply the eccentricity (shear-like) matrix to the centered coordinates.
    xt = (1 - e1) * dx - e2 * dy
    yt = (1 + e1) * dy - e2 * dx
    # Normalize by the square root of the matrix determinant.
    norm = np.sqrt((1 - e1) * (1 + e1) + e2 ** 2)
    return xt / norm, yt / norm
def filter_out(queryset, setting_name):
    """Remove unwanted results from queryset"""
    # Exclusion criteria come from the FILTER_OUT entry of the named setting
    # (empty dict -> nothing is excluded).
    exclude_kwargs = helpers.get_settings().get(setting_name, {}).get('FILTER_OUT', {})
    return queryset.exclude(**exclude_kwargs)
def _is_at_ref_start ( self , nucmer_hit ) :
'''Returns True iff the hit is " close enough " to the start of the reference sequence''' | hit_coords = nucmer_hit . ref_coords ( )
return hit_coords . start < self . ref_end_tolerance |
def detectAndroidTablet(self):
    """Return detection of an Android tablet

    Detects if the current device is a (self-reported) Android tablet.
    Google says these devices will have 'Android' and NOT 'mobile' in their
    user agent.
    """
    # Must be an Android device at all.
    if not self.detectAndroid():
        return False
    # Android devices running Opera Mobile/Mini should NOT report as tablets.
    if self.detectOperaMobile():
        return False
    # Android without 'mobile' in the UA string is a tablet, per Google.
    return UAgentInfo.mobile not in self.__userAgent
def _to_unicode(instr):
    '''Convert *instr* from the current character encoding to unicode.

    ``None`` and already-unicode values are returned unchanged.
    '''
    if instr is None:
        return instr
    if isinstance(instr, six.text_type):
        return instr
    return six.text_type(instr, 'utf8')
def If(self, condition, *then, **kwargs):
    """**If**

        If(Predicate, *Then)

    Conditional expression for the DSL.

    **Arguments**

    * **Predicate**: expression used to decide whether the `Then` branch runs.
    * ***Then**: expressions executed when `Predicate` yields `True`; being
      variadic, stacked expressions are interpreted as a tuple `phi.dsl.Seq`.

    `Elif` and `Else` may be chained after this expression:

    * If no branch is entered the whole expression behaves like the identity.
    * `Elif` can only follow an `If` or another `Elif`; many may be stacked.
    * `Else` can only follow an `If` or `Elif`.

    **Examples**

        from phi import P, If

        assert "Between 2 and 10" == P.Pipe(
            If(P > 10,
                "Greater than 10"
            ).Elif(P < 2,
                "Less than 2"
            ).Else(
                "Between 2 and 10"
            )
        )
    """
    # Compile the predicate and the Then-branch down to plain functions.
    cond_f = _parse(condition)._f
    then_f = E.Seq(*then)._f
    # No Else branch supplied yet: default to the state identity so the
    # expression is a no-op when the predicate is false.
    else_f = utils.state_identity
    ast = (cond_f, then_f, else_f)
    g = _compile_if(ast)
    expr = self.__then__(g, **kwargs)
    # Stash the AST and the parent expression so that chained Elif/Else
    # calls can rebuild this conditional with extra branches.
    expr._ast = ast
    expr._root = self
    return expr
def to_time_field(formatter):
    """Return a callable that converts strings to ``datetime.time`` values.

    :param formatter: strptime-style format string used for parsing.
    :return: instance of TimeConverter; non-string inputs pass through
        unchanged.
    """
    class TimeConverter(object):
        # Callable converter bound to one fixed parse format.
        def __init__(self, fmt):
            self.formatter = fmt

        def __call__(self, raw):
            if not isinstance(raw, string_types):
                return raw
            return datetime.strptime(raw, self.formatter).time()

    return TimeConverter(formatter)
def empty_directory(self):
    """Remove all contents of this directory.

    Deletes every file, then every sub-directory (deepest first so each
    rmdir operates on an already-empty directory).
    """
    for entry in self.walkfiles():
        entry.remove()
    subdirs = [d for d in self.walkdirs()]
    for entry in reversed(subdirs):
        # Never remove the directory itself, and skip anything that is
        # no longer a directory by the time we get to it.
        if entry == self or not entry.isdir():
            continue
        entry.rmdir()
def ReadBlobs(self, blob_ids):
    """Read the given blobs, mapping each id to its contents (or None)."""
    # Missing ids map to None rather than raising.
    return {blob_id: self.blobs.get(blob_id, None) for blob_id in blob_ids}
def run_thermal_displacements(self, t_min=0, t_max=1000, t_step=10, temperatures=None, direction=None, freq_min=None, freq_max=None):
    """Prepare and run a thermal displacements calculation.

    Parameters
    ----------
    t_min, t_max, t_step : float, optional
        Minimum and maximum temperatures and the interval in this
        temperature range. Default values are 0, 1000, and 10.
    temperatures : array_like, optional
        Temperature points where thermal properties are calculated.
        When this is set, t_min, t_max, and t_step are ignored.
    direction : array_like, optional
        Projection direction in reduced coordinates, dtype=float,
        shape=(3,). Default is None, i.e., no projection.
    freq_min, freq_max : float, optional
        Phonon frequencies larger than freq_min and smaller than
        freq_max are included. Default is None, i.e., all phonons.

    Raises
    ------
    RuntimeError
        When the dynamical matrix or the required mesh sampling run is
        missing or was performed with incompatible options.
    """
    # Precondition: the dynamical matrix must have been built.
    if self._dynamical_matrix is None:
        msg = ("Dynamical matrix has not yet built.")
        raise RuntimeError(msg)
    # Precondition: run_mesh must have been executed.
    if self._mesh is None:
        msg = ("run_mesh has to be done.")
        raise RuntimeError(msg)
    mesh_nums = self._mesh.mesh_numbers
    ir_grid_points = self._mesh.ir_grid_points
    # Eigenvectors are required to compute displacements.
    if not self._mesh.with_eigenvectors:
        msg = ("run_mesh has to be done with with_eigenvectors=True.")
        raise RuntimeError(msg)
    # Every grid point must be present, i.e. mesh symmetry disabled:
    # the number of irreducible points must equal the full grid size.
    if np.prod(mesh_nums) != len(ir_grid_points):
        msg = ("run_mesh has to be done with is_mesh_symmetry=False.")
        raise RuntimeError(msg)
    if direction is not None:
        # Convert the reduced-coordinate direction to Cartesian space via
        # the primitive cell before projecting.
        projection_direction = np.dot(direction, self._primitive.get_cell())
        td = ThermalDisplacements(self._mesh, projection_direction=projection_direction, freq_min=freq_min, freq_max=freq_max)
    else:
        td = ThermalDisplacements(self._mesh, freq_min=freq_min, freq_max=freq_max)
    # Explicit temperature points win over the (t_min, t_max, t_step) range.
    if temperatures is None:
        td.set_temperature_range(t_min, t_max, t_step)
    else:
        td.set_temperatures(temperatures)
    td.run()
    self._thermal_displacements = td
def get_job_config(conf):
    """Extract handler names from job_conf.xml.

    :param conf: path or file-like object for the job configuration XML.
    :return: list of ``{'service_name': <handler id>}`` dicts.
    """
    root = elementtree.parse(conf).getroot()
    handlers = root.find('handlers')
    return [{'service_name': handler.attrib['id']} for handler in handlers]
def obj_to_dict(cls, obj):
    """Convert a model object into a dictionary suitable for passing to
    the constructor's data attribute.

    Fields missing on *obj* are skipped; callable values are invoked and
    their result stored instead.
    """
    data = {}
    for name in cls.get_fields():
        try:
            attr = getattr(obj, name)
        except AttributeError:
            # Field doesn't exist on the object: fail gracefully and leave
            # it out of the dict entirely. Note an AttributeError raised
            # *inside* an existing field would also land here.
            continue
        data[name] = attr() if callable(attr) else attr
    return data
def finalize_options(self) -> None:
    "Get options from config files."
    settings = from_path(os.getcwd())
    self.arguments = {}  # type: Dict[str, Any]
    for name, value in settings.items():
        self.arguments[name] = value
def setShapeClass(self, typeID, clazz):
    """setShapeClass(string, string) -> None

    Sets the shape class of vehicles of this type.
    """
    command, variable = tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_SHAPECLASS
    self._connection._sendStringCmd(command, variable, typeID, clazz)
def _init_edges(self, dst_srcs_list):
    """Create all GO edges given a list of (dst, srcs)."""
    from goatools.gosubdag.go_paths import get_paths_goobjs, paths2edges
    edges_all = set()
    goid_all = set()
    go2obj = self.go2obj
    for dst, srcs in dst_srcs_list:
        # Resolve each source GO id to its term object.
        go2obj_srcs = {goid: go2obj[goid] for goid in srcs}
        go_paths, go_all = get_paths_goobjs(
            go2obj_srcs.values(), go_top=dst, go2obj=go2obj)
        edges_all |= paths2edges(go_paths)
        goid_all |= go_all
    # Store edges as (child id, parent id) pairs.
    self.edges = [(a.id, b.id) for a, b in edges_all]
    self.goid_all = goid_all
def fit(self, X, y, **args):
    """Primary entry point for the manual alpha selection visualizer.

    Sets each alpha from ``self.alphas`` on the wrapped estimator, scores
    the model on (X, y), aggregates the mean scores into ``self.errors``
    and draws them with matplotlib. Returns self.
    """
    mean_scores = []
    for alpha in self.alphas:
        self.estimator.set_params(alpha=alpha)
        fold_scores = self.score_method(self.estimator, X, y)
        mean_scores.append(fold_scores.mean())
    # Store as an ndarray for drawing.
    self.errors = np.array(mean_scores)
    self.draw()
    # Always return self from fit.
    return self
def _string_to_dictsql(self, part):
    """Do magic matching of single words or quoted strings.

    Translates one parsed query *part* (a pyparsing result) into the
    dict-based SQL structure used by the smart-search backend.
    """
    # NOTE: ``unicode`` builtin implies this module still targets Python 2.
    self._logger.debug("parsing string: " + unicode(part[0]) + " of type: " + part.getName())
    if part.getName() == 'tag':
        # Tag token: strip its leading marker character for the value.
        self._logger.debug("Query part '" + part[0] + "' interpreted as tag")
        dictsql = {'interpretation': {'string': part[0], 'interpretation': 'tag', 'attribute': 'tag', 'operator': 'equals_any', 'error': False}, 'operator': 'equals_any', 'val1': 'tags', 'val2': part[0][1:]}
    elif part.getName() == 'vrf_rt':
        self._logger.debug("Query part '" + part.vrf_rt + "' interpreted as VRF RT")
        # TODO: enable this, our fancy new interpretation
        dictsql = {'interpretation': {'attribute': 'VRF RT', 'interpretation': 'vrf_rt', 'operator': 'equals', 'string': part.vrf_rt, 'error': False}, 'operator': 'equals', 'val1': 'vrf_rt', 'val2': part.vrf_rt}
        # using old interpretation for the time being to make sure we align
        # with old smart search interpreter -- the assignment below
        # deliberately overwrites the fancy interpretation above.
        dictsql = {'interpretation': {'attribute': 'name or description', 'interpretation': 'text', 'operator': 'regex', 'string': part.vrf_rt, 'error': False}, 'operator': 'or', 'val1': {'operator': 'regex_match', 'val1': 'name', 'val2': part.vrf_rt}, 'val2': {'operator': 'regex_match', 'val1': 'description', 'val2': part.vrf_rt}}
    else:
        # Fallback: free text matched by regex against name or description.
        self._logger.debug("Query part '" + part[0] + "' interpreted as text")
        dictsql = {'interpretation': {'attribute': 'name or description', 'interpretation': 'text', 'operator': 'regex', 'string': part[0], 'error': False}, 'operator': 'or', 'val1': {'operator': 'regex_match', 'val1': 'name', 'val2': part[0]}, 'val2': {'operator': 'regex_match', 'val1': 'description', 'val2': part[0]}}
    return dictsql
def _granularities(self):
    """Yield all possible granularities based on the MIN_GRANULARITY and
    MAX_GRANULARITY settings.

    Walks the global GRANULARITIES list in order and yields the window
    bounded (inclusively) by the configured MIN and MAX values.
    """
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source; the structure below yields MIN..MAX inclusive -- confirm
    # against version control before relying on the exact boundary case.
    keep = False
    for g in GRANULARITIES:
        if g == app_settings.MIN_GRANULARITY and not keep:
            # Entering the window at the configured minimum.
            keep = True
        elif g == app_settings.MAX_GRANULARITY and keep:
            # Leaving the window: emit the maximum itself, then stop.
            keep = False
            yield g
        if keep:
            yield g
def lookup_hlr_create(self, phonenumber, params=None):
    """Perform a new HLR lookup."""
    payload = {} if params is None else params
    endpoint = 'lookup/' + str(phonenumber) + '/hlr'
    return HLR().load(self.request(endpoint, 'POST', payload))
def _get_xdata_for_function ( self , n , xdata ) :
"""Generates the x - data for plotting the function .
Parameters
Which data set we ' re using
xdata
Data set upon which to base this
Returns
float""" | # Use the xdata itself for the function
if self [ 'fpoints' ] [ n ] in [ None , 0 ] :
return _n . array ( xdata )
# Otherwise , generate xdata with the number of fpoints
# do exponential ranging if xscale is log
if self [ 'xscale' ] [ n ] == 'log' :
return _n . logspace ( _n . log10 ( min ( xdata ) ) , _n . log10 ( max ( xdata ) ) , self [ 'fpoints' ] [ n ] , True , 10.0 )
# otherwise do linear spacing
else :
return _n . linspace ( min ( xdata ) , max ( xdata ) , self [ 'fpoints' ] [ n ] ) |
def increase_posts_count(sender, instance, **kwargs):
    """Increase the member's post count after a post save.

    Signal receiver that updates the ForumProfile of the user who authored
    the forum post being created or updated.
    """
    if instance.poster is None:
        # An anonymous post is considered. No profile can be updated in
        # that case.
        return
    profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)
    increase_posts_count = False
    if instance.pk:
        # Existing post: count it only when it transitions from
        # unapproved to approved.
        try:
            old_instance = instance.__class__._default_manager.get(pk=instance.pk)
        except ObjectDoesNotExist:  # pragma: no cover
            # This should never happen (except with django loaddata command)
            increase_posts_count = True
            old_instance = None
        if old_instance and old_instance.approved is False and instance.approved is True:
            increase_posts_count = True
    elif instance.approved:
        # Brand-new post that is already approved.
        increase_posts_count = True
    if increase_posts_count:
        # F() expression avoids a read-modify-write race on posts_count.
        profile.posts_count = F('posts_count') + 1
        profile.save()
async def connect(self, conn_id, connection_string):
    """Connect to a device.

    See :meth:`AbstractDeviceAdapter.connect`.

    Connection strings of the form ``device/...`` are routed to the best
    available adapter; ``adapter/<id>/<conn>`` pins an explicit adapter.
    """
    if connection_string.startswith('device/'):
        # Let the manager pick the best adapter and translate the string.
        adapter_id, local_conn = self._find_best_adapter(connection_string, conn_id)
        translate_conn = True
    elif connection_string.startswith('adapter/'):
        # Explicit adapter id encoded after the 'adapter/' prefix.
        adapter_str, _, local_conn = connection_string[8:].partition('/')
        adapter_id = int(adapter_str)
        translate_conn = False
    else:
        raise DeviceAdapterError(conn_id, 'connect', 'invalid connection string format')
    if self.adapters[adapter_id].can_connect() is False:
        raise DeviceAdapterError(conn_id, 'connect', 'chosen adapter cannot handle another connection')
    # Make sure to set up the connection information before connecting
    # so there are no races with events coming soon after connect.
    self._setup_connection(conn_id, local_conn)
    self._track_property(conn_id, 'adapter', adapter_id)
    self._track_property(conn_id, 'translate', translate_conn)
    try:
        await self.adapters[adapter_id].connect(conn_id, local_conn)
    except:
        # Roll back the tracking state on any failure, then re-raise.
        # NOTE(review): the bare except also catches CancelledError, which
        # appears intentional so cleanup always runs -- confirm.
        self._teardown_connection(conn_id)
        raise
def slices_from_global_coords(self, slices):
    """Convert mip-0 slices into this volume's mip level.

    Mainly useful for debugging, since the neuroglancer client displays
    mip-0 coordinates for the cursor.
    """
    mip_bbox = self.bbox_to_mip(slices, 0, self.mip)
    return mip_bbox.to_slices()
def set_objective_sense(self, sense):
    """Set type of problem (maximize or minimize).

    :raises ValueError: if *sense* is not a recognized objective sense.
    """
    valid_senses = (ObjectiveSense.Minimize, ObjectiveSense.Maximize)
    if sense not in valid_senses:
        raise ValueError('Invalid objective sense')
    self._p.ModelSense = self.OBJ_SENSE_MAP[sense]
def slice(self, start, until):
    """Take the subsequence [start, until) of this sequence.

    >>> seq([1, 2, 3, 4]).slice(1, 3)
    [2, 3]

    :param start: starting index (inclusive)
    :param until: ending index (exclusive)
    :return: slice of the sequence
    """
    transform = transformations.slice_t(start, until)
    return self._transform(transform)
def add_tier(self, coro, **kwargs):
    """Add a coroutine function to the cell as a task tier.

    The source can be a single value or a list of either `Tier` types or
    coroutine functions already added to a `Tier` via `add_tier`.
    """
    self.assertNotFinalized()
    assert asyncio.iscoroutinefunction(coro)
    new_tier = self.Tier(self, coro, **kwargs)
    # Register the tier and remember which coroutine it wraps.
    self.tiers.append(new_tier)
    self.tiers_coro_map[coro] = new_tier
    return new_tier
def show_vcs_output_vcs_guid(self, **kwargs):
    """Auto Generated Code.

    Builds the ``show_vcs/output/vcs-guid`` XML payload from the
    ``vcs_guid`` kwarg and dispatches it through ``callback``
    (``self._callback`` unless overridden).
    """
    # The generated original allocated an unused ET.Element("config") that
    # was immediately overwritten; the payload root is <show_vcs> itself.
    config = ET.Element("show_vcs")
    output = ET.SubElement(config, "output")
    vcs_guid = ET.SubElement(output, "vcs-guid")
    vcs_guid.text = kwargs.pop('vcs_guid')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def print_summary(self, decimals=2, **kwargs):
    """Print summary statistics describing the fit, the coefficients, and
    the error bounds.

    Parameters
    ----------
    decimals : int, optional (default=2)
        specify the number of decimal places to show
    kwargs :
        print additional metadata in the output (useful to provide model
        names, dataset names, etc.) when comparing multiple outputs.
    """
    # Print information about the data/model first.
    justify = string_justify(18)
    print(self)
    print("{} = '{}'".format(justify("duration col"), self.duration_col))
    print("{} = '{}'".format(justify("event col"), self.event_col))
    # Optional configuration lines only shown when set.
    if self.weights_col:
        print("{} = '{}'".format(justify("weights col"), self.weights_col))
    if self.coef_penalizer > 0:
        print("{} = '{}'".format(justify("coef penalizer"), self.coef_penalizer))
    if self.smoothing_penalizer > 0:
        print("{} = '{}'".format(justify("smoothing penalizer"), self.smoothing_penalizer))
    print("{} = {}".format(justify("number of subjects"), self._n_examples))
    print("{} = {}".format(justify("number of events"), self.event_observed.sum()))
    print("{} = {}".format(justify("time fit was run"), self._time_fit_was_called))
    # Caller-supplied metadata (model name, dataset name, ...).
    for k, v in kwargs.items():
        print("{} = {}\n".format(justify(k), v))
    print(end="\n")
    print("---")
    # Coefficient table with formatted floats and p-values.
    df = self.summary
    print(df.to_string(float_format=format_floats(decimals), formatters={"p": format_p_value(decimals), "exp(coef)": format_exp_floats(decimals)}, ))
    # Significance code explanation
    print("---")
    print("Concordance = {:.{prec}f}".format(self.score_, prec=decimals))
def add(self, num):
    """Add *num* to the current value, clamping the result to
    [self.imin, self.imax].

    If the current value cannot be read or added to (e.g. it is unset),
    *num* itself becomes the new base value.
    """
    try:
        val = self.value() + num
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        val = num
    self.set(min(self.imax, max(self.imin, val)))
def list_actions(i):
    """List the actions of a module (or of the kernel).

    Input:  {
              (repo_uoa)   - repo UOA
              (module_uoa) - module UOA; if "", use kernel
              (data_uoa)   - alternative way to name the module
            }

    Output: {
              return  - return code = 0 if successful, > 0 if error
              (error) - error text if return > 0
              actions - dict of actions
            }
    """
    o = i.get('out', '')
    ruoa = i.get('repo_uoa', '')
    muoa = i.get('module_uoa', '')
    duoa = i.get('data_uoa', '')
    if muoa != '':
        # data_uoa, when given, names the module to inspect.
        if duoa != '':
            muoa = duoa
            duoa = ''
        # Find path to module 'module' to get dummies.
        ii = {'action': 'load', 'module_uoa': cfg['module_name'], 'data_uoa': muoa, 'common_func': 'yes'}
        if ruoa != '':
            ii['repo_uoa'] = ruoa
        r = access(ii)
        if r['return'] > 0:
            return r
        dd = r['dict']
        actions = dd.get('actions', {})
    else:
        # No module named: fall back to the kernel's own actions.
        actions = cfg['actions']
    # If console output requested, print each action with its description.
    if o == 'con':
        for q in sorted(actions.keys()):
            s = q
            desc = actions[q].get('desc', '')
            if desc != '':
                s += ' - ' + desc
            out(s)
    return {'return': 0, 'actions': actions}
def load_categories(self, max_pages=30):
    """Load all WordPress categories from the given site.

    :param max_pages: kill counter to avoid infinite looping
    :return: None
    """
    logger.info("loading categories")
    # Clear them all out so we don't get dupes if requested.
    if self.purge_first:
        Category.objects.filter(site_id=self.site_id).delete()
    path = "sites/{}/categories".format(self.site_id)
    params = {"number": 100}
    page = 1
    response = self.get(path, params)
    if not response.ok:
        logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
    # Page through the API until it runs dry or max_pages is hit.
    while response.ok and response.text and page < max_pages:
        logger.info(" - page: %d", page)
        api_categories = response.json().get("categories")
        if not api_categories:
            # we're done here
            break
        categories = []
        for api_category in api_categories:
            # If it exists locally, update the local version if anything
            # has changed; otherwise queue it for bulk creation.
            existing_category = Category.objects.filter(site_id=self.site_id, wp_id=api_category["ID"]).first()
            if existing_category:
                self.update_existing_category(existing_category, api_category)
            else:
                categories.append(self.get_new_category(api_category))
        if categories:
            Category.objects.bulk_create(categories)
        elif not self.full:
            # Nothing new on this page and not doing a full sync: stop.
            break
        # get next page
        page += 1
        params["page"] = page
        response = self.get(path, params)
        if not response.ok:
            logger.warning("Response NOT OK! status_code=%s\n%s", response.status_code, response.text)
    return
def newline(self, *args, **kwargs):
    """Print an empty line to the log.

    Uses the level of the last message printed unless overridden with
    the ``level=`` kwarg.
    """
    chosen_level = kwargs.get('level') or self._lastlevel
    self._log(chosen_level, '', 'newline', args, kwargs)
def _get_or_create_service_key(self):
    """Return the service key, creating it first when it does not exist."""
    existing = self.service._get_service_keys(self.name)
    for resource in existing['resources']:
        if resource['entity']['name'] == self.service_name:
            # Key already exists; just fetch and return it.
            return self.service.get_service_key(self.name, self.service_name)
    self.service.create_service_key(self.name, self.service_name)
    return self.service.get_service_key(self.name, self.service_name)
def split_channel(self):
    """Split a channel into a new channel or another existing channel.

    Presents the channel's subscribers as a selectable list so chosen
    subscribers can be moved.
    """
    # Show any pending messages from a previous step first.
    if self.current.task_data.get('msg', False):
        self.show_warning_messages()
    self.current.task_data['split_operation'] = True
    channel = Channel.objects.get(self.current.task_data['chosen_channels'][0])
    _form = SubscriberListForm(title=_(u'Choose Subscribers to Migrate'))
    # One selectable row per current subscriber of the channel.
    for subscriber in Subscriber.objects.filter(channel=channel):
        subscriber_name = subscriber.user.username
        _form.SubscriberList(choice=False, name=subscriber_name, key=subscriber.key)
    # Two continuation buttons: migrate to a brand-new or an existing channel.
    _form.new_channel = fields.Button(_(u"Move to a New Channel"), cmd="create_new_channel")
    _form.existing_channel = fields.Button(_(u"Move to an Existing Channel"), cmd="choose_existing_channel")
    self.form_out(_form)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.