signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def preformatted_text(source: str) -> str:
    """Render *source* inside a preformatted-text HTML box.

    Returns an empty string for empty input. The text is HTML-escaped and
    dedented before being wrapped in a ``<pre>`` element.
    """
    environ.abort_thread()
    if not source:
        return ''
    escaped = render_utils.html_escape(source)
    dedented = textwrap.dedent(escaped)
    return '<pre class="preformatted-textbox">{text}</pre>'.format(text=dedented)
def return_real_id_base(dbpath, set_object):
    """Return the list of ``real_id`` values for a dataset stored in SQLite.

    A ``real_id`` is the filename minus its suffix and prefix.

    :param dbpath: path to the SQLite database file.
        NOTE(review): ``'sqlite:////' + dbpath`` yields five slashes for an
        absolute path; confirm whether dbpath is expected to be relative.
    :param set_object: mapped class (either TestSet or TrainSet) stored in
        the database.
    :return: list of ``real_id`` values, ordered by primary key.
    """
    engine = create_engine('sqlite:////' + dbpath)
    session = sessionmaker(bind=engine)()
    try:
        # FIX: close the session even when the query raises, so the
        # connection is not leaked on error.
        return [row.real_id
                for row in session.query(set_object).order_by(set_object.id)]
    finally:
        session.close()
def argmax(self, axis=None, skipna=True, *args, **kwargs):
    """Return the index of the maximum value along an axis.

    See `numpy.ndarray.argmax` for more information on the `axis`
    parameter.

    See Also
    --------
    numpy.ndarray.argmax
    """
    nv.validate_argmax(args, kwargs)
    nv.validate_minmax_axis(axis)
    values = self.asi8
    if self.hasnans:
        nan_mask = self._isnan
        # With NaNs present, -1 signals "no valid maximum" when either
        # skipping is disabled or everything is NaN.
        if not skipna or nan_mask.all():
            return -1
        values = values.copy()
        values[nan_mask] = 0
    return values.argmax()
def create_api_integration_response(restApiId, resourcePath, httpMethod,
                                    statusCode, selectionPattern,
                                    responseParameters=None,
                                    responseTemplates=None, region=None,
                                    key=None, keyid=None, profile=None):
    '''
    Creates an integration response for a given method in a given API

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.create_api_integration_response \\
                restApiId resourcePath httpMethod \\
                statusCode selectionPattern ['{}' ['{}']]
    '''
    try:
        resource = describe_api_resource(restApiId, resourcePath,
                                         region=region, key=key, keyid=keyid,
                                         profile=profile).get('resource')
        if not resource:
            return {'created': False, 'error': 'no such resource'}
        if responseParameters is None:
            responseParameters = {}
        if responseTemplates is None:
            responseTemplates = {}
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        response = conn.put_integration_response(
            restApiId=restApiId,
            resourceId=resource['id'],
            httpMethod=httpMethod,
            statusCode=statusCode,
            selectionPattern=selectionPattern,
            responseParameters=responseParameters,
            responseTemplates=responseTemplates)
        return {'created': True, 'response': response}
    except ClientError as e:
        return {'created': False, 'error': __utils__['boto3.get_error'](e)}
def static(self, uri, file_or_directory, *args, **kwargs):
    """Register a blueprint static route.

    :param uri: endpoint at which the route will be accessible.
    :param file_or_directory: static asset to serve.
    """
    future = FutureStatic(uri, file_or_directory, args, kwargs)
    self.statics.append(future)
def learn(self, fit=0, size=0, configure=None):
    """Learn all (nearly) optimal logical networks within the given
    fitness and size tolerance.

    The first optimum logical network found is saved in :attr:`optimum`,
    while all enumerated logical networks are saved in :attr:`networks`.

    Example::

        >>> from caspo import core, learn
        >>> graph = core.Graph.read_sif('pkn.sif')
        >>> dataset = core.Dataset('dataset.csv', 30)
        >>> zipped = graph.compress(dataset.setup)
        >>> learner = learn.Learner(zipped, dataset, 2, 'round', 100)
        >>> learner.learn(0.02, 1)
        >>> learner.networks.to_csv('networks.csv')

    Parameters
    ----------
    fit : float
        Fitness tolerance, e.g. 0.1 for 10% tolerance w.r.t. the optimum.
    size : int
        Size tolerance with respect to the optimum.
    configure : callable
        Callable responsible for setting a custom clingo configuration.
    """
    encodings = ['guess', 'fixpoint', 'rss']
    if self.optimum is None:
        # Phase 1: single optimization run to find one optimum network.
        clingo = self.__get_clingo__(encodings + ['opt'])
        if configure is not None:
            configure(clingo.conf)
        clingo.ground([("base", [])])
        clingo.solve(on_model=self.__keep_last__)
        self.stats['time_optimum'] = clingo.stats['time_total']
        self._logger.info("Optimum logical network learned in %.4fs",
                          self.stats['time_optimum'])
        tuples = (f.args() for f in self._last)
        self.optimum = core.LogicalNetwork.from_hypertuples(self.hypergraph,
                                                            tuples)
        # Compute residual sum of squares of the optimum's predictions
        # against the (discretized) observed readouts, ignoring NaNs.
        predictions = self.optimum.predictions(
            self.dataset.clampings, self.dataset.readouts.columns).values
        readouts = self.dataset.readouts.values
        pos = ~np.isnan(readouts)
        rss = np.sum((np.vectorize(self.discrete)(readouts[pos])
                      - predictions[pos] * self.factor) ** 2)
        self.stats['optimum_mse'] = mean_squared_error(readouts[pos],
                                                       predictions[pos])
        self.stats['optimum_size'] = self.optimum.size
        self._logger.info(
            "Optimum logical networks has MSE %.4f and size %s",
            self.stats['optimum_mse'], self.stats['optimum_size'])
    # NOTE(review): ``rss`` is only assigned inside the branch above, yet it
    # is used below — if ``self.optimum`` was already set on entry this
    # raises NameError. Confirm against the original source whether the
    # enumeration phase was meant to be inside the same branch.
    # Phase 2: enumerate all networks within the fit/size tolerances.
    self.networks.reset()
    args = ['-c maxrss=%s' % int(rss + rss * fit),
            '-c maxsize=%s' % (self.optimum.size + size)]
    clingo = self.__get_clingo__(encodings + ['enum'], args)
    clingo.conf.solve.models = '0'  # '0' = enumerate all models
    if configure is not None:
        configure(clingo.conf)
    clingo.ground([("base", [])])
    clingo.solve(on_model=self.__save__)
    self.stats['time_enumeration'] = clingo.stats['time_total']
    self._logger.info(
        "%s (nearly) optimal logical networks learned in %.4fs",
        len(self.networks), self.stats['time_enumeration'])
def get_machine(self, key):
    """Return the number of the machine which *key* gets sent to.

    Uses consistent hashing: the key's hash is located on the ring of
    machine hash values (``self.hash_tuples``, sorted by hash) and the
    first machine at or after it is chosen.
    """
    h = self.hash(key)
    # Edge case: a hash beyond the largest node hash wraps around the ring
    # back to the first machine.
    if h > self.hash_tuples[-1][2]:
        return self.hash_tuples[0][0]
    # FIX: ``map`` returns a lazy iterator on Python 3, and
    # ``bisect.bisect_left`` requires an indexable sequence — the original
    # raised TypeError. Materialize the hash values as a list.
    hash_values = [t[2] for t in self.hash_tuples]
    index = bisect.bisect_left(hash_values, h)
    return self.hash_tuples[index][0]
def set_initial_representations(self):
    """Apply the initial display settings.

    Refreshes the model dictionary, then issues the startup display
    commands via ``self.rc`` (presumably a Chimera command runner —
    TODO confirm).
    """
    self.update_model_dict()
    for command in (
            "background solid white",
            "setattr g display 0",  # Hide all pseudobonds
            "~display #%i & :/isHet & ~:%s" % (
                self.model_dict[self.plipname], self.hetid)):
        self.rc(command)
def heading(headingtext, headinglevel, lang='en'):
    '''Build and return a new heading paragraph element.'''
    # Style-name prefix is localized; level number is appended.
    style_names = {'en': 'Heading', 'it': 'Titolo'}
    paragraph = makeelement('p')
    properties = makeelement('pPr')
    style = makeelement(
        'pStyle', attributes={'val': style_names[lang] + str(headinglevel)})
    run = makeelement('r')
    text = makeelement('t', tagtext=headingtext)
    # Assemble: style into properties, text into run, both into the paragraph.
    properties.append(style)
    run.append(text)
    paragraph.append(properties)
    paragraph.append(run)
    return paragraph
def _create_meta_cache(self):
    """Best-effort dump of document metadata to the cache file.

    Failures to write or pickle are deliberately ignored — caching is
    opportunistic.
    """
    try:
        with open(self._cache_filename, 'wb') as cache_file:
            compat.pickle.dump(self._document_meta, cache_file, 1)
    except (IOError, compat.pickle.PickleError):
        pass
def send_keyboard_input(text=None, key_list=None):
    """Send keyboard input to the currently focused window.

    Args:
        text (str): characters to type (used to build key presses).
        key_list (list): explicit key names; when None it is derived from
            ``text`` (non-Windows path only).

    References:
        http://stackoverflow.com/questions/14788036/python-win32api-sendmesage
        http://www.pinvoke.net/default.aspx/user32.sendinput

    CommandLine:
        python -m utool.util_cplat --test-send_keyboard_input

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_cplat import *  # NOQA
        >>> text = '%paste'
        >>> result = send_keyboard_input('%paste')
        >>> print(result)
    """
    if WIN32:
        from utool._internal import win32_send_keys
        pause = .05
        # FIX: the previous code overwrote ``text`` with the hard-coded
        # string 'paste' (leftover debug line), ignoring the caller's
        # argument. Send the requested text instead.
        kw = dict(with_spaces=False, with_tabs=True, with_newlines=False)
        win32_send_keys.SendKeys(text, pause=pause,
                                 turn_off_numlock=True, **kw)
    else:
        if key_list is None:
            # xdotool needs symbolic names for shifted characters.
            char_map = {'%': 'shift+5'}
            key_list = [char_map.get(char, char) for char in text]
        cmd = ' '.join(['xdotool', 'key'] + key_list)
        print('Running: cmd=%r' % (cmd,))
        print('+---')
        print(cmd)
        print('L___')
        os.system(cmd)
def declare(self, queue='', virtual_host='/', passive=False, durable=False,
            auto_delete=False, arguments=None):
    """Declare a Queue.

    :param str queue: Queue name
    :param str virtual_host: Virtual host name
    :param bool passive: Do not create; just look the queue up
    :param bool durable: Durable queue
    :param bool auto_delete: Automatically delete when not in use
    :param dict|None arguments: Queue key/value arguments

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: dict
    """
    if passive:
        return self.get(queue, virtual_host=virtual_host)
    payload = json.dumps({
        'durable': durable,
        'auto_delete': auto_delete,
        'arguments': arguments or {},
        'vhost': virtual_host,
    })
    return self.http_client.put(
        API_QUEUE % (quote(virtual_host, ''), queue),
        payload=payload)
def _get_measures ( self , et ) : """Get a list of measures in < continuous > or < SurveyOptions > section"""
list_of_measures = [ ] for tag in et . findall ( "option" ) : if tag . attrib . get ( "value" , "true" ) == "true" : list_of_measures . append ( tag . attrib [ "name" ] ) return list_of_measures
def handle_url(url, session, res):
    """Parse one search result page and collect comics into *res*.

    Each entry in *res* maps a comic name to ``(url, num)`` where ``num``
    is the number of images found for that comic. Errors and skips are
    reported on stderr.
    """
    print("Parsing", url, file=sys.stderr)
    try:
        data = getPageContent(url, session)
    except IOError as msg:
        print("ERROR:", msg, file=sys.stderr)
        return
    for match in url_matcher.finditer(data):
        # NOTE: the loop rebinds ``url`` — from here on it refers to the
        # matched comic URL, not the page URL passed in.
        url = match.group(1) + '/'
        name = unescape(match.group(2))
        name = asciify(name.replace('&', 'And').replace('@', 'At'))
        name = capfirst(name)
        if name in exclude_comics:
            continue
        if contains_case_insensitive(res, name):
            # we cannot handle two comics that only differ in case
            print("INFO: skipping possible duplicate", repr(name),
                  file=sys.stderr)
            continue
        # find out how many images this comic has
        end = match.end()
        mo = num_matcher.search(data[end:])
        if not mo:
            print("ERROR:", repr(data[end:end + 300]), file=sys.stderr)
            continue
        num = int(mo.group(1))
        url = url_overrides.get(name, url)
        try:
            # Respect robots.txt before accepting the comic.
            if "/d/" not in url:
                check_robotstxt(url + "d/", session)
            else:
                check_robotstxt(url, session)
        except IOError:
            print("INFO: robots.txt denied for comicgenesis", repr(name))
            continue
        else:
            res[name] = (url, num)
def lbest_idx(state, idx):
    """lbest neighbourhood topology function.

    Neighbourhood size is determined by ``state.params['n_s']``.

    Args:
        state: cipy.algorithms.pso.State: The state of the PSO algorithm.
        idx: int: index of the particle in the swarm.

    Returns:
        int: The index of the lbest particle.
    """
    swarm = state.swarm
    neighbourhood_size = state.params['n_s']
    is_better = comparator(swarm[0].best_fitness)
    best = None
    for candidate in __lbest_indices__(len(swarm), neighbourhood_size, idx):
        if best is None or is_better(swarm[candidate].best_fitness,
                                     swarm[best].best_fitness):
            best = candidate
    return best
def parse_250_row(row: list) -> BasicMeterData:
    """Parse a basic meter data record (row type 250).

    Positional fields are passed straight through except the numeric
    readings (indices 8, 13, 18), which are converted to float, and the
    timestamp fields (indices 9, 14, 21, 22), which are parsed to
    datetimes. Index 0 (the record indicator) is skipped.
    """
    return BasicMeterData(row[1], row[2], row[3], row[4], row[5], row[6],
                          row[7],
                          float(row[8]), parse_datetime(row[9]),
                          row[10], row[11], row[12],
                          float(row[13]), parse_datetime(row[14]),
                          row[15], row[16], row[17],
                          float(row[18]),
                          row[19], row[20],
                          parse_datetime(row[21]), parse_datetime(row[22]))
def inherit_docstrings(cls):
    """Class decorator for inheriting docstrings.

    Automatically inherits base class doc-strings if not present in the
    derived class.
    """
    @functools.wraps(cls)
    def _inherit_docstrings(cls):
        # ClassType covers Python 2 old-style classes via colorise.compat.
        if not isinstance(cls, (type, colorise.compat.ClassType)):
            raise RuntimeError("Type is not a class")
        for name, value in colorise.compat.iteritems(vars(cls)):
            if isinstance(getattr(cls, name), types.MethodType):
                if not getattr(value, '__doc__', None):
                    for base in cls.__bases__:
                        basemethod = getattr(base, name, None)
                        # NOTE(review): this checks the *base class's*
                        # __doc__, not basemethod.__doc__, before copying
                        # the method docstring — possibly a bug; confirm
                        # the intended condition.
                        if basemethod and getattr(base, '__doc__', None):
                            value.__doc__ = basemethod.__doc__
        return cls
    return _inherit_docstrings(cls)
def fetch_query_from_pgdb(self, qname, query, con, cxn, limit=None,
                          force=False):
    """Run *query* against Postgres and cache the result to a local file.

    Supply either an already-established connection, or connection
    parameters. The supplied connection overrides any separate cxn
    parameter. The download is skipped when the local file's row count
    already matches the remote count (unless *force* is set).

    :param qname: The name of the query to save the output to
    :param query: The SQL query itself
    :param con: The already-established connection
    :param cxn: The postgres connection information (dict with host,
        database, port, user, password)
    :param limit: If you only want a subset of rows from the query
    :param force: Re-download even if the local copy looks current
    :return: None
    """
    if con is None and cxn is None:
        LOG.error("ERROR: you need to supply connection information")
        return
    if con is None and cxn is not None:
        con = psycopg2.connect(host=cxn['host'], database=cxn['database'],
                               port=cxn['port'], user=cxn['user'],
                               password=cxn['password'])
    outfile = '/'.join((self.rawdir, qname))
    cur = con.cursor()
    # Wrap the query to get the count of rows it would return.
    countquery = ' '.join(("SELECT COUNT(*) FROM (", query, ") x"))
    if limit is not None:
        countquery = ' '.join((countquery, "LIMIT", str(limit)))
    # Assume that if the row counts match, the table is unchanged.
    # (Strictly, equal counts only show the data *could* be the same;
    # a digest comparison would be needed to be certain.)
    filerowcount = -1
    tablerowcount = -1
    if not force:
        if os.path.exists(outfile):
            filerowcount = self.file_len(outfile)
            LOG.info("INFO: rows in local file: %s", filerowcount)
        cur.execute(countquery)
        tablerowcount = cur.fetchone()[0]
    # filerowcount - 1 because the local file has a header line.
    if force or filerowcount < 0 or (filerowcount - 1) != tablerowcount:
        if force:
            LOG.info("Forcing download of %s", qname)
        else:
            LOG.info("%s local (%s) different from remote (%s); fetching.",
                     qname, filerowcount, tablerowcount)
        LOG.debug("COMMAND:%s", query)
        outputquery = """
COPY ({0}) TO STDOUT WITH DELIMITER AS '\t' CSV HEADER""".format(query)
        with open(outfile, 'w') as f:
            cur.copy_expert(outputquery, f)
        # Regenerate row count to check integrity of the download.
        filerowcount = self.file_len(outfile)
        # FIX: the original passed printf-style arguments to Exception(),
        # which never formats them — build the message explicitly.
        source = cxn['host'] + ':' + cxn['database']
        if (filerowcount - 1) < tablerowcount:
            raise Exception(
                "Download from %s failed, %s != %s" %
                (source, filerowcount - 1, tablerowcount))
        elif (filerowcount - 1) > tablerowcount:
            LOG.warning(
                "Fetched from %s more rows in file (%s) than reported in "
                "count(%s)", source, filerowcount - 1, tablerowcount)
    else:
        LOG.info("local data same as remote; reusing.")
    return
def connect_signals(self, target):
    """This is deprecated. Pass your controller to connect signals the
    old way.

    Raises RuntimeError on a second call — GtkBuilder can only wire
    signals once.
    """
    if self.connected:
        raise RuntimeError("GtkBuilder can only connect signals once")
    self.builder.connect_signals(target)
    self.connected = True
def new(cls, alias, certs, key, key_format='pkcs8'):
    """Helper function to create a new PrivateKeyEntry.

    :param str alias: The alias for the Private Key Entry
    :param list certs: A list of certificates, as byte strings. The first
        one should be the one belonging to the private key, the others
        the chain (in correct order).
    :param str key: A byte string containing the private key in the
        format specified in the key_format parameter (default pkcs8).
    :param str key_format: The format of the provided private key. Valid
        options are pkcs8 or rsa_raw. Defaults to pkcs8.

    :returns: A loaded :class:`PrivateKeyEntry` instance, ready to be
        placed in a keystore.

    :raises UnsupportedKeyFormatException: If the key format is
        unsupported.
    """
    # Keystore timestamps are in milliseconds since the epoch.
    timestamp = int(time.time()) * 1000
    cert_chain = []
    for cert in certs:
        cert_chain.append(('X.509', cert))
    pke = cls(timestamp=timestamp,
              # Alias must be lower case or it will corrupt the keystore
              # for Java Keytool and Keytool Explorer.
              alias=alias.lower(),
              cert_chain=cert_chain)
    if key_format == 'pkcs8':
        # Key is already PKCS#8: decode to extract algorithm OID and the
        # raw key octets.
        private_key_info = decoder.decode(
            key, asn1Spec=rfc5208.PrivateKeyInfo())[0]
        pke._algorithm_oid = private_key_info[
            'privateKeyAlgorithm']['algorithm'].asTuple()
        pke.pkey = private_key_info['privateKey'].asOctets()
        pke.pkey_pkcs8 = key
    elif key_format == 'rsa_raw':
        pke._algorithm_oid = RSA_ENCRYPTION_OID
        # We must encode it to pkcs8
        private_key_info = rfc5208.PrivateKeyInfo()
        private_key_info.setComponentByName('version', 'v1')
        a = AlgorithmIdentifier()
        a.setComponentByName('algorithm', pke._algorithm_oid)
        # '\x05\x00' is the DER encoding of ASN.1 NULL parameters.
        a.setComponentByName('parameters', '\x05\x00')
        private_key_info.setComponentByName('privateKeyAlgorithm', a)
        private_key_info.setComponentByName('privateKey', key)
        pke.pkey_pkcs8 = encoder.encode(private_key_info, ifNotEmpty=True)
        pke.pkey = key
    else:
        raise UnsupportedKeyFormatException(
            "Key Format '%s' is not supported" % key_format)
    return pke
def _set_complete_option ( cls ) : """Check and set complete option ."""
get_config = cls . context . get_config complete = get_config ( 'complete' , None ) if complete is None : conditions = [ get_config ( 'transitions' , False ) , get_config ( 'named_transitions' , False ) , ] complete = not any ( conditions ) cls . context . new_meta [ 'complete' ] = complete
def inventory(self, inventory_name):
    """Decorator factory: register the decorated filter function for the
    given inventory.

    :param inventory_name: identifier of the inventory the filter
        applies to.
    :return: a decorator that registers its function via ``self.add``
        and returns it unchanged.

    .. code-block:: python

        tic = CtsTextInventoryCollection()
        latin = CtsTextInventoryMetadata("urn:perseus:latinLit", parent=tic)
        latin.set_label("Classical Latin", "eng")
        dispatcher = CollectionDispatcher(tic)

        @dispatcher.inventory("urn:perseus:latinLit")
        def dispatchLatinLit(collection, path=None, **kwargs):
            if collection.id.startswith("urn:cts:latinLit:"):
                return True
            return False
    """
    def decorator(func):
        self.add(func=func, inventory_name=inventory_name)
        return func
    return decorator
def updated(self, user, options):
    """True if the issue was commented on by *user* within the
    [since, until) date range."""
    for comment in self.comments:
        posted = dateutil.parser.parse(comment["created"]).date()
        try:
            author_email = comment["author"]["emailAddress"]
        except KeyError:
            # Comment without author/email info cannot match.
            continue
        if (author_email == user.email
                and options.since.date <= posted < options.until.date):
            return True
    return False
def pause(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
    """Send the scrobble "pause" action.

    Use this method when the video is paused. The playback progress is
    saved so :code:`Trakt['sync/playback'].get()` can resume the video
    from this exact position. Un-pause by calling
    :code:`Trakt['scrobble'].start()` again.

    :param movie: Movie definition (or `None`), e.g.
        ``{'title': ..., 'year': ..., 'ids': {...}}``
    :type movie: :class:`~python:dict`
    :param show: Show definition (or `None`)
    :type show: :class:`~python:dict`
    :param episode: Episode definition (or `None`), e.g.
        ``{'season': 3, 'number': 11}``
    :type episode: :class:`~python:dict`
    :param progress: Current movie/episode progress percentage
    :type progress: :class:`~python:float`
    :param kwargs: Extra request options
    :type kwargs: :class:`~python:dict`
    :return: Response (or `None`)
    :rtype: :class:`~python:dict`
    """
    return self.action('pause', movie, show, episode, progress, **kwargs)
def add_dos(self, label, dos):
    """Add a DOS for plotting.

    Args:
        label: label for the DOS. Must be unique.
        dos: Dos object
    """
    if self.zero_at_efermi:
        # Shift energies so the Fermi level sits at zero.
        energies = dos.energies - dos.efermi
    else:
        energies = dos.energies
    if self.sigma:
        densities = dos.get_smeared_densities(self.sigma)
    else:
        densities = dos.densities
    self._doses[label] = {'energies': energies,
                          'densities': densities,
                          'efermi': dos.efermi}
def do_identity(args):
    """Dispatch the 'identity' subcommands to their handlers.

    Raises AssertionError for an unrecognized subcommand (argparse should
    normally prevent this from being reached).
    """
    if args.subcommand == 'policy':
        if args.policy_cmd == 'create':
            _do_identity_policy_create(args)
            return
        if args.policy_cmd == 'list':
            _do_identity_policy_list(args)
            return
    elif args.subcommand == 'role':
        if args.role_cmd == 'create':
            _do_identity_role_create(args)
            return
        if args.role_cmd == 'list':
            _do_identity_role_list(args)
            return
    raise AssertionError(
        '"{}" is not a valid subcommand of "identity"'.format(
            args.subcommand))
def send_wsgi_response(status, headers, content, start_response,
                       cors_handler=None):
    """Dump reformatted response to CGI start_response.

    This calls start_response and returns the response body.

    Args:
        status: A string containing the HTTP status code to send.
        headers: A list of (header, value) tuples, the headers to send in
            the response.
        content: A string containing the body content to write.
        start_response: A function with semantics defined in PEP-333.
        cors_handler: A handler to process CORS request headers and update
            the headers in the response. Or None, to bypass CORS checks.

    Returns:
        A string containing the response body.
    """
    if cors_handler:
        cors_handler.update_headers(headers)
    # Replace any existing Content-Length with the actual body length.
    body_length = len(content) if content else 0
    headers = [(name, value) for name, value in headers
               if name.lower() != 'content-length']
    headers.append(('Content-Length', '%s' % body_length))
    start_response(status, headers)
    return content
def convertImagesToPIL(self, images, dither, nq=0):
    """convertImagesToPIL(images, nq=0)

    Convert images to paletted PIL images, which can then be written to a
    single animated GIF. ``nq`` is accepted for API compatibility; this
    path uses PIL's adaptive palette quantization.
    """
    # Pass 1: normalize inputs to PIL images.
    pil_frames = []
    for frame in images:
        if isinstance(frame, Image.Image):
            pil_frames.append(frame)
        elif np and isinstance(frame, np.ndarray):
            if frame.ndim == 3 and frame.shape[2] == 3:
                frame = Image.fromarray(frame, 'RGB')
            elif frame.ndim == 3 and frame.shape[2] == 4:
                # Drop the alpha channel.
                frame = Image.fromarray(frame[:, :, :3], 'RGB')
            elif frame.ndim == 2:
                frame = Image.fromarray(frame, 'L')
            pil_frames.append(frame)
    # Pass 2: quantize each frame with PIL's adaptive palette.
    return [frame.convert('P', palette=Image.ADAPTIVE, dither=dither)
            for frame in pil_frames]
def parse(cls, backend, ik, spk, spk_signature, otpks):
    """Build a bundle from raw data retrieved directly from a PEP node.

    Applies an additional decoding step to the public keys in the bundle.
    Pass the same structure as the constructor expects.
    """
    decode = backend.decodePublicKey
    ik = decode(ik)[0]
    spk["key"] = decode(spk["key"])[0]
    otpks = [{"key": decode(otpk["key"])[0], "id": otpk["id"]}
             for otpk in otpks]
    return cls(ik, spk, spk_signature, otpks)
def discover(app, module_name=None):
    """Automatically apply the permission logics written in the specified
    module.

    Examples
    --------
    Assume you have a ``perms.py`` in ``your_app`` as::

        from permission.logics import AuthorPermissionLogic
        PERMISSION_LOGICS = (
            ('your_app.your_model', AuthorPermissionLogic),
        )

    Use this method to apply the permission logics enumerated in the
    ``PERMISSION_LOGICS`` variable like:

        >>> discover('your_app')
    """
    from permission.compat import import_module
    from permission.compat import get_model
    from permission.conf import settings
    from permission.utils.logics import add_permission_logic

    variable_name = settings.PERMISSION_AUTODISCOVER_VARIABLE_NAME
    module_name = module_name or settings.PERMISSION_AUTODISCOVER_MODULE_NAME
    module = import_module('%s.%s' % (app, module_name))
    if not hasattr(module, variable_name):
        return
    # Apply each declared (model, logic) pair.
    for model, permission_logic in getattr(module, variable_name):
        if isinstance(model, six.string_types):
            # Resolve "app_label.ModelName" strings to model classes.
            model = get_model(*model.split('.', 1))
        add_permission_logic(model, permission_logic)
def _subtask_result ( self , idx , value ) : """Receive a result from a single subtask ."""
self . _results [ idx ] = value if len ( self . _results ) == self . _num_tasks : self . set_result ( [ self . _results [ i ] for i in range ( self . _num_tasks ) ] )
def get_psf_sky(self, ra, dec):
    """Determine the local psf at a given sky location, in degrees.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    a, b, pa : float
        The psf semi-major axis, semi-minor axis, and position angle in
        (degrees). If a psf map is defined its value is returned,
        otherwise the image restoring beam is returned.
    """
    if self.data is None:
        # No psf map: fall back to the beam from the fits header
        # (including ZA scaling).
        beam = self.wcshelper.get_beam(ra, dec)
        return beam.a, beam.b, beam.pa
    x, y = self.sky2pix([ra, dec])
    # Clamp pixel coordinates to the map bounds; interpolation is left to
    # whoever produces these maps.
    x = int(np.clip(x, 0, self.data.shape[1] - 1))
    y = int(np.clip(y, 0, self.data.shape[2] - 1))
    return self.data[:, x, y]
def verify(self, message, signature):
    """Verify the signature attached to *message* using NTLM2 Session
    Security.

    :param message: The message whose signature will be verified
    :param signature: The packed signature header received with it
    :raises Exception: if the sequence number is out of order or the
        checksum does not match (message altered)
    """
    # Parse the signature header.
    mac = _Ntlm2MessageSignature()
    mac.from_string(signature)
    # Validate the sequence number.
    if mac['sequence'] != self.incoming_sequence:
        raise Exception(
            "The message was not received in the correct sequence.")
    # Extract the supplied checksum (8 bytes, little-endian).
    checksum = struct.pack('<q', mac['checksum'])
    if self.key_exchange:
        checksum = self.incoming_seal.update(checksum)
    # Calculate the expected checksum for the message.
    # FIX: hmac.new() requires an explicit digestmod on Python 3.8+;
    # NTLM2 session security mandates MD5 here.
    hmac_context = hmac.new(self.incoming_signing_key, digestmod='md5')
    hmac_context.update(
        struct.pack('<i', self.incoming_sequence) + message)
    expected_checksum = hmac_context.digest()[:8]
    # Validate the supplied checksum is correct.
    if checksum != expected_checksum:
        raise Exception("The message has been altered")
    self.incoming_sequence += 1
def to_text(self):
    """Render the row as plain text.

    :returns: The plain text representation of the row, delimited by
        ``---`` markers with cells joined by ", ".
    :rtype: basestring
    """
    rendered_cells = ', '.join(cell.to_text() for cell in self.cells)
    return '---\n' + rendered_cells + '---'
def main():
    """The main function of the script: generate benchmark files from the
    templates in --src into --out, plus one JSON manifest per template."""
    parser = argparse.ArgumentParser(description='Generate files to benchmark')
    parser.add_argument('--src', dest='src_dir', default='src',
                        help='The directory containing the templates')
    parser.add_argument('--out', dest='out_dir', default='generated',
                        help='The output directory')
    parser.add_argument('--seed', dest='seed', default='13',
                        help='The random seed (to ensure consistent regeneration)')
    args = parser.parse_args()

    # Fixed seed keeps regenerated files identical across runs.
    random.seed(int(args.seed))
    mkdir_p(args.out_dir)
    for template in templates_in(args.src_dir):
        modes = template.modes()
        n_range = template.range()
        for n_value in n_range:
            base = template.instantiate(n_value)
            for mode in modes:
                write_file(
                    os.path.join(args.out_dir,
                                 out_filename(template, n_value, mode)),
                    mode.convert_from(base))
        manifest = {
            'files': {n: {m.identifier: out_filename(template, n, m)
                          for m in modes}
                      for n in n_range},
            'name': template.name,
            'x_axis_label': template.property('x_axis_label'),
            'desc': template.property('desc'),
            'modes': {m.identifier: m.description() for m in modes},
        }
        write_file(
            os.path.join(args.out_dir, '{0}.json'.format(template.name)),
            json.dumps(manifest))
def dump_seek(self, reading_id):
    """Seek the dump streamer to a given reading ID.

    Returns:
        (int, int, int): Two error codes and the count of remaining
        readings. The first error code covers the seeking process. The
        second error code covers the stream counting process (cannot
        fail). The third item is the number of readings left in the
        stream.
    """
    if self.dump_walker is None:
        return (pack_error(ControllerSubsystem.SENSOR_LOG,
                           SensorLogError.STREAM_WALKER_NOT_INITIALIZED),
                Error.NO_ERROR, 0)
    try:
        exact = self.dump_walker.seek(reading_id, target='id')
    except UnresolvedIdentifierError:
        return (pack_error(ControllerSubsystem.SENSOR_LOG,
                           SensorLogError.NO_MORE_READINGS),
                Error.NO_ERROR, 0)
    error = Error.NO_ERROR
    if not exact:
        error = pack_error(ControllerSubsystem.SENSOR_LOG,
                           SensorLogError.ID_FOUND_FOR_ANOTHER_STREAM)
    # FIX: was ``error.NO_ERROR`` — an attribute lookup on the local error
    # code, which fails when it is a packed error value. The second slot
    # must always be the constant ``Error.NO_ERROR``.
    return (error, Error.NO_ERROR, self.dump_walker.count())
def time(lancet, issue):
    """Start an Harvest timer for the given issue.

    The started timer is linked to the issue tracker page of the issue.
    If no issue is passed to the command, it is taken from the currently
    active branch.
    """
    resolved = get_issue(lancet, issue)

    with taskstatus("Starting harvest timer") as status:
        lancet.timer.start(resolved)
        status.ok("Started harvest timer")
def exec_rabbitmqctl_list(self, resources, args=None, rabbitmq_opts=None):
    """Execute a ``rabbitmqctl`` command to list the given resources.

    :param resources: the resources to list, e.g. ``'vhosts'``
    :param args: a list of args for the command (defaults to no extra args)
    :param rabbitmq_opts: a list of extra options to pass to ``rabbitmqctl``
        (defaults to ``['-q', '--no-table-headers']``)
    :returns: a tuple of the command exit code and output
    """
    # BUG FIX: the defaults were mutable lists shared across every call.
    if args is None:
        args = []
    if rabbitmq_opts is None:
        rabbitmq_opts = ['-q', '--no-table-headers']
    command = 'list_{}'.format(resources)
    return self.exec_rabbitmqctl(command, args, rabbitmq_opts)
def get_products(self, product_ids):
    """Fetch the given products from the product set.

    .. deprecated:: This function (and the backend API) is being obsoleted.
       Don't use it anymore.

    :param product_ids: ids of the products to fetch.
    :raises ValueError: if no product set id has been configured.
    """
    if self.product_set_id is None:
        raise ValueError('product_set_id must be specified')

    payload = {'ids': product_ids}
    return self.client.get(self.base_url + '/products', json=payload)
def is_number_type_geographical(num_type, country_code):
    """Tests whether a phone number has a geographical association.

    The association is determined from the number's type and the country it
    belongs to. This variant exists because calculating the phone number type
    is expensive; if the type has already been computed we don't want to do
    it again.
    """
    if num_type == PhoneNumberType.FIXED_LINE:
        return True
    if num_type == PhoneNumberType.FIXED_LINE_OR_MOBILE:
        return True
    # Mobile numbers are geographical only in a handful of countries.
    return (num_type == PhoneNumberType.MOBILE and
            country_code in _GEO_MOBILE_COUNTRIES)
def dry_lapse(pressure, temperature, ref_pressure=None):
    r"""Calculate the temperature at a level assuming only dry processes.

    This function lifts a parcel starting at `temperature`, conserving
    potential temperature. The starting pressure can be given by
    `ref_pressure`.

    Parameters
    ----------
    pressure : `pint.Quantity`
        The atmospheric pressure level(s) of interest
    temperature : `pint.Quantity`
        The starting temperature
    ref_pressure : `pint.Quantity`, optional
        The reference pressure. If not given, it defaults to the first
        element of the pressure array.

    Returns
    -------
    `pint.Quantity`
        The resulting parcel temperature at levels given by `pressure`

    See Also
    --------
    moist_lapse, parcel_profile, potential_temperature
    """
    if ref_pressure is None:
        ref_pressure = pressure[0]
    # Poisson's equation: T = T0 * (p / p0) ** kappa
    pressure_ratio = pressure / ref_pressure
    return temperature * pressure_ratio ** mpconsts.kappa
def frame(self, frame):
    """Return a path to the given frame in the sequence.

    Integer or string digits are treated as a frame number and padding is
    applied; all other values are passed through.

    Examples:
        >>> seq.frame(1)
        /foo/bar.0001.exr
        >>> seq.frame("#")
        /foo/bar.#.exr

    Args:
        frame (int or str): the desired frame number or a char to pass
            through (ie. #)

    Returns:
        str:
    """
    try:
        padded = str(int(frame)).zfill(self._zfill)
    except ValueError:
        # Not numeric: pass the placeholder through untouched.
        padded = frame

    # There may have been no placeholder for frame IDs in the sequence,
    # in which case we don't want to insert a frame ID.
    if self._zfill == 0:
        padded = ""

    return "".join((self._dir, self._base, padded, self._ext))
def escape_latex(text):
    r"""Escape characters of given text.

    This function takes the given text and escapes characters that have a
    special meaning in LaTeX: # $ % ^ & _ { } ~ \

    NOTE(review): Python 2 only -- relies on the ``unicode`` builtin and on
    ``str.decode``; *text* is assumed to be a UTF-8 encoded byte string and
    a UTF-8 encoded byte string is returned.
    """
    # Decode so we iterate over characters rather than raw bytes.
    text = unicode(text.decode('utf-8'))
    # Per-character replacement table. Because the substitution below is a
    # single pass, backslashes introduced by an escape are never re-escaped.
    CHARS = {
        '&': r'\&',
        '%': r'\%',
        '$': r'\$',
        '#': r'\#',
        '_': r'\_',
        '{': r'\{',
        '}': r'\}',
        '~': r'\~{}',
        '^': r'\^{}',
        '\\': r'\textbackslash{}',
    }
    escaped = "".join([CHARS.get(char, char) for char in text])
    return escaped.encode('utf-8')
def partialclass(cls, *args, **kwargs):
    """Returns a partially instantiated class.

    A subclass of *cls* is created whose ``__init__`` has the given
    positional and keyword arguments pre-applied (via
    :func:`functools.partialmethod`).

    :return: A partial class instance
    :rtype: cls

    Example::

        source = partialclass(Source, url="https://pypi.org/simple")
        new_source = source(name="pypi")
    """
    # Pick the first available name attribute, falling back to str(cls).
    candidates = [
        n
        for n in (getattr(cls, attr, str(cls))
                  for attr in ("__name__", "__qualname__"))
        if n is not None
    ]
    new_name = candidates[0]

    partial_init = partialmethod(cls.__init__, *args, **kwargs)
    type_ = type(new_name, (cls,), {"__init__": partial_init})

    # Swiped from attrs.make_class: report the caller's module, not ours.
    try:
        type_.__module__ = sys._getframe(1).f_globals.get("__name__",
                                                          "__main__")
    except (AttributeError, ValueError):  # pragma: no cover
        pass  # pragma: no cover
    return type_
def get_root(w):
    """Return the root (top-level) widget for *w* by walking master links."""
    widget = w
    while widget.master:
        widget = widget.master
    return widget
def slang_date(self, locale="en"):
    """Returns human slang representation of date.

    Keyword Arguments:
        locale -- locale to translate to, e.g. 'fr' for french.
                  (default: 'en' - English)
    """
    dt = pendulum.instance(self.datetime())

    # Try a locale-aware slang phrase first; fall back to a formatted date
    # when no translation entry exists for this delta/locale.
    try:
        return _translate(dt, locale)
    except KeyError:
        pass

    delta = humanize.time.abs_timedelta(
        timedelta(seconds=(self.epoch - now().epoch)))

    format_string = "DD MMM"
    if delta.days >= 365:
        # Only show the year when the date is more than a year away.
        format_string += " YYYY"

    return dt.format(format_string, locale=locale).title()
def load_history(self, obj=None):
    """Load history from a text file in user home directory.

    Reads ``self.LOG_PATH`` line by line, stripping trailing newlines.

    Returns:
        list: one entry per line; empty when the file does not exist.
    """
    history = []
    if osp.isfile(self.LOG_PATH):
        # BUG FIX: use a context manager so the file handle is always
        # released (the original opened the file and never closed it).
        with open(self.LOG_PATH, 'r') as fileobj:
            history = [line.replace('\n', '') for line in fileobj.readlines()]
    return history
def enable_unique_tokens(self):
    """Enable unique access tokens on every grant type that supports it."""
    # Grant types lacking a `unique_token` attribute are silently skipped.
    for grant in self.grant_types:
        if hasattr(grant, "unique_token"):
            grant.unique_token = True
def sdk_normalize(filename):
    """Normalize a path to strip out the SDK portion.

    Normally used so that it can be decided whether the path is in a system
    location or not.
    """
    if not filename.startswith('/Developer/SDKs/'):
        return filename
    components = filename.split('/')
    # Remove the 'Developer', 'SDKs' and '<sdk name>' path components.
    del components[1:4]
    return '/'.join(components)
def find_discrete(start_time, end_time, f, epsilon=EPSILON, num=12):
    """Find the times when a function changes value.

    Searches between ``start_time`` and ``end_time``, which should both be
    :class:`~skyfield.timelib.Time` objects, for the occasions where the
    function ``f`` changes from one value to another. Use this to search
    for events like sunrise or moon phases.

    A tuple of two arrays is returned. The first array gives the times at
    which the input function changes, and the second array specifies the new
    value of the function at each corresponding time.

    This is an expensive operation as it needs to repeatedly call the
    function to narrow down the times that it changes. It continues
    searching until it knows each time to at least an accuracy of
    ``epsilon`` Julian days. At each step, it creates an array of ``num``
    new points between the lower and upper bound that it has established
    for each transition. These two values can be changed to tune the
    behavior of the search.
    """
    ts = start_time.ts
    jd0 = start_time.tt
    jd1 = end_time.tt
    if jd0 >= jd1:
        raise ValueError('your start_time {0} is later than your end_time {1}'.format(start_time, end_time))

    # Sample at least one rough period of f, `num` points per period.
    periods = (jd1 - jd0) / f.rough_period
    if periods < 1.0:
        periods = 1.0

    # BUG FIX: `periods * num // 1.0` produced a *float* sample count, which
    # modern NumPy rejects for linspace(); int() gives the same floor value.
    jd = linspace(jd0, jd1, int(periods * num))

    end_mask = linspace(0.0, 1.0, num)
    start_mask = end_mask[::-1]
    o = multiply.outer

    while True:
        t = ts.tt_jd(jd)
        y = f(t)
        indices = flatnonzero(diff(y))
        if not len(indices):
            # No transitions at all in the interval.
            return indices, y[0:0]
        starts = jd.take(indices)
        ends = jd.take(indices + 1)

        # Since we start with equal intervals, they all should fall
        # below epsilon at around the same time; so for efficiency we
        # only test the first pair.
        if ends[0] - starts[0] <= epsilon:
            break

        # Re-sample `num` points inside each bracketing interval.
        jd = o(starts, start_mask).flatten() + o(ends, end_mask).flatten()

    return ts.tt_jd(ends), y.take(indices + 1)
def walk_nodes(self, node, original):
    """Iterate over the nodes recursively yielding the templatetag 'sass_src'.

    Generator: yields every ``SassSrcNode`` whose ``is_sass`` flag is set,
    descending into nested template nodes.
    """
    try:
        # try with django-compressor < 2.1 (no `context` keyword)
        nodelist = self.parser.get_nodelist(node, original=original)
    except TypeError:
        # django-compressor >= 2.1 requires a `context` argument
        nodelist = self.parser.get_nodelist(node, original=original, context=None)
    for node in nodelist:
        if isinstance(node, SassSrcNode):
            if node.is_sass:
                yield node
        else:
            # Recurse into non-sass nodes to find nested sass_src tags.
            for node in self.walk_nodes(node, original=original):
                yield node
def dict_row_strategy(column_names):
    """Dict row strategy: rows are returned as dictionaries.

    Columns without a name use their positional index as the key.
    """
    keys = [name if name else idx for idx, name in enumerate(column_names)]

    def row_factory(row):
        return dict(zip(keys, row))

    return row_factory
def load_api_folder(api_folder_path):
    """load api definitions from api folder.

    Args:
        api_folder_path (str): api files folder. Each api file contains one
            or more items of the form::

                "api": {
                    "def": "api_login",
                    "request": {},
                    "validate": []
                }

    Returns:
        dict: api definition mapping, keyed by api id (the "id"/"def"/"name"
        field for list-style files, or the file path for dict-style files).

    Raises:
        exceptions.ParamsError: on a malformed or duplicated api definition.
    """
    api_definition_mapping = {}
    api_items_mapping = load_folder_content(api_folder_path)
    for api_file_path, api_items in api_items_mapping.items():
        # TODO: add JSON schema validation
        if isinstance(api_items, list):
            # List-style file: each item is a one-key mapping {"api": {...}}.
            for api_item in api_items:
                key, api_dict = api_item.popitem()
                # Accept any of the three historical id fields.
                api_id = api_dict.get("id") or api_dict.get("def") or api_dict.get("name")
                if key != "api" or not api_id:
                    raise exceptions.ParamsError(
                        "Invalid API defined in {}".format(api_file_path))
                if api_id in api_definition_mapping:
                    raise exceptions.ParamsError(
                        "Duplicated API ({}) defined in {}".format(api_id, api_file_path))
                else:
                    api_definition_mapping[api_id] = api_dict
        elif isinstance(api_items, dict):
            # Dict-style file: the whole file is one api, keyed by its path.
            if api_file_path in api_definition_mapping:
                raise exceptions.ParamsError(
                    "Duplicated API defined: {}".format(api_file_path))
            else:
                api_definition_mapping[api_file_path] = api_items
    return api_definition_mapping
def get_list_header(self):
    """Creates a list of dictionaries describing the table header.

    Each entry carries the field name, label and -- for orderable fields --
    the order_url and order_direction, which simplifies rendering a table
    in a template.
    """
    header = []
    for field in self.get_fields():
        if isinstance(field, tuple):
            # custom property that is not a field of the model: (name, label)
            entry = {"name": field[0], "label": field[1]}
        else:
            entry = {"name": field, "label": field.title()}

        if entry["name"] in self.get_ordering_fields():
            entry["order_url"], entry["order_direction"] = \
                self.ordering_url(entry["name"])

        header.append(entry)
    return header
def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf,
        xtol=None, ftol=None, gtol=None):
    """Optimisation through Scaled Conjugate Gradients (SCG)

    f: the objective function
    gradf: the gradient function (should return a 1D np.ndarray)
    x: the initial condition

    Returns
    x the optimal value for x
    flog : a list of all the objective values
    function_eval number of fn evaluations
    status: string describing convergence status

    NOTE(review): `max_f_eval` is accepted but never consulted -- only
    `maxiters` bounds the search.  TODO confirm whether that is intended.
    """
    # Default tolerances for step size, objective change and gradient norm.
    if xtol is None:
        xtol = 1e-6
    if ftol is None:
        ftol = 1e-6
    if gtol is None:
        gtol = 1e-5
    sigma0 = 1.0e-7  # finite-difference step scale for curvature estimates
    fold = f(x, *optargs)  # Initial function value.
    function_eval = 1
    fnow = fold
    gradnew = gradf(x, *optargs)  # Initial gradient.
    function_eval += 1
    # if any(np.isnan(gradnew)):
    # raise UnexpectedInfOrNan, "Gradient contribution resulted in a NaN value"
    current_grad = np.dot(gradnew, gradnew)
    gradold = gradnew.copy()
    d = -gradnew  # Initial search direction.
    success = True  # Force calculation of directional derivs.
    nsuccess = 0  # nsuccess counts number of successes.
    beta = 1.0  # Initial scale parameter.
    betamin = 1.0e-15  # Lower bound on scale.
    betamax = 1.0e15  # Upper bound on scale.
    status = "Not converged"

    flog = [fold]

    iteration = 0

    # Main optimization loop.
    while iteration < maxiters:

        # Calculate first and second directional derivatives.
        if success:
            mu = np.dot(d, gradnew)
            if mu >= 0:  # pragma: no cover
                # d is not a descent direction: restart along -gradient.
                d = -gradnew
                mu = np.dot(d, gradnew)
            kappa = np.dot(d, d)
            sigma = sigma0 / np.sqrt(kappa)
            xplus = x + sigma * d
            gplus = gradf(xplus, *optargs)
            function_eval += 1
            # Finite-difference estimate of curvature along d.
            theta = np.dot(d, (gplus - gradnew)) / sigma

        # Increase effective curvature and evaluate step size alpha.
        delta = theta + beta * kappa
        if delta <= 0:  # pragma: no cover
            # Make the Hessian model positive definite.
            delta = beta * kappa
            beta = beta - theta / kappa

        alpha = -mu / delta

        # Calculate the comparison ratio.
        xnew = x + alpha * d
        fnew = f(xnew, *optargs)
        function_eval += 1

        Delta = 2. * (fnew - fold) / (alpha * mu)
        if Delta >= 0.:
            # The step reduced the objective: accept it.
            success = True
            nsuccess += 1
            x = xnew
            fnow = fnew
        else:
            success = False
            fnow = fold

        # Store relevant variables
        flog.append(fnow)  # Current function value

        iteration += 1

        if success:
            # Test for termination

            if (np.abs(fnew - fold) < ftol):
                status = 'converged - relative reduction in objective'
                break
                # return x, flog, function_eval, status
            elif (np.max(np.abs(alpha * d)) < xtol):
                status = 'converged - relative stepsize'
                break
            else:
                # Update variables for new position
                gradold = gradnew
                gradnew = gradf(x, *optargs)
                function_eval += 1
                current_grad = np.dot(gradnew, gradnew)
                fold = fnew
                # If the gradient is zero then we are done.
                if current_grad <= gtol:
                    status = 'converged - relative reduction in gradient'
                    break
                    # return x, flog, function_eval, status

        # Adjust beta according to comparison ratio.
        if Delta < 0.25:
            beta = min(4.0 * beta, betamax)
        if Delta > 0.75:
            beta = max(0.25 * beta, betamin)

        # Update search direction using Polak-Ribiere formula, or re-start
        # in direction of negative gradient after nparams steps.
        if nsuccess == x.size:
            d = -gradnew
            beta = 1.  # This is not in the original paper
            nsuccess = 0
        elif success:
            Gamma = np.dot(gradold - gradnew, gradnew) / (mu)
            d = Gamma * d - gradnew
    else:
        # If we get here, then we haven't terminated in the given number of
        # iterations.
        status = "maxiter exceeded"

    return x, flog, function_eval, status
def fix_missing_locations(node):
    """Recursively fill in missing line numbers and column offsets.

    Some nodes require a line number and a column offset; without that
    information the compiler aborts compilation. This helper copies the
    parent's location onto any child that lacks one. Unlike `copy_location`
    it works recursively and won't touch nodes that already carry location
    information.
    """
    def _propagate(current, lineno, col_offset):
        if 'lineno' in current._attributes:
            if hasattr(current, 'lineno'):
                # Node already located: children inherit from it instead.
                lineno = current.lineno
            else:
                current.lineno = lineno
        if 'col_offset' in current._attributes:
            if hasattr(current, 'col_offset'):
                col_offset = current.col_offset
            else:
                current.col_offset = col_offset
        for child in iter_child_nodes(current):
            _propagate(child, lineno, col_offset)

    _propagate(node, 1, 0)
    return node
def _add_global_counter ( self ) : """Adds a global counter , called once for setup by @ property global _ step ."""
assert self . _global_step is None # Force this into the top - level namescope . Instead of forcing top - level # here , we could always call this in _ _ init _ _ ( ) and then keep whatever # namescopes are around then . with self . g . as_default ( ) , self . g . name_scope ( None ) : try : self . _global_step = self . g . get_tensor_by_name ( 'global_step:0' ) except KeyError : self . _global_step = tf . Variable ( 0 , name = 'global_step' , trainable = False )
def graph_memoized ( func ) : """Like memoized , but keep one cache per default graph ."""
# TODO it keeps the graph alive from . . compat import tfv1 GRAPH_ARG_NAME = '__IMPOSSIBLE_NAME_FOR_YOU__' @ memoized def func_with_graph_arg ( * args , ** kwargs ) : kwargs . pop ( GRAPH_ARG_NAME ) return func ( * args , ** kwargs ) @ functools . wraps ( func ) def wrapper ( * args , ** kwargs ) : assert GRAPH_ARG_NAME not in kwargs , "No Way!!" graph = tfv1 . get_default_graph ( ) kwargs [ GRAPH_ARG_NAME ] = graph return func_with_graph_arg ( * args , ** kwargs ) return wrapper
def get_autoscaling_group_properties(asg_client, env, service):
    """Gets the autoscaling group properties based on the service name.

    The autoscaling group is located with the following logic:
    1. If "{env}-{service}" matches the autoscaling group name
    2. If "{env}-{service}" matches the Name tag of the autoscaling group
    3. Otherwise, return None

    Args:
        asg_client: Instantiated boto3 autoscaling client
        env: Name of the environment to search for the autoscaling group
        service: Name of the service

    Returns:
        JSON object of the autoscaling group properties if it exists
    """
    candidate = "{}-{}".format(env, service)
    try:
        by_name = asg_client.describe_auto_scaling_groups(
            AutoScalingGroupNames=[candidate])
        if len(by_name["AutoScalingGroups"]) != 0:
            return by_name["AutoScalingGroups"]

        # Fall back to matching the Name tag of the autoscaling group.
        tags = asg_client.describe_tags(Filters=[
            {"Name": "Key", "Values": ["Name"]},
            {"Name": "Value", "Values": [candidate]},
        ])
        if len(tags["Tags"]) == 0:
            # Neither the group name nor the Name tag matched.
            return None
        tagged_name = tags["Tags"][0]["ResourceId"]
        by_tag = asg_client.describe_auto_scaling_groups(
            AutoScalingGroupNames=[tagged_name])
        return by_tag["AutoScalingGroups"]
    except ClientError as error:
        raise RuntimeError("Error in finding autoscaling group {} {}".format(env, service), error)
def AnalizarLiquidacion(self, aut, liq=None, ajuste=False):
    "Internal method to analyze the AFIP response (liquidation data)"
    # Process the basic liquidation data (as returned by "consultar"):
    if liq:
        # Map the AFIP camelCase fields onto snake_case output parameters.
        self.params_out = dict(
            pto_emision=liq.get('ptoEmision'),
            nro_orden=liq.get('nroOrden'),
            cuit_comprador=liq.get('cuitComprador'),
            nro_act_comprador=liq.get('nroActComprador'),
            nro_ing_bruto_comprador=liq.get('nroIngBrutoComprador'),
            cod_tipo_operacion=liq.get('codTipoOperacion'),
            es_liquidacion_propia=liq.get('esLiquidacionPropia'),
            es_canje=liq.get('esCanje'),
            cod_puerto=liq.get('codPuerto'),
            des_puerto_localidad=liq.get('desPuertoLocalidad'),
            cod_grano=liq.get('codGrano'),
            cuit_vendedor=liq.get('cuitVendedor'),
            nro_ing_bruto_vendedor=liq.get('nroIngBrutoVendedor'),
            actua_corredor=liq.get('actuaCorredor'),
            liquida_corredor=liq.get('liquidaCorredor'),
            cuit_corredor=liq.get('cuitCorredor'),
            comision_corredor=liq.get('comisionCorredor'),
            nro_ing_bruto_corredor=liq.get('nroIngBrutoCorredor'),
            fecha_precio_operacion=liq.get('fechaPrecioOperacion'),
            precio_ref_tn=liq.get('precioRefTn'),
            cod_grado_ref=liq.get('codGradoRef'),
            cod_grado_ent=liq.get('codGradoEnt'),
            factor_ent=liq.get('factorEnt'),
            precio_flete_tn=liq.get('precioFleteTn'),
            cont_proteico=liq.get('contProteico'),
            alic_iva_operacion=liq.get('alicIvaOperacion'),
            campania_ppal=liq.get('campaniaPPal'),
            cod_localidad_procedencia=liq.get('codLocalidadProcedencia'),
            cod_prov_procedencia=liq.get('codProvProcedencia'),
            datos_adicionales=liq.get('datosAdicionales'),
            peso_neto_sin_certificado=liq.get('pesoNetoSinCertificado'),
            cod_localidad_procedencia_sin_certificado=liq.get('codLocalidadProcedenciaSinCertificado'),
            cod_prov_procedencia_sin_certificado=liq.get('codProvProcedenciaSinCertificado'),
            certificados=[],
        )
        if ajuste:
            self.params_out.update(
                # adjustment-only fields:
                diferencia_peso_neto=liq.get('diferenciaPesoNeto'),
                diferencia_precio_operacion=liq.get('diferenciaPrecioOperacion'),
                cod_grado=liq.get('codGrado'),
                val_grado=liq.get('valGrado'),
                factor=liq.get('factor'),
                diferencia_precio_flete_tn=liq.get('diferenciaPrecioFleteTn'),
                concepto_importe_iva_0=liq.get('conceptoImporteIva0'),
                importe_ajustar_iva_0=liq.get('importeAjustarIva0'),
                concepto_importe_iva_105=liq.get('conceptoImporteIva105'),
                importe_ajustar_iva_105=liq.get('importeAjustarIva105'),
                concepto_importe_iva_21=liq.get('conceptoImporteIva21'),
                importe_ajustar_iva_21=liq.get('importeAjustarIva21'),
            )
            # Analyze the detail of adjusted amounts broken down by IVA rate
            # (the same output fields are reused for compatibility/consistency)
            for it in liq.get("importes", liq.get("importe")):
                # in LSG adjustments the amounts are not grouped in a subtype...
                if 'importeReturn' in it:
                    it = it['importeReturn'][0]  # TODO: review SOAP
                tasa = "iva_%s" % str(it['alicuota']).replace(".", "").strip()
                self.params_out["concepto_importe_%s" % tasa] = it['concepto']
                self.params_out["importe_ajustar_%s" % tasa] = it['importe']
                self.params_out["iva_calculado_%s" % tasa] = it['ivaCalculado']
        if 'certificados' in liq:
            for c in liq['certificados']:
                cert = c['certificado']
                self.params_out['certificados'].append(dict(
                    tipo_certificado_deposito=cert['tipoCertificadoDeposito'],
                    nro_certificado_deposito=cert['nroCertificadoDeposito'],
                    peso_neto=cert['pesoNeto'],
                    cod_localidad_procedencia=cert['codLocalidadProcedencia'],
                    cod_prov_procedencia=cert['codProvProcedencia'],
                    campania=cert['campania'],
                    fecha_cierre=cert['fechaCierre'],
                ))
        self.params_out['errores'] = self.errores
    # Process the response of autorizar / ajustar (and consultar):
    if aut:
        self.TotalDeduccion = aut.get('totalDeduccion')
        self.TotalRetencion = aut.get('totalRetencion')
        self.TotalRetencionAfip = aut.get('totalRetencionAfip')
        self.TotalOtrasRetenciones = aut.get('totalOtrasRetenciones')
        self.TotalNetoAPagar = aut.get('totalNetoAPagar')
        self.TotalIvaRg4310_18 = aut.get('totalIvaRg4310_18')
        self.TotalPagoSegunCondicion = aut.get('totalPagoSegunCondicion')
        self.COE = str(aut.get('coe', ''))
        self.COEAjustado = aut.get('coeAjustado')
        self.Estado = aut.get('estado', '')
        self.NroContrato = aut.get('numeroContrato', '')
        # update output parameters:
        self.params_out['coe'] = self.COE
        self.params_out['coe_ajustado'] = self.COEAjustado
        self.params_out['estado'] = self.Estado
        self.params_out['total_deduccion'] = self.TotalDeduccion
        self.params_out['total_retencion'] = self.TotalRetencion
        self.params_out['total_retencion_afip'] = self.TotalRetencionAfip
        self.params_out['total_otras_retenciones'] = self.TotalOtrasRetenciones
        self.params_out['total_neto_a_pagar'] = self.TotalNetoAPagar
        self.params_out['total_iva_rg_4310_18'] = self.TotalIvaRg4310_18
        self.params_out['total_pago_segun_condicion'] = self.TotalPagoSegunCondicion
        # additional data:
        self.NroOrden = self.params_out['nro_orden'] = aut.get('nroOrden')
        self.params_out['cod_tipo_ajuste'] = aut.get('codTipoAjuste')
        fecha = aut.get('fechaLiquidacion')
        if fecha:
            fecha = str(fecha)
        self.params_out['fecha_liquidacion'] = fecha
        self.params_out['importe_iva'] = aut.get('importeIva')
        self.params_out['nro_op_comercial'] = aut.get('nroOpComercial')
        self.params_out['operacion_con_iva'] = aut.get('operacionConIva')
        self.params_out['precio_operacion'] = aut.get('precioOperacion')
        self.params_out['total_peso_neto'] = aut.get('totalPesoNeto')
        self.params_out['subtotal'] = aut.get('subTotal')
        # LSG (service-specific):
        self.params_out['total_deducciones'] = aut.get('totalDeducciones')
        if 'todalPercepciones' in aut:
            # typo ("todal") present in AFIP's WSDL...
            self.params_out['total_percepciones'] = aut.get('todalPercepciones')
        else:
            self.params_out['total_percepciones'] = aut.get('totalPercepciones')
        # sub-structures:
        self.params_out['retenciones'] = []
        self.params_out['deducciones'] = []
        self.params_out['percepciones'] = []
        for retret in aut.get("retenciones", []):
            retret = retret['retencionReturn']
            self.params_out['retenciones'].append({
                'importe_retencion': retret['importeRetencion'],
                'alicuota': retret['retencion'].get('alicuota'),
                'base_calculo': retret['retencion'].get('baseCalculo'),
                'codigo_concepto': retret['retencion'].get('codigoConcepto'),
                'detalle_aclaratorio': (retret['retencion'].get('detalleAclaratorio') or "").replace("\n", ""),
                'importe_certificado_retencion': retret['retencion'].get('importeCertificadoRetencion'),
                'nro_certificado_retencion': retret['retencion'].get('nroCertificadoRetencion'),
                'fecha_certificado_retencion': retret['retencion'].get('fechaCertificadoRetencion'),
            })
        for dedret in aut.get("deducciones", []):
            dedret = dedret['deduccionReturn']
            self.params_out['deducciones'].append({
                'importe_deduccion': dedret['importeDeduccion'],
                'importe_iva': dedret.get('importeIva'),
                'alicuota': dedret['deduccion'].get('alicuotaIva'),
                'base_calculo': dedret['deduccion'].get('baseCalculo'),
                'codigo_concepto': dedret['deduccion'].get('codigoConcepto'),
                'detalle_aclaratorio': dedret['deduccion'].get('detalleAclaratorio', "").replace("\n", ""),
                'dias_almacenaje': dedret['deduccion'].get('diasAlmacenaje'),
                'precio_pkg_diario': dedret['deduccion'].get('precioPKGdiario'),
                'comision_gastos_adm': dedret['deduccion'].get('comisionGastosAdm'),
            })
        for perret in aut.get("percepciones", []):
            # some responses wrap each item in 'percepcionReturn', some don't
            perret = perret.get('percepcionReturn', perret)
            self.params_out['percepciones'].append({
                'importe_final': perret['percepcion']['importeFinal'],
                'alicuota': perret['percepcion'].get('alicuota'),
                'base_calculo': perret['percepcion'].get('baseCalculo'),
                'descripcion': perret['percepcion'].get('descripcion', "").replace("\n", ""),
            })
def prettyDateDifference(startTime, finishTime=None):
    """Get a datetime object or a int() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc.

    Args:
        startTime (int or datetime or None): the earlier moment; ``None``
            short-circuits to ``None``.
        finishTime (datetime, optional): the later moment; defaults to now.

    Returns:
        str or None: a human-readable difference, '' when startTime is in
        the future of finishTime.

    Raises:
        RuntimeError: when startTime is neither int nor datetime.
    """
    from datetime import datetime
    if startTime is None:
        return None
    if not isinstance(startTime, (int, datetime)):
        raise RuntimeError("Cannot parse time")
    endTime = finishTime or datetime.now()
    if isinstance(startTime, int):
        diff = endTime - datetime.fromtimestamp(startTime)
    else:
        # isinstance check above guarantees datetime here (dead branch removed)
        diff = endTime - startTime
    second_diff = diff.seconds
    day_diff = diff.days

    if day_diff < 0:
        return ''

    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str(second_diff) + " seconds ago"
        if second_diff < 120:
            return "a minute ago"
        if second_diff < 3600:
            return str(int(second_diff / 60)) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(int(second_diff / 3600)) + " hours ago"
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    # BUG FIX: differences of 7 days or more previously fell off the end of
    # the function and returned None, despite the docstring promising
    # '3 months ago'.
    if day_diff < 31:
        return str(int(day_diff / 7)) + " weeks ago"
    if day_diff < 365:
        return str(int(day_diff / 30)) + " months ago"
    return str(int(day_diff / 365)) + " years ago"
def get_plugin_class(self, typ):
    """Return the registered plugin class for *typ*.

    When the class is not yet registered, importing the module of the same
    name under ``self.namespace`` is attempted (registration is expected to
    happen as an import side effect).

    :raises ValueError: when no plugin of that name can be found.
    """
    if typ in self._class:
        return self._class[typ]

    # Not registered yet: try importing a module of the same name, which
    # should register the class as a side effect.
    try:
        importlib.import_module("%s.%s" % (self.namespace, typ))
        if typ in self._class:
            return self._class[typ]
    except ImportError as e:
        self.log.debug("ImportError " + str(e))

    raise ValueError("unknown plugin '%s'" % typ)
def post_load(fn=None, pass_many=False, pass_original=False):
    """Register a method to invoke after deserializing an object.

    The method receives the deserialized data and returns the processed
    data. By default it receives a single datum at a time, transparently
    handling the ``many`` argument passed to the Schema. If
    ``pass_many=True``, the raw data (which may be a collection) and the
    value for ``many`` are passed. If ``pass_original=True``, the original
    data (before deserializing) is passed as an additional argument.
    """
    tag = (POST_LOAD, pass_many)
    return set_hook(fn, tag, pass_original=pass_original)
def _make_data(data) -> Tuple[List[Dict], List[Dict]]:
    """Transform table data into JSON.

    Returns a tuple ``(rows, columns)`` where each row dict carries a
    string ``key`` derived from the frame's index.
    """
    jsdata = []
    for idx, row in data.iterrows():
        # Stringify the column labels so the dict keys are JSON-safe.
        row.index = row.index.astype(str)
        rdict = row.to_dict()
        # The original index value becomes the row's unique 'key'.
        rdict.update(dict(key=str(idx)))
        jsdata.append(rdict)
    return jsdata, Table._make_columns(data.columns)
def get_readonly_fields ( self , request , obj = None ) : """Makes ` created _ by ` , ` create _ date ` & ` update _ date ` readonly when editing . Author : Himanshu Shankar ( https : / / himanshus . com )"""
# Get read only fields from super fields = list ( super ( CreateUpdateAdmin , self ) . get_readonly_fields ( request = request , obj = obj ) ) # Loop over ownership info field for k , v in self . ownership_info [ 'fields' ] . items ( ) : # Check if model has k attribute # and field k is readonly # and k is not already in fields # and k is not in excluded field # ( if not checked , form . Meta . exclude has same field twice ) if ( hasattr ( self . model , k ) and ( 'readonly' in v and v [ 'readonly' ] ) and k not in fields and ( not self . exclude or ( self . exclude and k not in self . exclude ) ) ) : fields . append ( k ) return tuple ( fields )
def handle_decoded_payload(self, data):
    '''
    Override this method if you wish to handle the decoded data
    differently.

    Coroutine: dispatches the job described by ``data`` (a dict carrying at
    least 'fun' and 'jid') to a new process or thread.
    '''
    # Ensure payload is unicode. Disregard failure to decode binary blobs.
    if six.PY2:
        data = salt.utils.data.decode(data, keep=True)
    if 'user' in data:
        log.info('User %s Executing command %s with jid %s',
                 data['user'], data['fun'], data['jid'])
    else:
        log.info('Executing command %s with jid %s',
                 data['fun'], data['jid'])
    log.debug('Command details %s', data)

    # Don't duplicate jobs
    log.trace('Started JIDs: %s', self.jid_queue)
    if self.jid_queue is not None:
        if data['jid'] in self.jid_queue:
            # Already running/ran this jid: drop the duplicate request.
            return
        else:
            self.jid_queue.append(data['jid'])
            # Bound the queue at the configured high-water mark.
            if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                self.jid_queue.pop(0)

    if isinstance(data['fun'], six.string_types):
        if data['fun'] == 'sys.reload_modules':
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    # Throttle: wait while the number of running job processes is at the cap.
    process_count_max = self.opts.get('process_count_max')
    process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
    if process_count_max > 0:
        process_count = len(salt.utils.minion.running(self.opts))
        while process_count >= process_count_max:
            log.warning('Maximum number of processes (%s) reached while '
                        'executing jid %s, waiting %s seconds...',
                        process_count_max, data['jid'],
                        process_count_max_sleep_secs)
            yield tornado.gen.sleep(process_count_max_sleep_secs)
            process_count = len(salt.utils.minion.running(self.opts))

    # We stash an instance references to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    if multiprocessing_enabled:
        if sys.platform.startswith('win'):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process = SignalHandlingMultiprocessingProcess(
                target=self._target,
                args=(instance, self.opts, data, self.connected)
            )
    else:
        process = threading.Thread(
            target=self._target,
            args=(instance, self.opts, data, self.connected),
            name=data['jid']
        )

    if multiprocessing_enabled:
        with default_signals(signal.SIGINT, signal.SIGTERM):
            # Reset current signals before starting the process in
            # order not to inherit the current signal handlers
            process.start()
    else:
        process.start()

    # TODO: remove the windows specific check?
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # we only want to join() immediately if we are daemonizing a process
        process.join()
    else:
        self.win_proc.append(process)
def decode_exactly(code, bits_per_char=6):
    """Decode a geohash on a hilbert curve as a lng/lat position with error-margins.

    The precision is taken from ``len(code)``: each character contributes
    ``bits_per_char`` bits.  Do not mix geohashes built with different
    ``bits_per_char`` values!

    Parameters:
        code: str           The geohash to decode.
        bits_per_char: int  Bits encoded per character; must be 2, 4 or 6.

    Returns:
        Tuple[float, float, float, float]:
            (lng, lat, lng-error, lat-error) coordinate for the geohash.
    """
    assert bits_per_char in (2, 4, 6)

    if not code:
        # Empty hash: no information, so the "cell" spans the whole space.
        return 0.0, 0.0, _LNG_INTERVAL[1], _LAT_INTERVAL[1]

    total_bits = len(code) * bits_per_char
    level = total_bits >> 1  # level of the hilbert curve is bits / 2
    dim = 1 << level

    code_int = decode_int(code, bits_per_char)
    if CYTHON_AVAILABLE and total_bits <= MAX_BITS:
        x, y = hash2xy_cython(code_int, dim)
    else:
        x, y = _hash2xy(code_int, dim)

    lng, lat = _int2coord(x, y, dim)
    lng_err, lat_err = _lvl_error(level)
    # NOTE(review): the error terms are added to the decoded coordinate —
    # presumably shifting from the cell edge to its center; confirm against
    # _int2coord's convention.
    return lng + lng_err, lat + lat_err, lng_err, lat_err
def _get_local_dict(self):
    """Retrieve (or lazily initialize) the thread-local dict for this namespace.

    The dict is stored as an attribute named ``self.namespace`` on the
    module-level ``THREAD_STORE``; on first access per thread it is built
    from ``self.args`` / ``self.kwargs``.
    """
    _missing = object()  # sentinel: None could in principle be a stored value
    store = getattr(THREAD_STORE, self.namespace, _missing)
    if store is _missing:
        store = dict(*self.args, **self.kwargs)
        setattr(THREAD_STORE, self.namespace, store)
    return store
def pool_update(self, pool_id, name=None, description=None, post_ids=None,
                is_active=None, category=None):
    """Update a pool (Requires login) (UNTESTED).

    Parameters:
        pool_id (int): The pool id to update.
        name (str): New pool name.
        description (str): New pool description.
        post_ids (str): List of space delimited post ids.
        is_active (int): Can be: 1, 0.
        category (str): Can be: series, collection.
    """
    payload = {
        'pool[name]': name,
        'pool[description]': description,
        'pool[post_ids]': post_ids,
        'pool[is_active]': is_active,
        'pool[category]': category,
    }
    endpoint = 'pools/{0}.json'.format(pool_id)
    return self._get(endpoint, payload, method='PUT', auth=True)
def inspect_config(app):
    """Inspect the Sphinx configuration and update for slide-linking.

    If links from HTML to slides are enabled, make sure the sidebar
    configuration includes the template and add the necessary theme
    directory as a loader so the sidebar template can be located.

    If the sidebar configuration already includes ``slidelink.html`` (in
    any key), the configuration will not be changed.  If the configuration
    is not specified, we'll attempt to emulate what Sphinx does by default.
    """
    # avoid import cycles :/
    from hieroglyph import writer

    # only reconfigure Sphinx if we're generating HTML
    if app.builder.name not in HTML_BUILDERS:
        return

    if app.config.slide_link_html_to_slides:
        # add the slide theme dir as a Loader so SLIDELINK_TEMPLATE resolves
        app.builder.templates.loaders.append(
            SphinxFileSystemLoader(
                os.path.join(
                    os.path.dirname(__file__),
                    'themes',
                    'slides',
                )
            )
        )

        # add the "show slides" sidebar template
        if not app.config.html_sidebars:
            # no sidebars explicitly defined, mimic the old style
            # behavior + slide links
            app.config.html_sidebars = {
                '**': [
                    'localtoc.html',
                    'relations.html',
                    'sourcelink.html',
                    SLIDELINK_TEMPLATE,
                    'searchbox.html',
                ],
            }
        else:
            # sidebars defined, add the template if needed
            included = False
            # BUG FIX: html_sidebars maps glob patterns to template lists;
            # iterating the dict directly yields only the keys, so the old
            # two-name unpacking was broken.  Iterate .items() instead.
            # (Also renamed the loop var so it no longer shadows the stdlib
            # ``glob`` module.)
            for sidebar_glob, templates in app.config.html_sidebars.items():
                if SLIDELINK_TEMPLATE in templates:
                    included = True
                    break

            if not included:
                # the slidelink template was not included; append it
                # to the list of sidebars for all templates
                app.config.html_sidebars.setdefault('**', []).append(
                    SLIDELINK_TEMPLATE,
                )

    if app.config.slide_link_html_sections_to_slides:
        # fix up the HTML Translator so section titles link to slides
        if sphinx.version_info >= (1, 6, 0):
            override_translator = type(
                'SlideLinkTranslator',
                (app.builder.get_translator_class(), object),
                {'depart_title': writer.depart_title},
            )
            app.set_translator(app.builder, override_translator)
        else:
            app.builder.translator_class = type(
                'SlideLinkTranslator',
                (app.builder.translator_class, object),
                {'depart_title': writer.depart_title},
            )
def _decode(cls, value):
    """Decode *value*, reverting '%'-encoded groups, then parse it as JSON."""
    def _unescape(match):
        # '%XX' hex group back to the single character it encodes
        return '%c' % int(match.group(1), 16)

    decoded = cls._DEC_RE.sub(_unescape, value)
    return json.loads(decoded)
def vector_args(self, args):
    """Yield the per-lane slices of *args*, one lane at a time, ordered
    from the most significant lane down to the least significant."""
    lane_size = self._vector_size
    for lane in range(self._vector_count - 1, -1, -1):
        hi = (lane + 1) * lane_size - 1
        lo = lane * lane_size
        # NOTE(review): the [hi:lo] slice order suggests myhdl-style
        # intbv indexing (msb:lsb) rather than plain list slicing.
        yield [vec[hi:lo] for vec in args]
def draw_eccs(n, per=10, binsize=0.1, fuzz=0.05, maxecc=0.97):
    """Draws eccentricities appropriate to given periods, generated according
    to empirical data from the Multiple Star Catalog (MSC).

    Args:
        n: number of eccentricities to draw.
        per: orbital period(s) — scalar or array (presumably days; TODO confirm
            units against MSC_TRIPLEPERS).
        binsize: initial half-width (in log10 period) of the matching bin.
        fuzz: fractional gaussian jitter applied to the sampled values.
        maxecc: hard cap applied to all drawn eccentricities.

    Returns:
        numpy array of n (or len(per)) non-negative eccentricities <= maxecc.
    """
    if np.size(per) == 1 or np.std(np.atleast_1d(per)) == 0:
        # Single (or constant) period: sample from MSC systems whose periods
        # fall in a log10 bin around `per`.
        if np.size(per) > 1:
            per = per[0]
        if per == 0:
            es = np.zeros(n)
        else:
            ne = 0
            # Grow the bin by 10% per iteration until it contains at least
            # 10 empirical samples to draw from.
            while ne < 10:
                mask = np.absolute(np.log10(MSC_TRIPLEPERS) - np.log10(per)) < binsize / 2.
                es = MSC_TRIPDATA.e[mask]
                ne = len(es)
                if ne < 10:
                    binsize *= 1.1
            inds = rand.randint(ne, size=n)
            # Bootstrap-resample and add fractional gaussian fuzz.
            es = es[inds] * (1 + rand.normal(size=n) * fuzz)
    else:
        # Heterogeneous periods: split at per == 25 and resample each group
        # from the corresponding subset of the empirical distribution.
        longmask = (per > 25)
        shortmask = (per <= 25)
        es = np.zeros(np.size(per))
        elongs = MSC_TRIPDATA.e[MSC_TRIPLEPERS > 25]
        eshorts = MSC_TRIPDATA.e[MSC_TRIPLEPERS <= 25]
        # `n` is overridden to match the number of input periods here.
        n = np.size(per)
        nlong = longmask.sum()
        nshort = shortmask.sum()
        nelongs = np.size(elongs)
        neshorts = np.size(eshorts)
        ilongs = rand.randint(nelongs, size=nlong)
        ishorts = rand.randint(neshorts, size=nshort)
        es[longmask] = elongs[ilongs]
        es[shortmask] = eshorts[ishorts]
        es = es * (1 + rand.normal(size=n) * fuzz)
    # Clamp (the fuzz can push samples above the physical/empirical cap).
    es[es > maxecc] = maxecc
    return np.absolute(es)
def add_detector(self, detector_cls):
    """Register a ``Detector`` class with scrubadub.

    Raises TypeError when *detector_cls* is not a ``Detector`` subclass and
    KeyError when a detector for the same filth type is already registered.
    """
    if not issubclass(detector_cls, detectors.base.Detector):
        message = ('"%(detector_cls)s" is not a subclass of Detector') % {
            'detector_cls': detector_cls,
        }
        raise TypeError(message)
    # TODO: should add tests to make sure filth_cls is actually a proper
    # filth_cls
    name = detector_cls.filth_cls.type
    if name in self._detectors:
        message = ('can not add Detector "%(name)s"---it already exists. '
                   'Try removing it first.') % {'name': name}
        raise KeyError(message)
    self._detectors[name] = detector_cls()
def segment_volume(seg):
    '''Compute the volume of a segment, approximated as a conical frustum.'''
    radius_a = seg[0][COLS.R]
    radius_b = seg[1][COLS.R]
    height = point_dist(seg[0], seg[1])
    # Frustum volume: pi * h * (r0^2 + r0*r1 + r1^2) / 3
    return math.pi * height * (radius_a ** 2 + radius_a * radius_b + radius_b ** 2) / 3.0
def items(self):
    """Behave like `dict.items` for mapping types (iterator over (key, value)
    pairs), and like `iter` for sequence types (iterator over values)."""
    if self.empty:
        return iter([])
    val = self.value
    # Prefer the mapping protocol (Python 2 `iteritems` first, then `items`);
    # fall back to plain iteration for sequence-like values.
    mapping_iter = getattr(val, "iteritems", None) or getattr(val, "items", None)
    if mapping_iter is not None:
        return mapping_iter()
    return iter(self)
def clear_db_attribute(self, table, record, column):
    """Clear values from 'column' in 'record' in 'table'.

    This method corresponds to the following ovs-vsctl command::

        $ ovs-vsctl clear TBL REC COL
    """
    clear_cmd = ovs_vsctl.VSCtlCommand('clear', (table, record, column))
    self.run_command([clear_cmd])
def trainHMM_fromFile(wav_file, gt_file, hmm_model_name, mt_win, mt_step):
    '''
    This function trains a HMM model for segmentation-classification using a
    single annotated audio file.

    ARGUMENTS:
     - wav_file:        the path of the audio filename
     - gt_file:         the path of the ground truth filename (a csv file of
                        the form <segment start in seconds>,<segment end in
                        seconds>,<segment label> in each row
     - hmm_model_name:  the name of the HMM model to be stored
     - mt_win:          mid-term window size
     - mt_step:         mid-term window step

    RETURNS:
     - hmm:             an object to the resulting HMM
     - class_names:     a list of class_names

    After training, hmm, class_names, along with the mt_win and mt_step values
    are stored in the hmm_model_name file.
    '''
    # Read the ground-truth segments and convert them to one label per
    # mid-term frame.
    [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
    flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step)
    [fs, x] = audioBasicIO.readAudioFile(wav_file)
    # Mid-term feature extraction; 0.050 * fs is the short-term window/step
    # (50 ms) used underneath the mid-term windows.
    [F, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs,
                                       round(fs * 0.050), round(fs * 0.050))
    # Estimate the HMM parameters directly from the labelled frames.
    start_prob, transmat, means, cov = trainHMM_computeStatistics(F, flags)
    hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")
    hmm.startprob_ = start_prob
    hmm.transmat_ = transmat
    hmm.means_ = means
    hmm.covars_ = cov
    # Persist the model plus everything needed to reproduce the feature
    # extraction at prediction time.
    fo = open(hmm_model_name, "wb")
    cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(class_names, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(mt_win, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    fo.close()
    return hmm, class_names
def server_id(self):
    "asks the server at the other end for its unique id."
    raw = dbus.dbus_connection_get_server_id(self._dbobj)
    # Copy the C string into a Python str before releasing the C allocation.
    unique_id = ct.cast(raw, ct.c_char_p).value.decode()
    dbus.dbus_free(raw)
    return unique_id
def add_entry(self, row):
    """Parse the VCF entry in *row*, store it in this VCFFile keyed by
    (chrom, pos), and return the resulting VCFEntry."""
    entry = VCFEntry(self.individuals)
    entry.parse_entry(row)
    self.entries[(entry.chrom, entry.pos)] = entry
    return entry
def Nads_in_slab(self):
    """Returns the TOTAL number of adsorbates in the slab on BOTH sides."""
    # Hoist the composition dict: the original rebuilt it with as_dict()
    # once per adsorbate species inside the comprehension.
    composition = self.composition.as_dict()
    return sum(composition[species] for species in self.ads_entries_dict)
def save(self, **fields):
    """Save the instance to the remote Transifex server.

    If it was pre-populated, it updates the instance on the server,
    otherwise it creates a new object.  Any values given in `fields` will
    be attempted to be saved on the object, as will any values already set
    via ``model_instance.attr = value``.

    Raises:
        AttributeError: if a given field is not included in
            `self.writable_fields`.
    """
    for field, value in fields.items():
        if field not in self.writable_fields:
            # delegate error handling for unknown/read-only fields
            self._handle_wrong_field(field, ATTR_TYPE_WRITE)
        else:
            setattr(self, field, value)
    if self._populated_fields:
        self._update(**self._modified_fields)
    else:
        self._create(**self._modified_fields)
def add_checkpoint_file(self, filename):
    """Add *filename* as a checkpoint file for this DAG node.

    @param filename: checkpoint filename to add
    """
    if filename in self.__checkpoint_files:
        return
    self.__checkpoint_files.append(filename)
    job = self.job()
    # Checkpoint macros only apply to non-DAGMan jobs in the grid universe.
    if not isinstance(job, CondorDAGManJob):
        if job.get_universe() == 'grid':
            self.add_checkpoint_macro(filename)
def OneResult(parser):
    "Parse like parser, but return exactly one result, not a tuple."
    def parse(text):
        # Unwrap the single element; anything else is a programming error.
        matches = parser(text)
        assert len(matches) == 1, "Expected one result but got %r" % (matches,)
        return matches[0]
    return parse
def to_dict(self, converter=None):
    """Return a plain-dict copy of the current object.

    Each value is passed through *converter*; when no converter is given,
    values are converted with ``self.stringify``.
    """
    convert = self.stringify if converter is None else converter
    return {key: convert(value) for key, value in self.iteritems()}
def _compare_by_version(path1, path2):
    """Return the more recently learned of two paths from the same source.

    Paths from the same source/peer are compared by version number; when
    the sources differ, None is returned.
    """
    if path1.source != path2.source:
        return None
    # Higher version number means the path was received later.
    return path1 if path1.source_version_num > path2.source_version_num else path2
def setup_partitioning ( portal ) : """Setups the enhanced partitioning system"""
logger . info ( "Setting up the enhanced partitioning system" ) # Add " Create partition " transition add_create_partition_transition ( portal ) # Add getAncestorsUIDs index in analyses catalog add_partitioning_indexes ( portal ) # Adds metadata columns for partitioning add_partitioning_metadata ( portal ) # Setup default ID formatting for partitions set_partitions_id_formatting ( portal )
async def _async_supervisor(func, animation_, step, *args, **kwargs):
    """Supervisor for running an animation alongside an asynchronous function.

    Args:
        func: A coroutine function to be run alongside an animation.
        animation_: An infinite generator that produces strings for the
            animation.
        step: Seconds between each animation frame.
        *args: Arguments for func.
        **kwargs: Keyword arguments for func.

    Returns:
        The result of func(*args, **kwargs).

    Raises:
        Any exception that is thrown when executing func.
    """
    with ThreadPoolExecutor(max_workers=2) as pool, _terminating_event() as event:
        # The animation runs on a worker thread; leaving the event context
        # signals it to stop once func has finished.
        pool.submit(animate_cli, animation_, step, event)
        outcome = await func(*args, **kwargs)
        return outcome
def print_variables(self):
    """Print the magic variables available in config files, with their
    values and descriptions.  May be useful for debugging.

    http://uwsgi-docs.readthedocs.io/en/latest/Configuration.html#magic-variables
    """
    emit = partial(self.print_out, format_options='green')
    emit('===== variables =====')
    for var, hint in self.vars.get_descriptions().items():
        # '%' is doubled in the hint so uwsgi does not expand it.
        emit(' %' + var + ' = ' + var + ' = ' + hint.replace('%', '%%'))
    emit('=====================')
    return self
def __convertLongToString(self, iValue):
    """Convert a long/int to its hex-digit string, without '0x' and 'L'.

    Args:
        iValue: integer to format (hex representation).

    Returns:
        String of this integer's hex digits without the "0x" prefix and
        the Python 2 "L" long suffix.
    """
    # BUG FIX: the old str(hex(iValue)).lstrip('0x') stripped the character
    # SET {'0', 'x'}, so hex(0) -> '0x0' -> '' (wrong).  format(..., 'x')
    # yields the bare lowercase hex digits directly ('0' for zero) and
    # never emits an 'L' suffix, which rstrip('L') kept having to remove.
    return format(iValue, 'x')
def load_img(path, grayscale=False, target_size=None):
    """Utility function to load an image from disk.

    Args:
        path: The image file path.
        grayscale: True to convert to grayscale image (Default value = False)
        target_size: (w, h) to resize. (Default value = None)

    Returns:
        The loaded numpy image.
    """
    img = io.imread(path, grayscale)
    if not target_size:
        return img
    resized = transform.resize(img, target_size, preserve_range=True)
    return resized.astype('uint8')
def valid_ipv4(ip):
    """check if ip is a valid ipv4"""
    match = _valid_ipv4.match(ip)
    if match is None:
        return False
    octets = match.groups()
    if len(octets) != 4:
        return False
    values = [int(octet) for octet in octets]
    # The first octet must not be 0 or 255; the rest are plain 0-255.
    if not 1 <= values[0] <= 254:
        return False
    return all(0 <= value <= 255 for value in values[1:])
def patch(args):
    """
    %prog patch reference.fasta reads.fasta

    Run PBJelly with reference and reads.
    """
    from jcvi.formats.base import write_file
    from jcvi.formats.fasta import format

    p = OptionParser(patch.__doc__)
    p.add_option("--cleanfasta", default=False, action="store_true",
                 help="Clean FASTA to remove description [default: %default]")
    p.add_option("--highqual", default=False, action="store_true",
                 help="Reads are of high quality [default: %default]")
    p.set_home("pbjelly")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    ref, reads = args
    cpus = opts.cpus
    # PBJelly ships a setup.sh that must be sourced before its tools
    # (e.g. fakeQuals.py) appear on the PATH.
    cmd = op.join(opts.pbjelly_home, "setup.sh")
    setup = "source {0}".format(cmd)
    if not which("fakeQuals.py"):
        sh(setup)

    pf = ref.rsplit(".", 1)[0]
    pr, px = reads.rsplit(".", 1)
    # Remove description line
    if opts.cleanfasta:
        oref = pf + ".f.fasta"
        oreads = pr + ".f.fasta"
        format([ref, oref])
        format([reads, oreads])
        ref, reads = oref, oreads

    # Check if the FASTA has qual; fake_quals generates a .qual file when
    # one is missing.
    ref, refq = fake_quals(ref)
    convert_reads = not px in ("fq", "fastq", "txt")
    if convert_reads:
        reads, readsq = fake_quals(reads)
        readsfiles = " ".join((reads, readsq))
    else:
        readsfiles = reads

    # Make directory structure expected by PBJelly (data/reference and
    # data/reads under the current working directory).
    dref, dreads = "data/reference", "data/reads"
    cwd = os.getcwd()
    reference = op.join(cwd, "{0}/{1}".format(dref, ref))
    reads = op.join(cwd, "{0}/{1}".format(dreads, reads))
    if not op.exists(reference):
        sh("mkdir -p {0}".format(dref))
        sh("cp {0} {1}/".format(" ".join((ref, refq)), dref))
    if not op.exists(reads):
        sh("mkdir -p {0}".format(dreads))
        sh("cp {0} {1}/".format(readsfiles, dreads))

    outputDir = cwd
    p = Protocol(outputDir, reference, reads, highqual=opts.highqual)
    p.write_xml()

    # Build the pipeline: run every PBJelly stage in order, giving the
    # assembly stage the requested number of processors.
    runsh = [setup]
    for action in "setup|mapping|support|extraction".split("|"):
        runsh.append("Jelly.py {0} Protocol.xml".format(action))
    runsh.append('Jelly.py assembly Protocol.xml -x "--nproc={0}"'.format(cpus))
    runsh.append("Jelly.py output Protocol.xml")

    runfile = "run.sh"
    contents = "\n".join(runsh)
    write_file(runfile, contents)
def get_ports_strings_from_list(data):
    """Transform a sorted list of port numbers into a list of strings with
    port ranges.

    Example: [10, 12, 13, 14, 15] -> ['10', '12-15']
    """
    if not data:
        return []

    # Group consecutive ports into (start, end) runs.
    runs = []
    run_start = prev = data[0]
    for port in data[1:]:
        if port == prev + 1:
            prev = port
            continue
        runs.append((run_start, prev))
        run_start = prev = port
    runs.append((run_start, prev))

    # Render each run: single port as-is, otherwise "start-end".
    return [str(start) if start == end else "%i-%i" % (start, end)
            for start, end in runs]
def asString(self):
    """Return a copy of this query with an AsString function added to it.

    :return     <Query>
    """
    stringified = self.copy()
    stringified.addFunction(Query.Function.AsString)
    return stringified
def utils_doc(*args):
    '''
    .. versionadded:: Neon

    Return the docstrings for all utils modules. Optionally, specify a module
    or a function to narrow the selection.

    The strings are aggregated into a single document on the master for easy
    reading.

    Multiple modules/functions can be specified.

    CLI Example:

    .. code-block:: bash

        salt '*' sys.utils_doc
        salt '*' sys.utils_doc data stringutils
        salt '*' sys.utils_doc stringutils.to_unicode
        salt '*' sys.utils_doc data.encode data.decode
    '''
    docs = {}
    if not args:
        # No filter given: return every utils function's docstring.
        for fun in __utils__:
            docs[fun] = __utils__[fun].__doc__
        return _strip_rst(docs)

    for module in args:
        _use_fnmatch = False
        if '*' in module:
            # Glob patterns are matched with fnmatch below.
            target_mod = module
            _use_fnmatch = True
        elif module:
            # allow both "sys" and "sys." to match sys, without also matching
            # sysctl
            target_mod = module + '.' if not module.endswith('.') else module
        else:
            target_mod = ''
        if _use_fnmatch:
            for fun in fnmatch.filter(__utils__, target_mod):
                docs[fun] = __utils__[fun].__doc__
        else:
            # Exact function name ("mod.fun") or module prefix ("mod.").
            for fun in __utils__:
                if fun == module or fun.startswith(target_mod):
                    docs[fun] = __utils__[fun].__doc__
    return _strip_rst(docs)
def load_config(filename, config_dir=None, copy_default_config=True):
    """Loads the specified config file.

    Parameters
    ----------
    filename : :obj:`str`
        Config file name, e.g. 'config_grid.cfg'.
    config_dir : :obj:`str`, optional
        Path to config file. If None uses default edisgo config directory
        specified in config file 'config_system.cfg' in section 'user_dirs'
        by subsections 'root_dir' and 'config_dir'. Default: None.
    copy_default_config : Boolean
        If True copies a default config file into `config_dir` if the
        specified config file does not exist. Default: True.
    """
    if not config_dir:
        config_file = os.path.join(get_default_config_path(), filename)
    else:
        config_file = os.path.join(config_dir, filename)

    # config file does not exist -> copy default
    if not os.path.isfile(config_file):
        if copy_default_config:
            logger.info('Config file {} not found, I will create a '
                        'default version'.format(config_file))
            make_directory(config_dir)
            # Default templates live next to the package as '<name>_default.cfg'.
            shutil.copy(os.path.join(package_path, 'config',
                                     filename.replace('.cfg', '_default.cfg')),
                        config_file)
        else:
            message = 'Config file {} not found.'.format(config_file)
            logger.error(message)
            raise FileNotFoundError(message)

    # cfg.read returns the list of successfully parsed files; empty means
    # the file vanished or could not be read.
    if len(cfg.read(config_file)) == 0:
        message = 'Config file {} not found or empty.'.format(config_file)
        logger.error(message)
        raise FileNotFoundError(message)

    # Mark the module-level flag so other code can tell a config was loaded.
    global _loaded
    _loaded = True
def set_date_range(self, start=None, end=None):
    """Update date range of stats, charts, etc.

    If an argument is None the original date range is used for that bound,
    so calling with no args resets to the original range.

    Args:
        * start (date): start date
        * end (end): end date
    """
    new_start = pd.to_datetime(start) if start is not None else self._start
    new_end = pd.to_datetime(end) if end is not None else self._end
    self._update(self._prices.loc[new_start:new_end])
def Hash(self):
    """Get the hash of the transaction, computing and caching it on first use.

    Returns:
        UInt256:
    """
    if self.__hash:
        return self.__hash
    # Double-SHA256 over the serialized hash data, cached for later calls.
    raw = bytearray(binascii.unhexlify(self.GetHashData()))
    self.__hash = UInt256(data=Crypto.Hash256(raw))
    return self.__hash
def reset(self):
    """Reset the instance — drop the cached hline/row-size state and empty
    both the header and the accumulated rows."""
    self._hline_string = self._row_size = None
    self._header, self._rows = [], []