signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def access_entries(self):
    """List[google.cloud.bigquery.dataset.AccessEntry]: Dataset's access entries.

    ``role`` augments the entity type and must be present **unless** the
    entity type is ``view``.

    Raises:
        TypeError: If 'value' is not a sequence.
        ValueError: If any item in the sequence is not an
            :class:`~google.cloud.bigquery.dataset.AccessEntry`.
    """
    raw_entries = self._properties.get("access", [])
    return [AccessEntry.from_api_repr(raw) for raw in raw_entries]
def set_corner_widgets(self, corner_widgets):
    """Set tabs corner widgets.

    corner_widgets: dictionary of (corner, widgets)
        corner: Qt.TopLeftCorner or Qt.TopRightCorner
        widgets: list of widgets (may contain integers to add spacings)
    """
    assert isinstance(corner_widgets, dict)
    assert all(corner in (Qt.TopLeftCorner, Qt.TopRightCorner)
               for corner in corner_widgets)
    self.corner_widgets.update(corner_widgets)
    for corner, widgets in list(self.corner_widgets.items()):
        container = QWidget()
        container.hide()
        # Dispose of whatever widget was previously installed in this corner.
        previous = self.cornerWidget(corner)
        if previous:
            previous.close()
        self.setCornerWidget(container, corner)
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        for entry in widgets:
            # Integers in the widget list are interpreted as spacings.
            if isinstance(entry, int):
                layout.addSpacing(entry)
            else:
                layout.addWidget(entry)
        container.setLayout(layout)
        container.show()
def pool_by_environmentvip(self, environment_vip_id):
    """Return the list of pool objects for the given environment vip id.

    :param environment_vip_id: environment vip id
    :return: list of pool objects
    """
    uri = 'api/v3/pool/environment-vip/%s/' % environment_vip_id
    return super(ApiPool, self).get(uri)
def prepare(self):
    """Prepare the handler, ensuring RabbitMQ is connected or start a new
    connection attempt.
    """
    super(RabbitMQRequestHandler, self).prepare()
    # Kick off a reconnect when the channel has been closed.
    if self._rabbitmq_is_closed:
        self._connect_to_rabbitmq()
def _write_fragments(self, fragments):
    """Write a sequence of message fragments through ``self.writer``.

    :param fragments: A generator of messages
    :returns: a Future resolving to ``None`` once every fragment has been
        written, or carrying the exception of a failed write.
    """
    answer = tornado.gen.Future()
    if not fragments:
        answer.set_result(None)
        return answer

    io_loop = IOLoop.current()

    def _write_fragment(future):
        # A failed write aborts the chain and propagates the error.
        if future and future.exception():
            return answer.set_exc_info(future.exc_info())
        try:
            # BUGFIX: builtin next() works on Python 2 and 3; the original
            # used the Python-2-only generator method ``fragments.next()``.
            fragment = next(fragments)
        except StopIteration:
            return answer.set_result(None)
        io_loop.add_future(self.writer.put(fragment), _write_fragment)

    # Prime the chain with the first fragment.
    _write_fragment(None)
    return answer
def unpack_flags(value, flags):
    """Resolve ``value`` against ``flags``.

    Multiple flags might be packed in the same field: an exact key match
    returns that single flag; otherwise every flag whose key shares a bit
    with ``value`` is returned, in ascending key order.
    """
    if value in flags:
        return [flags[value]]
    return [flag for key, flag in sorted(flags.items()) if key & value > 0]
def normalize(alias):
    """Normalize an alias by removing adverbs defined in IGNORED_WORDS."""
    # CamelCase -> snake_case
    snake = re.sub(r'([a-z])([A-Z])', r'\1_\2', alias)
    # Drop ignored words
    kept = [word for word in snake.lower().split('_')
            if word not in IGNORED_WORDS]
    return '_'.join(kept)
def Inorm_bol_bb(Teff=5772., logg=4.43, abun=0.0, atm='blackbody',
                 photon_weighted=False):
    """Compute normal bolometric intensity via the Stefan-Boltzmann law,
    Inorm_bol_bb = 1/pi sigma T^4.

    If photon-weighted intensity is requested, the result is multiplied by
    a conversion factor that comes from integrating lambda/hc P(lambda)
    over all lambda. Input parameters mimick the Passband class Inorm
    method for calling convenience.

    @Teff: value or array of effective temperatures
    @logg: surface gravity; not used, for class compatibility only
    @abun: abundances; not used, for class compatibility only
    @atm: atmosphere model, must be blackbody, otherwise exception is raised
    @photon_weighted: intensity weighting scheme
    """
    if atm != 'blackbody':
        raise ValueError('atmosphere must be set to blackbody for Inorm_bol_bb.')

    # Conversion factor from energy- to photon-weighted intensity.
    factor = 2.6814126821264836e22 / Teff if photon_weighted else 1.0

    # convert scalars to vectors if necessary:
    if not hasattr(Teff, '__iter__'):
        Teff = np.array((Teff,))

    return factor * sigma_sb.value * Teff ** 4 / np.pi
def from_response(cls, response, attrs):
    """Create an index from returned Dynamo data."""
    proj = response['Projection']
    key_schema = response['KeySchema']
    hash_key = attrs.get(key_schema[0]['AttributeName'])
    range_key = None
    if len(key_schema) > 1:
        range_key = attrs[key_schema[1]['AttributeName']]
    throughput = Throughput.from_response(response['ProvisionedThroughput'])
    index = cls(proj['ProjectionType'], response['IndexName'], hash_key,
                range_key, proj.get('NonKeyAttributes'), throughput)
    # Keep the raw response around for callers that need extra fields.
    index.response = response
    return index
def http_session(self):
    """HTTP Session property.

    :return: vk_requests.utils.VerboseHTTPSession instance
    """
    # Lazily build and memoize the session on first access.
    if self._http_session is None:
        self._http_session = VerboseHTTPSession()
        self._http_session.headers.update(self.DEFAULT_HTTP_HEADERS)
    return self._http_session
def del_repo(repo, basedir=None, **kwargs):  # pylint: disable=W0613
    '''
    Delete a repo from <basedir> (default basedir: all dirs in `reposdir`
    yum option).

    If the .repo file in which the repo exists does not contain any other
    repo configuration, the file itself will be deleted.

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.del_repo myrepo
        salt '*' pkg.del_repo myrepo basedir=/path/to/dir
        salt '*' pkg.del_repo myrepo basedir=/path/to/dir,/path/to/another/dir
    '''
    # this is so we know which dirs are searched for our error messages below
    basedirs = _normalize_basedir(basedir)
    repos = list_repos(basedirs)

    if repo not in repos:
        return 'Error: the {0} repo does not exist in {1}'.format(repo, basedirs)

    # Find out what file the repo lives in
    repofile = ''
    for arepo in repos:
        if arepo == repo:
            repofile = repos[arepo]['file']

    # See if the repo is the only one in the file
    onlyrepo = True
    for arepo in six.iterkeys(repos):
        if arepo == repo:
            continue
        if repos[arepo]['file'] == repofile:
            onlyrepo = False

    # If this is the only repo in the file, delete the file itself
    if onlyrepo:
        os.remove(repofile)
        return 'File {0} containing repo {1} has been removed'.format(
            repofile, repo)

    # There must be other repos in this file, write the file with them
    # (the deleted repo's stanza is simply skipped while rebuilding).
    header, filerepos = _parse_repo_file(repofile)
    content = header
    for stanza in six.iterkeys(filerepos):
        if stanza == repo:
            continue
        comments = ''
        if 'comments' in six.iterkeys(filerepos[stanza]):
            # Preserved comments are re-emitted after the stanza body.
            comments = salt.utils.pkg.rpm.combine_comments(
                filerepos[stanza]['comments'])
            del filerepos[stanza]['comments']
        content += '\n[{0}]'.format(stanza)
        for line in filerepos[stanza]:
            content += '\n{0}={1}'.format(line, filerepos[stanza][line])
        content += '\n{0}\n'.format(comments)

    with salt.utils.files.fopen(repofile, 'w') as fileout:
        fileout.write(salt.utils.stringutils.to_str(content))

    return 'Repo {0} has been removed from {1}'.format(repo, repofile)
def dispatch(self, request, start_response):
    """Handles dispatch to apiserver handlers.

    This typically ends up calling start_response and returning the entire
    body of the response.

    Args:
        request: An ApiRequest, the request from the user.
        start_response: A function with semantics defined in PEP-333.

    Returns:
        A string, the body of the response.
    """
    # Give the special (non-API) handlers first crack at the request.
    special_response = self.dispatch_non_api_requests(request, start_response)
    if special_response is not None:
        return special_response

    # Otherwise forward to the backend service.
    try:
        return self.call_backend(request, start_response)
    except errors.RequestError as error:
        return self._handle_request_error(request, error, start_response)
def updateLayoutParameters(self, algorithmName, body, verbose=None):
    """Updates the Layout parameters for the Layout algorithm specified by
    the `algorithmName` parameter.

    :param algorithmName: Name of the layout algorithm
    :param body: A list of Layout Parameters with Values.
    :param verbose: print more
    :returns: default: successful operation
    """
    endpoint = self.___url + 'apply/layouts/' + str(algorithmName) + '/parameters'
    return api(url=endpoint, method="PUT", body=body, verbose=verbose)
def warning(f, *args, **kwargs):
    """Automatically log progress on function entry and exit at WARNING level.

    Message (args[0]) may be a string to be formatted with parameters
    passed to the decorated function: each '{varname}' is replaced by the
    value of the parameter of the same name.

    Keyword parameters:
        log: integer -- custom level of logging to pass to the active
            logger (default: WARNING; forced here).

    Exceptions:
        IndexError / ValueError -- if *args contains a string that does
        not correspond to a parameter name of the decorated function, or
        if there are more '{}'s than there are *args.
    """
    # Force the WARNING level before delegating to the shared helper.
    kwargs['log'] = logging.WARNING
    return _stump(f, *args, **kwargs)
def get_my_feed(self, limit=150, offset=20, sort="updated", nid=None):
    """Get my feed.

    :type limit: int
    :param limit: Number of posts from feed to get, starting from ``offset``
    :type offset: int
    :param offset: Offset starting from bottom of feed
    :type sort: str
    :param sort: How to sort feed that will be retrieved; only current
        known value is "updated"
    :type nid: str
    :param nid: ID of the network to get the feed from; optional, overrides
        the ``network_id`` entered when the class was created.
    """
    payload = dict(limit=limit, offset=offset, sort=sort)
    response = self.request(method="network.get_my_feed", nid=nid, data=payload)
    return self._handle_error(response, "Could not retrieve your feed.")
def match(self, string):
    """Match a string against the template.

    If the string matches the template, return a dict mapping template
    parameter names to converted values, otherwise return ``None``.

    >>> t = Template('Hello my name is {name}!')
    >>> t.match('Hello my name is David!')
    {'name': 'David'}
    >>> t.match('This string does not match.')
    """
    m = self.regex.match(string)
    if m:
        c = self.type_converters
        # BUGFIX: ``.items()`` works on both Python 2 and 3; the original
        # used the Python-2-only ``.iteritems()``.
        return dict((k, c[k](v) if k in c else v)
                    for k, v in m.groupdict().items())
    return None
def ed25519_private_key_from_string(string):
    """Create an ed25519 private key from ``string``, which is a seed.

    Args:
        string (str): the string to use as a seed.

    Returns:
        Ed25519PrivateKey: the private key
    """
    try:
        seed = base64.b64decode(string)
        return Ed25519PrivateKey.from_private_bytes(seed)
    except (UnsupportedAlgorithm, Base64Error) as exc:
        raise ScriptWorkerEd25519Error(
            "Can't create Ed25519PrivateKey: {}!".format(str(exc))
        )
def _set_account_info(self):
    """Connect to the AWS IAM API via boto3 and run the GetUser operation
    on the current user. Use this to set ``self.aws_account_id`` and
    ``self.aws_region``.
    """
    # AWS_DEFAULT_REGION takes precedence over AWS_REGION.
    kwargs = {}
    for env_var in ('AWS_DEFAULT_REGION', 'AWS_REGION'):
        if env_var in os.environ:
            logger.debug('Connecting to IAM with region_name=%s',
                         os.environ[env_var])
            kwargs = {'region_name': os.environ[env_var]}
            break
    else:
        logger.debug('Connecting to IAM without specified region')

    conn = client('iam', **kwargs)
    # The account ID is the 5th colon-separated field of the user ARN.
    self.aws_account_id = conn.get_user()['User']['Arn'].split(':')[4]
    # region
    conn = client('lambda', **kwargs)
    self.aws_region = conn._client_config.region_name
    logger.info('Found AWS account ID as %s; region: %s',
                self.aws_account_id, self.aws_region)
def add(self, item):
    """Add a row to the table.

    Lists can be passed and are automatically converted to Rows.

    :param item: an element to add to the rows; can be list or Row object
    :type item: row, list
    """
    # Lists are wrapped first (so a Row subclassing list is also wrapped,
    # matching the original branch order).
    if isinstance(item, list):
        item = Row(item)
    if not isinstance(item, Row):
        raise InvalidMessageItemError(item, item.__class__)
    self.rows.append(item)
def reset(self):
    """Clean any processing data, and prepare object for reuse."""
    # Clear per-document parsing state.
    self.current_table = None
    self.tables = []
    self.data = [{}]
    self.additional_data = {}
    self.lines = []
    # Return the state machine to its initial state; kept in this position
    # in case set_state inspects attributes assigned above.
    self.set_state('document')
    self.current_file = None
    self.set_of_energies = set()
def hexbin(self, x, y, size, orientation="pointytop", palette="Viridis256",
           line_color=None, fill_color=None, aspect_scale=1, **kwargs):
    '''Perform a simple equal-weight hexagonal binning.

    A ``HexTile`` glyph is added to display the binning. Its
    ``ColumnDataSource`` has columns ``q``, ``r`` (axial coordinates for a
    tile) and ``count`` (the associated bin count). It is often useful to
    set ``match_aspect=True`` on the associated plot, so that hexagonal
    tiles are all regular (i.e. not "stretched") in screen space. For more
    sophisticated use-cases (weighted binning, individually scaled tiles)
    use ``hex_tile`` directly, or a higher level library such as HoloViews.

    Args:
        x (array[float]): x-coordinates to bin into hexagonal tiles.
        y (array[float]): y-coordinates to bin into hexagonal tiles.
        size (float): Size of the hexagonal tiling: the distance from the
            center of a hexagon to a corner ("top" corner for "pointytop"
            orientation, "side" corner for "flattop" when the aspect
            scaling is not 1-1).
        orientation ("pointytop" or "flattop", optional): Whether tiles
            have a pointed corner or a flat side on top.
            (default: "pointytop")
        palette (str or seq[color], optional): Palette (or palette name)
            used to colormap the bins by count; ``fill_color`` overrides
            this. (default: 'Viridis256')
        line_color (color, optional): Outline color for hex tiles, or
            None. (default: None)
        fill_color (color, optional): Fill color for hex tiles; if None,
            the ``palette`` color maps the tiles by count. (default: None)
        aspect_scale (float): Match a plot's aspect ratio scaling, so
            regular hexagons are drawn on plots with
            ``aspect_scale != 1``.

    Any additional keyword arguments are passed to ``hex_tile``.

    Returns:
        (GlyphRenderer, DataFrame): the ``HexTile`` renderer generated to
        display the binning, and a Pandas DataFrame with columns ``q``,
        ``r``, and ``count``.

    .. _axial coordinates: https://www.redblobgames.com/grids/hexagons/#coordinates-axial
    '''
    from ..util.hex import hexbin

    bins = hexbin(x, y, size, orientation, aspect_scale=aspect_scale)

    # Default to a linear colormap over the bin counts.
    if fill_color is None:
        fill_color = linear_cmap('c', palette, 0, max(bins.counts))

    source = ColumnDataSource(data=dict(q=bins.q, r=bins.r, c=bins.counts))

    renderer = self.hex_tile(q="q", r="r", size=size, orientation=orientation,
                             aspect_scale=aspect_scale, source=source,
                             line_color=line_color, fill_color=fill_color,
                             **kwargs)
    return (renderer, bins)
def find(self, addr, what, max_search=None, max_symbolic_bytes=None,
         default=None, step=1, disable_actions=False, inspect=True,
         chunk_size=None):
    """Returns the address of bytes equal to 'what', starting from 'start'.

    Note that, if you don't specify a default value, this search could
    cause the state to go unsat if no possible matching byte exists.

    :param addr:               The start address.
    :param what:               What to search for.
    :param max_search:         Search at most this many bytes.
    :param max_symbolic_bytes: Search through at most this many symbolic bytes.
    :param default:            The default value, if what you're looking for wasn't found.
    :param step:               The stride that the search should use while scanning memory.
    :param disable_actions:    Whether to inhibit the creation of SimActions for memory access.
    :param inspect:            Whether to trigger SimInspect breakpoints.
    :returns:                  An expression representing the address of the matching byte.
    """
    addr = _raw_ast(addr)
    what = _raw_ast(what)
    default = _raw_ast(default)

    # Plain bytes are promoted to a concrete bitvector first.
    if isinstance(what, bytes):
        what = claripy.BVV(what, len(what) * self.state.arch.byte_width)

    r, c, m = self._find(
        addr, what,
        max_search=max_search,
        max_symbolic_bytes=max_symbolic_bytes,
        default=default,
        step=step,
        disable_actions=disable_actions,
        inspect=inspect,
        chunk_size=chunk_size,
    )

    if o.AST_DEPS in self.state.options and self.category == 'reg':
        r = SimActionObject(r, reg_deps=frozenset((addr,)))

    return r, c, m
def GetCampaignFeeds(client, feed, placeholder_type):
    """Get a list of Feed Item Ids used by a campaign via a given Campaign Feed.

    Args:
        client: an AdWordsClient instance.
        feed: a Campaign Feed.
        placeholder_type: the Placeholder Type.

    Returns:
        A list of Feed Item Ids.
    """
    service = client.GetService('CampaignFeedService', 'v201809')

    selector = {
        'fields': ['CampaignId', 'MatchingFunction', 'PlaceholderTypes'],
        'predicates': [
            {'field': 'Status', 'operator': 'EQUALS', 'values': ['ENABLED']},
            {'field': 'FeedId', 'operator': 'EQUALS', 'values': [feed['id']]},
            {'field': 'PlaceholderTypes', 'operator': 'CONTAINS_ANY',
             'values': [placeholder_type]},
        ],
        'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE},
    }

    campaign_feeds = []
    while True:
        page = service.get(selector)
        if 'entries' in page:
            campaign_feeds.extend(page['entries'])
        # Advance the paging window; stop once past the last entry.
        selector['paging']['startIndex'] += PAGE_SIZE
        if selector['paging']['startIndex'] >= int(page['totalNumEntries']):
            break
    return campaign_feeds
def target_in_range(self, target: "Unit", bonus_distance: Union[int, float] = 0) -> bool:
    """Includes the target's radius when calculating distance to target."""
    # Pick the applicable weapon range; -1 means "cannot attack target".
    if self.can_attack_ground and not target.is_flying:
        attack_range = self.ground_range
    elif self.can_attack_air and (
        target.is_flying or target.type_id == UnitTypeId.COLOSSUS
    ):
        attack_range = self.air_range
    else:
        attack_range = -1
    reach = self.radius + target.radius + attack_range - bonus_distance
    return self.position._distance_squared(target.position) <= reach ** 2
def GetKey(self, public_key_hash):
    """Get the KeyPair belonging to the public key hash.

    Args:
        public_key_hash (UInt160): a public key hash to get the KeyPair for.

    Returns:
        KeyPair: If successful, the KeyPair belonging to the public key
        hash, otherwise None.
    """
    # Single dict.get lookup replaces the original's redundant membership
    # test (`in self._keys.keys()`) and duplicated ToBytes() call.
    return self._keys.get(public_key_hash.ToBytes())
def rawfile_within_timeframe(rawfile, timeframe):
    """Checks whether the given raw filename timestamp falls within
    [start, end] timeframe.

    :param rawfile: filename containing a ``-YYYYMMDD-`` timestamp
    :param timeframe: two-item sequence of datetimes (start, end);
        assumed timezone-aware -- TODO confirm with callers.
    :returns: True if the timestamp date lies within the timeframe;
        False otherwise, including when no timestamp is found (the
        original implicitly returned None in that case).
    """
    matches = re.search(r'-(\d{8})-', rawfile)
    if not matches:
        # BUGFIX: explicit False instead of an implicit None fall-through.
        return False
    ftime = pytz.utc.localize(datetime.strptime(matches.group(1), "%Y%m%d"))
    return timeframe[0].date() <= ftime.date() <= timeframe[1].date()
def generate(env):
    "Add RPCGEN Builders and construction variables for an Environment."
    builders = {
        'RPCGenClient': Builder(action=rpcgen_client,
                                suffix='_clnt.c', src_suffix='.x'),
        'RPCGenHeader': Builder(action=rpcgen_header,
                                suffix='.h', src_suffix='.x'),
        'RPCGenService': Builder(action=rpcgen_service,
                                 suffix='_svc.c', src_suffix='.x'),
        'RPCGenXDR': Builder(action=rpcgen_xdr,
                             suffix='_xdr.c', src_suffix='.x'),
    }
    env.Append(BUILDERS=builders)
    env['RPCGEN'] = 'rpcgen'
    # Empty flag variables for users to extend per builder.
    for flag_var in ('RPCGENFLAGS', 'RPCGENCLIENTFLAGS', 'RPCGENHEADERFLAGS',
                     'RPCGENSERVICEFLAGS', 'RPCGENXDRFLAGS'):
        env[flag_var] = SCons.Util.CLVar('')
def log_difference(lx, ly):
    """Returns log(exp(lx) - exp(ly)) without leaving log space.

    :param lx: log of the minuend (scalar or array).
    :param ly: log of the subtrahend (scalar or array).
    :raises ValueError: if any element has ly >= lx (x - y would be <= 0).
    """
    # (Removed an unused local holding -log(double inf) from the original.)
    diff = ly - lx
    # log(x - y) is only defined when x > y everywhere.
    if np.any(diff >= 0):
        raise ValueError('Cannot compute log(x-y), because y>=x for some elements.')
    # log(exp(lx) - exp(ly)) = lx + log(1 - exp(ly - lx))
    return lx + np.log(1. - np.exp(diff))
def use_gl(target='gl2'):
    """Let Vispy use the target OpenGL ES 2.0 implementation

    Also see ``vispy.use()``.

    Parameters
    ----------
    target : str
        The target GL backend to use. Available backends:
        * gl2 - Use ES 2.0 subset of desktop (i.e. normal) OpenGL
        * gl+ - Use the desktop ES 2.0 subset plus all non-deprecated GL
          functions on your system (requires PyOpenGL)
        * es2 - Use the ES2 library (Angle/DirectX on Windows)
        * pyopengl2 - Use ES 2.0 subset of pyopengl (for fallback and testing)
        * dummy - Prevent usage of gloo.gl (for when rendering occurs elsewhere)

    You can use vispy's config option "gl_debug" to check for errors on
    each API call. Or, one can specify it as the target, e.g. "gl2 debug".
    (Debug does not apply to 'gl+', since PyOpenGL has its own debug
    mechanism)
    """
    target = target or 'gl2'
    # '+' is not a valid module-name character; map "gl+" -> "glplus".
    target = target.replace('+', 'plus')
    # Get options
    target, _, options = target.partition(' ')
    debug = config['gl_debug'] or 'debug' in options
    # Select modules to import names from
    try:
        # Relative import (level=1): backends live in this package.
        mod = __import__(target, globals(), level=1)
    except ImportError as err:
        msg = 'Could not import gl target "%s":\n%s' % (target, str(err))
        raise RuntimeError(msg)
    # Apply
    global current_backend
    current_backend = mod
    _clear_namespace()
    if 'plus' in target:
        # Copy PyOpenGL funcs, extra funcs, constants, no debug
        _copy_gl_functions(mod._pyopengl2, globals())
        _copy_gl_functions(mod, globals(), True)
    elif debug:
        _copy_gl_functions(_debug_proxy, globals())
    else:
        _copy_gl_functions(mod, globals())
def _sigma_pi_midE ( self , Tp ) : """Geant 4.10.0 model for 2 GeV < Tp < 5 GeV"""
m_p = self . _m_p Qp = ( Tp - self . _Tth ) / m_p multip = - 6e-3 + 0.237 * Qp - 0.023 * Qp ** 2 return self . _sigma_inel ( Tp ) * multip
def getAttributeValue(self, namespaceURI, localName):
    '''Return the attribute's value, or None when the attribute is absent.

    Keyword arguments:
    namespaceURI -- namespace of attribute
    localName -- local name of attribute
    '''
    if not self.hasAttribute(namespaceURI, localName):
        return None
    return self.node.getAttributeNodeNS(namespaceURI, localName).value
def dim(self, dim_index):
    """Get an SDim instance given a dimension index number.

    Args::
        dim_index    index number of the dimension (numbering starts at 0)

    C library equivalent: SDgetdimid
    """
    # Renamed from 'id', which shadowed the builtin of the same name.
    dim_id = _C.SDgetdimid(self._id, dim_index)
    _checkErr('dim', dim_id, 'invalid SDS identifier or dimension index')
    return SDim(self, dim_id, dim_index)
def locatedExpr(expr):
    """Helper to decorate a returned token with its starting and ending
    locations in the input string.

    This helper adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results

    Be careful if the input text contains ``<TAB>`` characters, you may
    want to call :class:`ParserElement.parseWithTabs`

    Example::
        wd = Word(alphas)
        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
            print(match)
    prints::
        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    """
    # An Empty matcher whose parse action records the current parse location.
    locator = Empty().setParseAction(lambda s, l, t: l)
    start = locator("locn_start")
    end = locator.copy().leaveWhitespace()("locn_end")
    return Group(start + expr("value") + end)
def dafus(insum, nd, ni):
    """Unpack an array summary into its double precision and integer
    components.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafus_c.html

    :param insum: Array summary.
    :type insum: Array of floats
    :param nd: Number of double precision components.
    :type nd: int
    :param ni: Number of integer components.
    :type ni: int
    :return: Double precision components, Integer components.
    :rtype: tuple
    """
    summary = stypes.toDoubleVector(insum)
    dc = stypes.emptyDoubleVector(nd)
    ic = stypes.emptyIntVector(ni)
    libspice.dafus_c(summary, ctypes.c_int(nd), ctypes.c_int(ni), dc, ic)
    return stypes.cVectorToPython(dc), stypes.cVectorToPython(ic)
def _parse_match_info ( match , soccer = False ) : """Parse string containing info of a specific match : param match : Match data : type match : string : param soccer : Set to true if match contains soccer data , defaults to False : type soccer : bool , optional : return : Dictionary containing match information : rtype : dict"""
match_info = { } i_open = match . index ( '(' ) i_close = match . index ( ')' ) match_info [ 'league' ] = match [ i_open + 1 : i_close ] . strip ( ) match = match [ i_close + 1 : ] i_vs = match . index ( 'vs' ) i_colon = match . index ( ':' ) match_info [ 'home_team' ] = match [ 0 : i_vs ] . replace ( '#' , ' ' ) . strip ( ) match_info [ 'away_team' ] = match [ i_vs + 2 : i_colon ] . replace ( '#' , ' ' ) . strip ( ) match = match [ i_colon : ] if soccer : i_hyph = match . index ( '-' ) match_info [ 'match_score' ] = match [ 1 : i_hyph + 2 ] . strip ( ) match = match [ i_hyph + 1 : ] i_hyph = match . index ( '-' ) match_info [ 'match_time' ] = match [ i_hyph + 1 : ] . strip ( ) else : match_info [ 'match_score' ] = match [ 1 : ] . strip ( ) return match_info
def moments(self):
    """The first two time delay weighted statistical moments of the MA
    coefficients."""
    mean = statstools.calc_mean_time(self.delays, self.coefs)
    deviation = statstools.calc_mean_time_deviation(
        self.delays, self.coefs, mean)
    return numpy.array([mean, deviation])
def wait_for_shutdown_signal(
    self,
    please_stop=False,  # ASSIGN SIGNAL TO STOP EARLY
    allow_exit=False,  # ALLOW "exit" COMMAND ON CONSOLE TO ALSO STOP THE APP
    wait_forever=True  # IGNORE CHILD THREADS, NEVER EXIT. False => IF NO CHILD THREADS LEFT, THEN EXIT
):
    """
    FOR USE BY PROCESSES THAT NEVER DIE UNLESS EXTERNAL SHUTDOWN IS REQUESTED

    CALLING THREAD WILL SLEEP UNTIL keyboard interrupt, OR please_stop, OR "exit"

    :param please_stop:
    :param allow_exit:
    :param wait_forever:: Assume all needed threads have been launched. When done
    :return:
    """
    self_thread = Thread.current()
    # NOTE(review): `!= ... or != ...` is true unless self_thread equals BOTH
    # MAIN_THREAD and self — confirm `or` (rather than `and`) is intended.
    if self_thread != MAIN_THREAD or self_thread != self:
        Log.error("Only the main thread can sleep forever (waiting for KeyboardInterrupt)")

    if isinstance(please_stop, Signal):
        # MUTUAL SIGNALING MAKES THESE TWO EFFECTIVELY THE SAME SIGNAL
        self.please_stop.on_go(please_stop.go)
        please_stop.on_go(self.please_stop.go)
    else:
        please_stop = self.please_stop

    if not wait_forever:
        # TRIGGER SIGNAL WHEN ALL CHILDREN THEADS ARE DONE
        with self_thread.child_lock:
            pending = copy(self_thread.children)
        children_done = AndSignals(please_stop, len(pending))
        children_done.signal.on_go(self.please_stop.go)
        for p in pending:
            p.stopped.on_go(children_done.done)

    try:
        if allow_exit:
            _wait_for_exit(please_stop)
        else:
            _wait_for_interrupt(please_stop)
    except KeyboardInterrupt as _:
        Log.alert("SIGINT Detected! Stopping...")
    except SystemExit as _:
        Log.alert("SIGTERM Detected! Stopping...")
    finally:
        # Always shut this object down, whatever ended the wait.
        self.stop()
def version(*names, **kwargs):
    '''
    Returns a string representing the package version or an empty string if
    not installed. If more than one package name is specified, a dict of
    name/version pairs is returned.

    .. note::

        This function can accessed using ``pkg.info`` in addition to
        ``pkg.version``, to more closely match the CLI usage of ``pkg(8)``.

    jail
        Get package version information for the specified jail

    chroot
        Get package version information for the specified chroot (ignored
        if ``jail`` is specified)

    root
        Get package version information for the specified root (ignored
        if ``jail`` is specified)

    with_origin : False
        Return a nested dictionary containing both the origin name and
        version for each specified package.

        .. versionadded:: 2014.1.0

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.version <package name>
        salt '*' pkg.version <package name> jail=<jail name or id>
        salt '*' pkg.version <package1> <package2> <package3> ...
    '''
    with_origin = kwargs.pop('with_origin', False)
    ret = __salt__['pkg_resource.version'](*names, **kwargs)
    if not salt.utils.data.is_true(with_origin):
        return ret
    # Put the return value back into a dict since we're adding a subdict
    if len(names) == 1:
        ret = {names[0]: ret}
    origins = __context__.get('pkg.origin', {})
    return dict([
        (pkg, {'origin': origins.get(pkg, ''), 'version': ver})
        for pkg, ver in six.iteritems(ret)
    ])
def secondaries(self):
    """Return list of secondary members (replica-set state 2)."""
    members = []
    for host in self.get_members_in_state(2):
        members.append({
            "_id": self.host2id(host),
            "host": host,
            "server_id": self._servers.host_to_server_id(host),
        })
    return members
def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable:
    """
    Returns the table format of the scheme, i.e.:

        <table name>
        | <field1> | <field2> ...
        | value1(field1) | value1(field2)
        | value2(field1) | value2(field2)
        | value3(field1) | value3(field2)

    :param indent: number of console columns reserved to the left of the table.
    :param align: alignment key, one of the keys of ``_ALIGN_DICTIONARY``.
    :param border: whether the table is drawn with a border.
    :return: the populated ``PrettyTable``.
    """
    rows = self.rows
    columns = self.columns

    # Add the column color.
    if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0:
        # Copy the outer lists as well as the first row before mutating them,
        # so the color codes are never inserted into the original data.
        # (Previously only rows[0] was copied and then assigned back into
        # self.rows, which silently mutated the original rows list.)
        rows = rows[:]
        rows[0] = rows[0][:]
        columns = columns[:]
        columns[0] = self._headers_color + columns[0]
        # Write the table itself in NORMAL color.
        rows[0][0] = Printer.NORMAL + str(rows[0][0])

    table = PrettyTable(columns, border=border, max_width=get_console_width() - indent)
    table.align = self._ALIGN_DICTIONARY[align]
    for row in rows:
        table.add_row(row)

    # Set the max width according to the columns size dict, or by default
    # size limit when columns were not provided.
    for column, max_width in self._column_size_map.items():
        table.max_width[column] = max_width
    return table
def _rank_cycle_function(self, cycle, function, ranks):
    """Dijkstra's shortest paths algorithm.

    Ranks every function belonging to *cycle* by its shortest call-path
    distance from *function* (which gets rank 0), writing results into the
    *ranks* mapping.

    See also:
    - http://en.wikipedia.org/wiki/Dijkstra's_algorithm
    """
    import heapq
    Q = []      # heap of mutable [rank, parent, member] entries
    Qd = {}     # callee -> its heap entry, enabling in-place decrease-key
    p = {}      # predecessor map for visited members
    visited = set([function])
    ranks[function] = 0
    # Seed the queue with the start function's direct callees that belong to
    # the same cycle; self-calls (callee_id == function.id) are skipped.
    for call in compat_itervalues(function.calls):
        if call.callee_id != function.id:
            callee = self.functions[call.callee_id]
            if callee.cycle is cycle:
                ranks[callee] = 1
                item = [ranks[callee], function, callee]
                heapq.heappush(Q, item)
                Qd[callee] = item
    while Q:
        cost, parent, member = heapq.heappop(Q)
        if member not in visited:
            p[member] = parent
            visited.add(member)
            for call in compat_itervalues(member.calls):
                if call.callee_id != member.id:
                    callee = self.functions[call.callee_id]
                    if callee.cycle is cycle:
                        member_rank = ranks[member]
                        rank = ranks.get(callee)
                        if rank is not None:
                            # Already queued: perform a decrease-key by
                            # mutating the heap entry in place, then restore
                            # the heap invariant.  NOTE: relies on the private
                            # heapq._siftdown helper and an O(n) Q.index scan.
                            if rank > 1 + member_rank:
                                rank = 1 + member_rank
                                ranks[callee] = rank
                                Qd_callee = Qd[callee]
                                Qd_callee[0] = rank
                                Qd_callee[1] = member
                                heapq._siftdown(Q, 0, Q.index(Qd_callee))
                        else:
                            # First time this callee is reached: push a fresh entry.
                            rank = 1 + member_rank
                            ranks[callee] = rank
                            item = [rank, member, callee]
                            heapq.heappush(Q, item)
                            Qd[callee] = item
def _apply_advanced_config(config_spec, advanced_config, vm_extra_config=None):
    '''
    Sets advanced configuration (extraConfig) parameters on the vm config spec

    config_spec
        vm.ConfigSpec object the options are appended to

    advanced_config
        config key value pairs

    vm_extra_config
        Virtual machine vm_ref.config.extraConfig object; keys already
        present with the requested value are skipped
    '''
    log.trace('Configuring advanced configuration '
              'parameters %s', advanced_config)
    if isinstance(advanced_config, str):
        raise salt.exceptions.ArgumentValueError(
            'The specified \'advanced_configs\' configuration '
            'option cannot be parsed, please check the parameters')
    for key, value in six.iteritems(advanced_config):
        # Skip keys that are already set to the requested value.  The previous
        # implementation put the append inside the scan loop, so it appended
        # one duplicate OptionValue for every *non*-matching existing option,
        # and the `continue` only skipped a single inner iteration.
        if vm_extra_config and any(
                option.key == key and option.value == str(value)
                for option in vm_extra_config):
            continue
        config_spec.extraConfig.append(
            vim.option.OptionValue(key=key, value=value))
def activate(self, engine):
    """
    Activates the Component.

    :param engine: Container to attach the Component to.
    :type engine: QObject
    :return: Method success.
    :rtype: bool
    """
    LOGGER.debug("> Activating '{0}' Component.".format(self.__class__.__name__))

    self.__engine = engine
    self.__settings = self.__engine.settings
    self.__settings_section = self.name

    # Resolve the default script editor directory inside the user data
    # directory and make sure it exists on disk.
    self.__default_script_editor_directory = os.path.join(
        self.__engine.user_application_data_directory,
        Constants.io_directory,
        self.__default_script_editor_directory)
    if not foundations.common.path_exists(self.__default_script_editor_directory):
        os.makedirs(self.__default_script_editor_directory)

    self.__default_session_directory = os.path.join(
        self.__default_script_editor_directory, self.__default_session_directory)
    if not foundations.common.path_exists(self.__default_session_directory):
        os.makedirs(self.__default_session_directory)

    self.__default_script_editor_file = os.path.join(
        self.__default_script_editor_directory, self.__default_script_editor_file)

    self.__console = code.InteractiveConsole(self.__engine.locals)

    self.activated = True
    return True
def prop(key, dct_or_obj):
    """
    Implementation of prop (get_item) that also supports object attributes

    :param key: dict key, list index, or attribute name
    :param dct_or_obj: dict, list, or arbitrary object to read from
    :return: the value stored under key
    :raises Exception: if the key/index/attribute cannot be resolved
    """
    # isinstance takes (instance, type); the previous version had the
    # arguments reversed, which raised a TypeError for any non-type input.
    if isinstance(dct_or_obj, dict):
        if has(key, dct_or_obj):
            return dct_or_obj[key]
        else:
            raise Exception("No key %s found for dict %s" % (key, dct_or_obj))
    elif isinstance(dct_or_obj, list):
        if isint(key):
            return dct_or_obj[key]
        else:
            raise Exception("Key %s not expected for list type: %s" % (key, dct_or_obj))
    elif isinstance(dct_or_obj, object):
        if hasattr(dct_or_obj, key):
            # getattr takes (object, name); the arguments were reversed here too.
            return getattr(dct_or_obj, key)
        else:
            raise Exception("No key %s found for objects %s" % (key, dct_or_obj))
    else:
        raise Exception("%s is neither a dict nor objects" % dct_or_obj)
async def _auth_cram_md5(self, username, password):
    """
    Performs an authentication attempt using the CRAM-MD5 mechanism.

    Protocol:
        1. Send 'AUTH CRAM-MD5' to server;
        2. If the server replies with a 334 return code, we can go on:
            1) The challenge (sent by the server) is base64-decoded;
            2) The decoded challenge is hashed using HMAC-MD5 and the user
               password as key (shared secret);
            3) The hashed challenge is converted to a string of lowercase
               hexadecimal digits;
            4) The username and a space character are prepended to the hex
               digits;
            5) The concatenation is base64-encoded and sent to the server.
            6) If the server replies with a return code of 235, user is
               authenticated.

    Args:
        username (str): Identifier of the user trying to authenticate.
        password (str): Password for the user.

    Raises:
        ConnectionResetError: If the connection with the server is
            unexpectedely lost.
        SMTPAuthenticationError: If the authentication attempt fails.

    Returns:
        (int, str): A (code, message) 2-tuple containing the server response.
    """
    mechanism = "CRAM-MD5"
    # Step 1: only a 334 reply (carrying the base64 challenge) lets us proceed.
    code, message = await self.do_cmd("AUTH", mechanism, success=(334,))
    # Steps 2.1-2.3: decode the challenge and HMAC-MD5 it with the password.
    decoded_challenge = base64.b64decode(message)
    challenge_hash = hmac.new(key=password.encode("utf-8"), msg=decoded_challenge, digestmod="md5")
    hex_hash = challenge_hash.hexdigest()
    # Steps 2.4-2.5: "<username> <hexdigest>", base64-encoded.
    response = "{} {}".format(username, hex_hash)
    encoded_response = SMTP.b64enc(response)
    try:
        # 235 is success; 503 is also tolerated here — presumably "already
        # authenticated"; confirm against the server behavior.
        code, message = await self.do_cmd(encoded_response, success=(235, 503))
    except SMTPCommandFailedError as e:
        # Surface the failure as an authentication error tagged with the mechanism.
        raise SMTPAuthenticationError(e.code, e.message, mechanism)
    return code, message
def hashstr_arr(arr, lbl='arr', pathsafe=False, **kwargs):
    r"""
    Hashes an array (or list/tuple) and labels the digest with its shape.

    Args:
        arr (ndarray):
        lbl (str): (default = 'arr')
        pathsafe (bool): (default = False)

    Returns:
        str: arr_hashstr

    CommandLine:
        python -m utool.util_hash --test-hashstr_arr
        python -m utool.util_hash hashstr_arr:2

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> import numpy as np
        >>> arr = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)
        >>> lbl = 'arr'
        >>> kwargs = {}
        >>> pathsafe = False
        >>> arr_hashstr = hashstr_arr(arr, lbl, pathsafe, alphabet=ALPHABET_27)
        >>> result = ('arr_hashstr = %s' % (str(arr_hashstr),))
        >>> print(result)
        arr_hashstr = arr((2,3)daukyreqnhfejkfs)

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_hash import *  # NOQA
        >>> import numpy as np
        >>> arr = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)
        >>> kwargs = {}
        >>> lbl = 'arr'
        >>> pathsafe = True
        >>> arr_hashstr = hashstr_arr(arr, lbl, pathsafe, alphabet=ALPHABET_27)
        >>> result = ('arr_hashstr = %s' % (str(arr_hashstr),))
        >>> print(result)
        arr_hashstr = arr-_2,3_daukyreqnhfejkfs-
    """
    if isinstance(arr, list):
        # lists are not hashable; coerce into a tuple first
        arr = tuple(arr)
    # Pathsafe output swaps parentheses for characters legal in file names.
    if pathsafe:
        shape_open, shape_close, wrap_open, wrap_close = '_', '_', '-', '-'
    else:
        shape_open, shape_close, wrap_open, wrap_close = '(', ')', '(', ')'
    if isinstance(arr, tuple):
        shape_str = shape_open + str(len(arr)) + shape_close
    else:
        # arr should be an ndarray here; encode its full shape
        shape_str = shape_open + ','.join(str(dim) for dim in arr.shape) + shape_close
    digest = hashstr(arr, **kwargs)
    return '%s%s%s%s%s' % (lbl, wrap_open, shape_str, digest, wrap_close)
def getRealContributions(self):
    """Get the real number of contributions (private + public).

    Scrapes the user's GitHub overview page one month at a time over the
    last year and sums the per-period contribution counters.  Sets
    ``self.private`` and ``self.public`` (``contributions - private``,
    floored at 0) as a side effect.
    """
    # Start roughly one year back and walk forward in one-month windows.
    datefrom = datetime.now() - relativedelta(days=366)
    dateto = datefrom + relativedelta(months=1) - relativedelta(days=1)
    private = 0
    while datefrom < datetime.now():
        fromstr = datefrom.strftime("%Y-%m-%d")
        tostr = dateto.strftime("%Y-%m-%d")
        url = self.server + self.name
        url += "?tab=overview&from=" + fromstr + "&to=" + tostr
        data = GitHubUser.__getDataFromURL(url)
        web = BeautifulSoup(data, "lxml")
        # NOTE(review): these CSS class names are tied to GitHub's page
        # markup at the time of writing and will break if the site changes.
        aux = "f4 lh-condensed m-0 text-gray"
        pcontribs = web.find_all("span", {"class": aux})
        aux = web.find_all('span', {'class': 'text-gray m-0'})
        noContribs = False
        for compr in aux:
            if "had no activity during this period." in compr.text:
                noContribs = True
        try:
            if not noContribs:
                for contrib in pcontribs:
                    contribution = None
                    contribution = contrib.text
                    # Strip thousands separators and newlines, then keep the
                    # leading number of the "<count> contributions ..." text.
                    contribution = contribution.lstrip().replace(",", "")
                    contribution = contribution.replace("\n", " ")
                    contribution = contribution.partition(" ")[0]
                    private += int(contribution)
        except IndexError as error:
            print("There was an error with the user " + self.name)
            print(error)
        except AttributeError as error:
            print("There was an error with the user " + self.name)
            print(error)
        # Advance the scraping window by one month.
        datefrom += relativedelta(months=1)
        dateto += relativedelta(months=1)
    self.private = private
    self.public = self.contributions - private
    if self.public < 0:
        # Is not exact
        self.public = 0
def _fail ( self , request_id , failure , duration ) : """Publish a CommandFailedEvent ."""
self . listeners . publish_command_failure ( duration , failure , self . name , request_id , self . sock_info . address , self . op_id )
def _recompress_archive(archive, verbosity=0, interactive=True):
    """Try to recompress an archive to smaller size.

    Extracts the archive into a temp dir, re-creates it in a second temp
    dir, and replaces the original only if the new file is strictly smaller.

    :param archive: path of the archive file to recompress
    :param verbosity: verbosity level passed to the extract/create helpers
    :param interactive: whether the create step may prompt the user
    :return: a status message describing whether the archive was replaced
    """
    format, compression = get_archive_format(archive)
    if compression:
        # only recompress the compression itself (eg. for .tar.xz)
        format = compression
    tmpdir = util.tmpdir()
    tmpdir2 = util.tmpdir()
    base, ext = os.path.splitext(os.path.basename(archive))
    archive2 = util.get_single_outfile(tmpdir2, base, extension=ext)
    try:
        # extract
        kwargs = dict(verbosity=verbosity, format=format, outdir=tmpdir)
        path = _extract_archive(archive, **kwargs)
        # compress to new file; the create step runs from inside the
        # extraction dir so archive members get relative paths
        olddir = os.getcwd()
        os.chdir(path)
        try:
            kwargs = dict(verbosity=verbosity, interactive=interactive, format=format)
            files = tuple(os.listdir(path))
            _create_archive(archive2, files, **kwargs)
        finally:
            # always restore the previous working directory
            os.chdir(olddir)
        # check file sizes and replace if new file is smaller
        filesize = util.get_filesize(archive)
        filesize2 = util.get_filesize(archive2)
        if filesize2 < filesize:
            # replace file
            os.remove(archive)
            shutil.move(archive2, archive)
            diffsize = filesize - filesize2
            return "... recompressed file is now %s smaller." % util.strsize(diffsize)
    finally:
        # clean up both temp dirs whether or not the replacement happened
        shutil.rmtree(tmpdir, onerror=rmtree_log_error)
        shutil.rmtree(tmpdir2, onerror=rmtree_log_error)
    return "... recompressed file is not smaller, leaving archive as is."
def load(obj, cls, default_factory):
    """Create or load an object if necessary.

    If *obj* is ``None`` a fresh default is created; if it is a ``dict`` it
    is deserialized via ``cls.load``; otherwise it is returned unchanged.

    Parameters
    ----------
    obj : `object` or `dict` or `None`
    cls : `type`
    default_factory : `function`

    Returns
    -------
    `object`
    """
    if obj is None:
        return default_factory()
    return cls.load(obj) if isinstance(obj, dict) else obj
def _new_render(response):
    """Decorator for the TemplateResponse.render() function.

    Wraps the class-level render so that a raised ``HttpRedirectRequest``
    is converted into an ``HttpResponseRedirect``.
    """
    orig_render = response.__class__.render

    def _inner_render():
        # Takes no arguments; it is installed in place of a bound method.
        try:
            return orig_render(response)
        except HttpRedirectRequest as exc:
            return HttpResponseRedirect(exc.url, status=exc.status)

    return _inner_render
def _fetch_langs ( ) : """Fetch ( scrape ) languages from Google Translate . Google Translate loads a JavaScript Array of ' languages codes ' that can be spoken . We intersect this list with all the languages Google Translate provides to get the ones that support text - to - speech . Returns : dict : A dictionnary of languages from Google Translate"""
# Load HTML page = requests . get ( URL_BASE ) soup = BeautifulSoup ( page . content , 'html.parser' ) # JavaScript URL # The < script src = ' ' > path can change , but not the file . # Ex : / zyx / abc / 20180211 / desktop _ module _ main . js js_path = soup . find ( src = re . compile ( JS_FILE ) ) [ 'src' ] js_url = "{}/{}" . format ( URL_BASE , js_path ) # Load JavaScript js_contents = requests . get ( js_url ) . text # Approximately extract TTS - enabled language codes # RegEx pattern search because minified variables can change . # Extra garbage will be dealt with later as we keep languages only . # In : " [ . . . ] Fv = { af : 1 , ar : 1 , [ . . . ] , zh : 1 , " zh - cn " : 1 , " zh - tw " : 1 } [ . . . ] " # Out : [ ' is ' , ' 12 ' , [ . . . ] , ' af ' , ' ar ' , [ . . . ] , ' zh ' , ' zh - cn ' , ' zh - tw ' ] pattern = r'[{,\"](\w{2}|\w{2}-\w{2,3})(?=:1|\":1)' tts_langs = re . findall ( pattern , js_contents ) # Build lang . dict . from main page ( JavaScript object populating lang . menu ) # Filtering with the TTS - enabled languages # In : " { code : ' auto ' , name : ' Detect language ' } , { code : ' af ' , name : ' Afrikaans ' } , [ . . . ] " # re . findall : [ ( ' auto ' , ' Detect language ' ) , ( ' af ' , ' Afrikaans ' ) , [ . . . ] ] # Out : { ' af ' : ' Afrikaans ' , [ . . . ] } trans_pattern = r"{code:'(?P<lang>.+?[^'])',name:'(?P<name>.+?[^'])'}" trans_langs = re . findall ( trans_pattern , page . text ) return { lang : name for lang , name in trans_langs if lang in tts_langs }
def find_connectable_ip(host, port=None):
    """Resolve a hostname to an IP, preferring IPv4 addresses.

    We prefer IPv4 so that we don't change behavior from previous IPv4-only
    implementations, and because some drivers (e.g., FirefoxDriver) do not
    support IPv6 connections.

    If the optional port number is provided, only IPs that listen on the
    given port are considered.

    :Args:
        - host - A hostname.
        - port - Optional port number.

    :Returns:
        A single IP address, as a string. If any IPv4 address is found, one is
        returned. Otherwise, if any IPv6 address is found, one is returned. If
        neither, then None is returned.
    """
    try:
        candidates = socket.getaddrinfo(host, None)
    except socket.gaierror:
        return None

    fallback_v6 = None
    for family, _, _, _, sockaddr in candidates:
        address = sockaddr[0]
        # Without a port every resolved address counts as connectable.
        if port and not is_connectable(port, address):
            continue
        if family == socket.AF_INET:
            # First usable IPv4 address wins immediately.
            return address
        if family == socket.AF_INET6 and fallback_v6 is None:
            # Remember the first usable IPv6 address as a fallback.
            fallback_v6 = address
    return fallback_v6
def keyframe(self, keyframe):
    """Set keyframe.

    The keyframe may be assigned exactly once: re-assigning the same
    keyframe is a no-op, while assigning a different one after the first
    raises.

    :raises RuntimeError: if a different keyframe was already set, or if the
        candidate keyframe's data offsets are incompatible with this frame.
    """
    if self._keyframe == keyframe:
        # Same keyframe already set; nothing to do.
        return
    if self._keyframe is not None:
        raise RuntimeError('cannot reset keyframe')
    if len(self._offsetscounts[0]) != len(keyframe.dataoffsets):
        raise RuntimeError('incompatible keyframe')
    if keyframe.is_tiled:
        # NOTE(review): tiled keyframes are accepted without any adjustment
        # here; confirm whether dedicated handling was intended.
        pass
    if keyframe.is_contiguous:
        # Contiguous data collapses to a single offset/bytecount pair.
        # Assumes is_contiguous is a 2-sequence whose second item is the
        # total byte count -- TODO confirm.
        self._offsetscounts = ([self._offsetscounts[0][0]], [keyframe.is_contiguous[1]])
    else:
        self._offsetscounts = clean_offsetscounts(*self._offsetscounts)
    self._keyframe = keyframe
def is_valid_single_address(self, single_address):
    """Check if a potentially ambiguous single address spec really exists.

    :param single_address: A SingleAddress spec.
    :return: True if given spec exists, False otherwise.
    """
    if not isinstance(single_address, SingleAddress):
        raise TypeError('Parameter "{}" is of type {}, expecting type {}.'.format(
            single_address, type(single_address), SingleAddress))

    try:
        matches = self.scan_specs([single_address])
    except AddressLookupError:
        # An unresolvable spec simply does not exist.
        return False
    return bool(matches)
def get_battery_level(self):
    """Reads the battery level descriptor on the device.

    Returns:
        int. If successful this will be a positive value representing
        the current battery level as a percentage. On error, -1 is
        returned.
    """
    battery_level = self.get_characteristic_handle_from_uuid(UUID_BATTERY_LEVEL)
    if battery_level is None:
        # warn() is a deprecated alias of warning()
        logger.warning('Failed to find handle for battery level')
        # Return the documented error sentinel (-1) instead of None, so all
        # failure modes are reported consistently to callers.
        return -1

    level = self.dongle._read_attribute(self.conn_handle, battery_level)
    if level is None:
        return -1

    return ord(level)
def adjust_jobs_priority(self, high_value_jobs, priority=1):
    """
    For every job priority determine if we need to increase or decrease the job priority

    Currently, high value jobs have a priority of 1 and a timeout of 0.
    """
    # Only job priorities that don't have an expiration date (2 weeks for new jobs
    # or year 2100 for jobs update via load_preseed) are updated
    for job_priority in JobPriority.objects.filter(expiration_date__isnull=True):
        if job_priority.unique_identifier() not in high_value_jobs:
            # Low-value job: demote it unless it is already demoted.
            if job_priority.priority == SETA_LOW_VALUE_PRIORITY:
                continue
            logger.warning('Decreasing priority of %s', job_priority.unique_identifier())
            job_priority.priority = SETA_LOW_VALUE_PRIORITY
            job_priority.save(update_fields=['priority'])
        elif job_priority.priority != priority:
            # High-value job whose priority drifted: promote it.
            logger.warning('Increasing priority of %s', job_priority.unique_identifier())
            job_priority.priority = priority
            job_priority.save(update_fields=['priority'])
def export_kml_file(self):
    """Generate KML element tree from ``Placemarks``.

    Returns:
        etree.ElementTree: KML element tree depicting ``Placemarks``
    """
    kml_root = create_elem('kml')
    kml_root.Document = create_elem('Document')
    # Emit placemarks in a stable, name-sorted order.
    placemarks = sorted(self.values(), key=lambda place: place.name)
    for placemark in placemarks:
        kml_root.Document.append(placemark.tokml())
    return etree.ElementTree(kml_root)
def get_values(js_dict, value='value'):
    """Get values from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        value (string, optional): name of the value column. Defaults to 'value'.

    Returns:
        values (list): list of dataset values.
    """
    values = js_dict[value]
    # JSON-stat allows the value field to be either a dense array or a
    # sparse {index: value} mapping.  A list is already dense, so return it
    # as-is.  (The previous guard "type(values[0]) is not dict or tuple" was
    # a tautology -- "or tuple" is always truthy -- so every list was
    # returned here anyway; it also crashed with IndexError on empty lists.)
    if isinstance(values, list):
        return values
    # Sparse representation: keys are stringified flat indices into the
    # dense value array.
    values = {int(key): val for key, val in values.items()}
    if js_dict.get('size'):
        max_val = np.prod(np.array(js_dict['size']))
    else:
        max_val = np.prod(np.array(js_dict['dimension']['size']))
    # Coerce the numpy scalar to a plain int before list multiplication.
    vals = int(max_val) * [None]
    for key, val in values.items():
        vals[key] = val
    return vals
def plot_pdf(self, names=None, Nbest=5, lw=2):
    """Plots Probability density functions of the distributions

    :param str,list names: names can be a single distribution name, or a list
        of distribution names, or kept as None, in which case, the first
        Nbest distribution will be taken (default to best 5)
    :param int Nbest: number of best-fitting distributions to plot when
        *names* is None
    :param int lw: line width passed to the plot calls
    """
    assert Nbest > 0
    if Nbest > len(self.distributions):
        Nbest = len(self.distributions)

    if isinstance(names, list):
        for name in names:
            pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name)
    elif names:
        pylab.plot(self.x, self.fitted_pdf[names], lw=lw, label=names)
    else:
        try:
            names = self.df_errors.sort_values(by="sumsquare_error").index[0:Nbest]
        except AttributeError:
            # Old pandas versions lack sort_values; fall back to the removed
            # DataFrame.sort API.  (The previous bare `except:` also swallowed
            # KeyboardInterrupt and unrelated errors.)
            names = self.df_errors.sort("sumsquare_error").index[0:Nbest]
        for name in names:
            if name in self.fitted_pdf:
                pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name)
            else:
                print("%s was not fitted. no parameters available" % name)
    pylab.grid(True)
    pylab.legend()
def show(self):
    """
    Prints the content of this method to stdout.

    This will print the method signature and the decompiled code.
    """
    descriptor = self.method.get_descriptor()
    args, ret = descriptor[1:].split(")")
    if self.code:
        # We patch the descriptor here and add the registers, if code is available
        args = args.split(" ")
        start_reg = self.code.get_registers_size() - len(args)
        args = ["{} v{}".format(arg, start_reg + index) for index, arg in enumerate(args)]

    print("METHOD {} {} {} ({}){}".format(
        self.method.get_class_name(),
        self.method.get_access_flags_string(),
        self.method.get_name(),
        ", ".join(args),
        ret))
    bytecode.PrettyShow(self, self.basic_blocks.gets(), self.method.notes)
def fill(args):
    """
    %prog fill frag_reads_corr.fastb

    Run FillFragments on `frag_reads_corr.fastb`.
    """
    p = OptionParser(fill.__doc__)
    p.add_option("--stretch", default=3, type="int",
                 help="MAX_STRETCH to pass to FillFragments [default: %default]")
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastb, = args
    assert fastb == "frag_reads_corr.fastb"

    # Extra command-line fragments appended to the external tool invocations.
    pcfile = "frag_reads_corr.k28.pc.info"
    nthreads = " NUM_THREADS={0}".format(opts.cpus)
    maxstretch = " MAX_STRETCH={0}".format(opts.stretch)

    # Each stage only runs when its output is stale relative to its input
    # (need_update); the commands are external binaries run via sh().
    if need_update(fastb, pcfile):
        cmd = "PathReads READS_IN=frag_reads_corr"
        cmd += nthreads
        sh(cmd)

    filledfastb = "filled_reads.fastb"
    if need_update(pcfile, filledfastb):
        cmd = "FillFragments PAIRS_OUT=frag_reads_corr_cpd"
        cmd += " PRECORRECT_LIBSTATS=True"
        cmd += maxstretch
        cmd += nthreads
        sh(cmd)

    # Convert the filled reads to FASTA format.
    filledfasta = "filled_reads.fasta"
    if need_update(filledfastb, filledfasta):
        cmd = "Fastb2Fasta IN=filled_reads.fastb OUT=filled_reads.fasta"
        sh(cmd)
def transform_points(points, matrix, translate=True):
    """Returns points, rotated by transformation matrix

    If points is (n, 2), matrix must be (3, 3)
    if points is (n, 3), matrix must be (4, 4)

    Parameters
    ----------
    points : (n, d) float
      Points where d is 2 or 3
    matrix : (3, 3) or (4, 4) float
      Homogenous rotation matrix
    translate : bool
      Apply translation from matrix or not

    Returns
    -------
    transformed : (n, d) float
      Transformed points
    """
    points = np.asanyarray(points, dtype=np.float64)
    # no points no cry
    if len(points) == 0:
        return points.copy()

    matrix = np.asanyarray(matrix, dtype=np.float64)
    if len(points.shape) != 2 or (points.shape[1] + 1 != matrix.shape[1]):
        raise ValueError('matrix shape ({}) doesn\'t match points ({})'.format(
            matrix.shape, points.shape))

    # an identity matrix means there is nothing to do
    deviation = np.abs(matrix - np.eye(matrix.shape[0])).max()
    if deviation < 1e-8:
        return np.ascontiguousarray(points.copy())

    dimension = points.shape[1]
    # the homogeneous coordinate is 1.0 when translating, 0.0 otherwise
    homogeneous = np.column_stack((points, np.full(len(points), float(bool(translate)))))
    transformed = np.dot(matrix, homogeneous.T).T[:, :dimension]
    return np.ascontiguousarray(transformed)
def parse(cls, request: web.Request) -> AuthWidgetData:
    """
    Parse request as Telegram auth widget data.

    :param request: incoming request carrying the widget query parameters
    :return: :obj:`AuthWidgetData`
    :raise: :obj:`aiohttp.web.HTTPBadRequest`
    """
    try:
        query = dict(request.query)
        # id and auth_date arrive as strings; the widget expects integers.
        query['id'] = int(query['id'])
        query['auth_date'] = int(query['auth_date'])
        widget = AuthWidgetData(**query)
    except (ValueError, KeyError):
        # Missing or non-numeric fields mean the auth data is malformed.
        raise web.HTTPBadRequest(text='Invalid auth data')
    return widget
def rotate_v1(array, k):
    """
    Rotate the array to the right by 'k' steps and return the rotated copy.
    T(n) - O(n*k)

    :type array: List[int]
    :type k: int
    :rtype: List[int]

    Note: despite the original docstring's claim, the input list is NOT
    modified in place -- a rotated copy is returned.
    """
    array = array[:]  # work on a copy so the caller's list stays untouched
    n = len(array)
    if n == 0:
        # guard: the previous version raised IndexError on an empty list
        return array
    # rotating n times is a no-op, so reduce k to avoid redundant passes
    k %= n
    for _ in range(k):
        # shift every element one slot to the right, wrapping the last
        temp = array[n - 1]
        for j in range(n - 1, 0, -1):
            array[j] = array[j - 1]
        array[0] = temp
    return array
def main():
    """Command-line tool to inspect model embeddings."""
    setup_main_logger(file_logging=False)
    parser = argparse.ArgumentParser(description='Shows nearest neighbours of input tokens in the embedding space.')
    parser.add_argument('--model', '-m', required=True,
                        help='Model folder to load config from.')
    parser.add_argument('--checkpoint', '-c', required=False, type=int, default=None,
                        help='Optional specific checkpoint to load parameters from. Best params otherwise.')
    parser.add_argument('--side', '-s', required=True, choices=['source', 'target'],
                        help='what embeddings to look at')
    parser.add_argument('--norm', '-n', action='store_true',
                        help='normalize embeddings to unit length')
    parser.add_argument('-k', type=int, default=5,
                        help='Number of neighbours to print')
    parser.add_argument('--gamma', '-g', type=float, default=1.0,
                        help='Softmax distribution steepness.')
    embeddings(parser.parse_args())
def sequencetyper(self):
    """Determines the sequence type of each strain based on comparisons to sequence type profiles.

    For every sample with a usable assembly, compares the alleles recorded in
    ``self.plusdict`` against every profile in the typing scheme, tallies the
    best-matching sequence type(s), and records results (sequence type,
    matches, mismatches) both on the sample object and in
    ``self.resultprofile``/``self.mlstseqtype``.
    """
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            if type(sample[self.analysistype].allelenames) == list:
                # Initialise variables
                header = 0
                # Iterate through the genomes
                genome = sample.name
                # Initialise self.bestmatch[genome] with an int that will eventually be replaced by the # of matches
                self.bestmatch[genome] = defaultdict(int)
                if sample[self.analysistype].profile != 'NA':
                    # Create the profiledata variable to avoid writing self.profiledata[self.analysistype]
                    profiledata = sample[self.analysistype].profiledata
                    # For each gene in plusdict[genome]
                    for gene in sample[self.analysistype].allelenames:
                        # Clear the appropriate count and lists
                        multiallele = []
                        multipercent = []
                        # Go through the alleles in plusdict
                        for allele in self.plusdict[genome][gene]:
                            percentid = list(self.plusdict[genome][gene][allele].keys())[0]
                            # "N" alleles screw up the allele splitter function
                            if allele != "N":
                                # Append as appropriate - alleleNumber is treated as an integer for proper sorting
                                multiallele.append(int(allele))
                                multipercent.append(percentid)
                            # If the allele is "N"
                            else:
                                # Append "N" and a percent identity of 0
                                multiallele.append("N")
                                multipercent.append(0)
                        if not multiallele:
                            multiallele.append("N")
                            multipercent.append(0)
                        # Populate self.bestdict with genome, gene, alleles joined with a space (this was made like
                        # this because allele is a list generated by the .iteritems() above
                        self.bestdict[genome][gene][" ".join(str(allele) for allele in sorted(multiallele))] = multipercent[0]
                        # Find the profile with the most alleles in common with the query genome
                        for sequencetype in profiledata:
                            # The number of genes in the analysis
                            header = len(profiledata[sequencetype])
                            # refallele is the allele number of the sequence type
                            refallele = profiledata[sequencetype][gene]
                            # If there are multiple allele matches for a gene in the reference profile e.g. 10 692
                            if len(refallele.split(" ")) > 1:
                                # Map the split (on a space) alleles as integers - if they are treated as integers,
                                # the alleles will sort properly
                                intrefallele = map(int, refallele.split(" "))
                                # Create a string of the joined, sorted alleles
                                sortedrefallele = " ".join(str(allele) for allele in sorted(intrefallele))
                            else:
                                # Use the reference allele as the sortedRefAllele
                                sortedrefallele = refallele
                            for allele, percentid in self.bestdict[genome][gene].items():
                                # If the allele in the query genome matches the allele in the reference profile, add
                                # the result to the bestmatch dictionary. Genes with multiple alleles were sorted
                                # the same, strings with multiple alleles will match: 10 692 will never be 692 10
                                if allele == sortedrefallele and float(percentid) == 100.00:
                                    # Increment the number of matches to each profile
                                    self.bestmatch[genome][sequencetype] += 1
                                # Special handling of BACT000060 and BACT000065 genes. When the reference profile
                                # has an allele of 'N', and the query allele doesn't, set the allele to 'N', and
                                # count it as a match
                                elif gene == 'BACT000060' or gene == 'BACT000065':
                                    if sortedrefallele == 'N' and allele != 'N':
                                        # Increment the number of matches to each profile
                                        self.bestmatch[genome][sequencetype] += 1
                                    elif allele == sortedrefallele and sortedrefallele == 'N':
                                        # Increment the number of matches to each profile
                                        self.bestmatch[genome][sequencetype] += 1
                    # Get the best number of matches
                    # From: https://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
                    try:
                        sortedmatches = sorted(self.bestmatch[genome].items(),
                                               key=operator.itemgetter(1), reverse=True)[0][1]
                    # If there are no matches, set :sortedmatches to zero
                    except IndexError:
                        sortedmatches = 0
                    # Otherwise, the query profile matches the reference profile
                    if int(sortedmatches) == header:
                        # Iterate through best match
                        for sequencetype, matches in self.bestmatch[genome].items():
                            if matches == sortedmatches:
                                for gene in profiledata[sequencetype]:
                                    # Populate resultProfile with the genome, best match to profile, # of matches
                                    # to the profile, gene, query allele(s), reference allele(s), and % identity
                                    self.resultprofile[genome][sequencetype][sortedmatches][gene][
                                        list(self.bestdict[genome][gene].keys())[0]] = str(list(self.bestdict[genome][gene].values())[0])
                                    sample[self.analysistype].sequencetype = sequencetype
                                    sample[self.analysistype].matchestosequencetype = matches
                    # If there are fewer matches than the total number of genes in the typing scheme
                    elif 0 < int(sortedmatches) < header:
                        mismatches = []
                        # Iterate through the sequence types and the number of matches in bestDict for each genome
                        for sequencetype, matches in self.bestmatch[genome].items():
                            # If the number of matches for a profile matches the best number of matches
                            if matches == sortedmatches:
                                # Iterate through the gene in the analysis
                                for gene in profiledata[sequencetype]:
                                    # Get the reference allele as above
                                    refallele = profiledata[sequencetype][gene]
                                    # As above get the reference allele split and ordered as necessary
                                    if len(refallele.split(" ")) > 1:
                                        intrefallele = map(int, refallele.split(" "))
                                        sortedrefallele = " ".join(str(allele) for allele in sorted(intrefallele))
                                    else:
                                        sortedrefallele = refallele
                                    # Populate self.mlstseqtype with the genome, best match to profile, # of matches
                                    # to the profile, gene, query allele(s), reference allele(s), and % identity
                                    if self.updateprofile:
                                        self.mlstseqtype[genome][sequencetype][sortedmatches][gene][
                                            str(list(self.bestdict[genome][gene].keys())[0])][sortedrefallele] = str(list(self.bestdict[genome][gene].values())[0])
                                    else:
                                        self.resultprofile[genome][sequencetype][sortedmatches][gene][
                                            list(self.bestdict[genome][gene].keys())[0]] = str(list(self.bestdict[genome][gene])[0])
                                        # = str(list(self.bestdict[genome][gene].values())[0])
                                    # Record the query/reference allele pair when they disagree
                                    if sortedrefallele != list(self.bestdict[sample.name][gene].keys())[0]:
                                        mismatches.append(({gene: ('{} ({})'.format(
                                            list(self.bestdict[sample.name][gene].keys())[0], sortedrefallele))}))
                                    if not self.updateprofile or self.analysistype == 'mlst':
                                        self.resultprofile[genome][sequencetype][sortedmatches][gene][
                                            list(self.bestdict[genome][gene].keys())[0]] = str(list(self.bestdict[genome][gene].values())[0])
                                        sample[self.analysistype].mismatchestosequencetype = mismatches
                                        sample[self.analysistype].sequencetype = sequencetype
                                        sample[self.analysistype].matchestosequencetype = matches
                        # Add the new profile to the profile file (if the option is enabled)
                        if self.updateprofile and self.analysistype != 'mlst':
                            self.reprofiler(int(header), genome, sample)
                    elif sortedmatches == 0:
                        for gene in sample[self.analysistype].allelenames:
                            # Populate the results profile with negative values for sequence type and sorted matches
                            self.resultprofile[genome]['NA'][sortedmatches][gene]['NA'] = 0
                        # Add the new profile to the profile file (if the option is enabled)
                        if self.updateprofile:
                            self.reprofiler(int(header), genome, sample)
                        sample[self.analysistype].sequencetype = 'NA'
                        sample[self.analysistype].matchestosequencetype = 'NA'
                        sample[self.analysistype].mismatchestosequencetype = 'NA'
                    else:
                        sample[self.analysistype].matchestosequencetype = 'NA'
                        sample[self.analysistype].mismatchestosequencetype = 'NA'
                        sample[self.analysistype].sequencetype = 'NA'
                    dotter()
                else:
                    # No profile available for this sample
                    sample[self.analysistype].matchestosequencetype = 'NA'
                    sample[self.analysistype].mismatchestosequencetype = 'NA'
                    sample[self.analysistype].sequencetype = 'NA'
            else:
                # Allele names were not populated for this sample
                sample[self.analysistype].matchestosequencetype = 'NA'
                sample[self.analysistype].mismatchestosequencetype = 'NA'
                sample[self.analysistype].sequencetype = 'NA'
def release_all ( self , keys : Sequence [ str ] ) -> Set [ str ] :
    """Release every key in ``keys``.

    :param keys: the keys to release
    :return: the names of the keys that were actually released
    """
    # ``self.release`` yields a falsy value (e.g. ``None``) for keys that
    # were not held; those are excluded from the returned set.
    return { name for name in map ( self . release , keys ) if name }
def read ( self , symbol , as_of = None , date_range = None , from_version = None , allow_secondary = None , ** kwargs ) :
    """Read data for the named symbol.  Returns a VersionedItem object with
    a data and metdata element (as passed into write).

    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    as_of : `str` or `int` or `datetime.datetime`
        Return the data as it was as_of the point in time.
        `int` : specific version number
        `str` : snapshot name which contains the version
        `datetime.datetime` : the version of the data that existed as_of
        the requested point in time
    date_range : `arctic.date.DateRange`
        DateRange to read data for.  Applies to Pandas data, with a
        DateTime index returns only the part of the data that falls in
        the DateRange.
    from_version : version document, optional
        If supplied, read only the delta from this version onwards.
    allow_secondary : `bool` or `None`
        Override the default behavior for allowing reads from secondary
        members of a cluster:
        `None` : use the settings from the top-level `Arctic` object used
        to query this version store.
        `True` : allow reads from secondary members
        `False` : only allow reads from primary members

    Returns
    -------
    VersionedItem namedtuple which contains a .data and .metadata element
    """
    try :
        # First attempt honours the caller's secondary-read preference.
        read_preference = self . _read_preference ( allow_secondary )
        _version = self . _read_metadata ( symbol , as_of = as_of , read_preference = read_preference )
        return self . _do_read ( symbol , _version , from_version ,
                               date_range = date_range , read_preference = read_preference , ** kwargs )
    except ( OperationFailure , AutoReconnect ) as e :
        # Log the exception so we know how often this is happening.
        log_exception ( 'read' , e , 1 )
        # If we've failed to read from the secondary, then it's possible the
        # secondary has lagged.  In this case direct the query to the
        # primary and retry with mongo_retry semantics.
        _version = mongo_retry ( self . _read_metadata ) ( symbol , as_of = as_of ,
                                                         read_preference = ReadPreference . PRIMARY )
        return self . _do_read_retry ( symbol , _version , from_version ,
                                      date_range = date_range ,
                                      read_preference = ReadPreference . PRIMARY ,
                                      ** kwargs )
    except Exception as e :
        # Any other failure is logged and re-raised unchanged.
        log_exception ( 'read' , e , 1 )
        raise
def single ( fun , name , test = None , queue = False , ** kwargs ) :
    '''
    Execute a single state function with the named kwargs, returns False if
    insufficient data is sent to the command

    By default, the values of the kwargs will be parsed as YAML. So, you can
    specify lists values, or lists of single entry key-value maps, as you
    would in a YAML salt file. Alternatively, JSON format of keyword values
    is also supported.

    CLI Example:

    .. code-block:: bash

        salt '*' state.single pkg.installed name=vim
    '''
    # Refuse to run if another state run is queued/conflicting.
    conflict = _check_queue ( queue , kwargs )
    if conflict is not None :
        return conflict
    # ``fun`` must be "<state_module>.<function>", e.g. "pkg.installed".
    comps = fun . split ( '.' )
    if len ( comps ) < 2 :
        __context__ [ 'retcode' ] = salt . defaults . exitcodes . EX_STATE_COMPILER_ERROR
        return 'Invalid function passed'
    # Build the low-data chunk the state compiler expects.
    kwargs . update ( { 'state' : comps [ 0 ] ,
                       'fun' : comps [ 1 ] ,
                       '__id__' : name ,
                       'name' : name } )
    # Remember the global test flag so it can be restored afterwards
    # (works around a Windows multiprocessing quirk, see below).
    orig_test = __opts__ . get ( 'test' , None )
    opts = salt . utils . state . get_sls_opts ( __opts__ , ** kwargs )
    opts [ 'test' ] = _get_test_value ( test , ** kwargs )
    pillar_override = kwargs . get ( 'pillar' )
    pillar_enc = kwargs . get ( 'pillar_enc' )
    if pillar_enc is None and pillar_override is not None and not isinstance ( pillar_override , dict ) :
        raise SaltInvocationError ( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' )
    try :
        # ``__proxy__`` only exists on proxy minions; fall back without it.
        st_ = salt . state . State ( opts , pillar_override , pillar_enc = pillar_enc ,
                                 proxy = __proxy__ ,
                                 initial_pillar = _get_initial_pillar ( opts ) )
    except NameError :
        st_ = salt . state . State ( opts , pillar_override , pillar_enc = pillar_enc ,
                                 initial_pillar = _get_initial_pillar ( opts ) )
    err = st_ . verify_data ( kwargs )
    if err :
        __context__ [ 'retcode' ] = salt . defaults . exitcodes . EX_STATE_COMPILER_ERROR
        return err
    st_ . _mod_init ( kwargs )
    # Snapper pre/post snapshots bracket the state call (if configured).
    snapper_pre = _snapper_pre ( opts , kwargs . get ( '__pub_jid' , 'called localy' ) )
    ret = { '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}' . format ( kwargs ) :
            st_ . call ( kwargs ) }
    _set_retcode ( ret )
    # Work around Windows multiprocessing bug, set __opts__['test'] back to
    # value from before this function was run.
    _snapper_post ( opts , kwargs . get ( '__pub_jid' , 'called localy' ) , snapper_pre )
    __opts__ [ 'test' ] = orig_test
    return ret
def reynolds ( target , u0 , b , temperature = 'pore.temperature' ) :
    r"""Uses exponential model by Reynolds [1] for the temperature dependance
    of shear viscosity.

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated.  This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.
    u0, b : float, array_like
        Coefficients of the viscosity exponential model
        (mu = u0 * Exp(-b * T)) where T is the temperature in Kelvin
    temperature : string
        The dictionary key containing the temperature values (K).  Can be
        either a pore or throat array.

    Returns
    -------
    value : ndarray
        Array of viscosity values, one per entry in ``target[temperature]``.

    References
    ----------
    [1] Reynolds O. (1886). Phil Trans Royal Soc London, v.177, p.157.
    """
    # Local import: ``scipy.exp`` was removed from modern SciPy; numpy.exp
    # is the drop-in replacement.
    import numpy as np
    # FIX: the model documented above (and the Reynolds 1886 reference) is
    # mu = u0*exp(-b*T); the previous code omitted the negative sign.
    value = u0 * np . exp ( - b * target [ temperature ] )
    return value
def _delete_example ( self , request ) :
    """Deletes the specified example.

    Args:
      request: A request that should contain 'index'.

    Returns:
      An empty response, or a 400 error response if the index is invalid.
    """
    index = int ( request . args . get ( 'index' ) )
    if index >= len ( self . examples ) :
        return http_util . Respond ( request ,
                                    { 'error' : 'invalid index provided' } ,
                                    'application/json' , code = 400 )
    del self . examples [ index ]
    # Indices past the deleted slot shift down by one.
    self . updated_example_indices = {
        i if i < index else i - 1 for i in self . updated_example_indices }
    serialized = [ ex . SerializeToString ( ) for ex in self . examples ]
    self . generate_sprite ( serialized )
    return http_util . Respond ( request , { } , 'application/json' )
def write_file ( file_path , content ) :
    """Write ``content`` to the file at ``file_path``.

    If the file exists it is truncated and overwritten.

    :param file_path: path of the file to (over)write.
    :param content: string written verbatim to the file.
    """
    # Use a context manager so the handle is closed even if write() raises;
    # the previous open()/close() pair leaked the handle on error.
    with open ( file_path , 'w' ) as handler :
        handler . write ( content )
def interface_type ( self , ift ) :
    """Set the CoRE Link Format if attribute of the resource.

    :param ift: the CoRE Link Format if attribute; coerced to ``str``
        if it is not one already.
    """
    self . _attributes [ "if" ] = ift if isinstance ( ift , str ) else str ( ift )
def find_response_component ( self , api_id = None , signature_id = None ) :
    '''Find one or many repsonse components.

    Args:
        api_id (str): Api id associated with the component(s) to be retrieved.
        signature_id (str): Signature id associated with the component(s) to
            be retrieved.

    Returns:
        A list of dictionaries containing component data

    Raises:
        ValueError: if neither ``api_id`` nor ``signature_id`` is given.
    '''
    if not api_id and not signature_id :
        raise ValueError ( 'At least one of api_id and signature_id is required' )
    components = list ( )
    if self . response_data :
        for component in self . response_data :
            # FIX: the original condition was mis-parenthesized as
            # ``(api_id and component['api_id']) == api_id``, which is
            # ``None == None`` (True) for every component when only
            # signature_id is supplied, so all components were returned.
            if ( ( api_id and component [ 'api_id' ] == api_id ) or
                    ( signature_id and component [ 'signature_id' ] == signature_id ) ) :
                components . append ( component )
    return components
def write ( self , data ) :
    """Write data to the transport.

    This is an invalid operation if the stream is not writable, that is,
    if it is closed.  During TLS negotiation, the data is buffered.

    :param data: a ``bytes``, ``bytearray`` or ``memoryview`` to send.
    :raises TypeError: if *data* is not a bytes-like object.
    """
    if not isinstance ( data , ( bytes , bytearray , memoryview ) ) :
        # FIX: the original passed the format string and the type as two
        # separate TypeError arguments, so the %r was never interpolated.
        raise TypeError ( 'data argument must be byte-ish (%r)' % type ( data ) )
    if not self . _state . is_writable or self . _closing :
        raise self . _invalid_state ( "write() called" )
    if not data :
        return
    if not self . _buffer :
        # Buffer was empty: register the fd so _write_ready drains the
        # newly queued data when the socket becomes writable.
        self . _loop . add_writer ( self . _raw_fd , self . _write_ready )
    self . _buffer . extend ( data )
def get_permissions_assignments ( self , obj = None , permission = None ) :
    """List permission assignments, optionally scoped to one object.

    :param obj: optional Entity instance; when given, only assignments
        attached to that object are returned.  An unsaved object
        (``obj.id is None``) is treated the same as no object.
    :param permission: return only roles having this permission
    :returns: an dict where keys are `permissions` and values `roles`
        iterable.
    """
    session = None
    if obj is not None :
        assert isinstance ( obj , Entity )
        # Prefer the session the object is already attached to.
        session = object_session ( obj )
        if obj . id is None :
            # Not persisted yet: fall back to global assignments.
            obj = None
    if session is None :
        session = db . session ( )
    pa = session . query ( PermissionAssignment . permission ,
                         PermissionAssignment . role ) \
        . filter ( PermissionAssignment . object == obj )
    if permission :
        pa = pa . filter ( PermissionAssignment . permission == permission )
    # Group roles by permission; yield_per keeps memory bounded on
    # large result sets.
    results = { }
    for permission , role in pa . yield_per ( 1000 ) :
        results . setdefault ( permission , set ( ) ) . add ( role )
    return results
def write_command ( self , request_id , msg , docs ) :
    """A proxy for SocketInfo.write_command that handles event publishing.

    :param request_id: the wire-protocol request id.
    :param msg: the encoded command message to send.
    :param docs: the documents published with the command-started event.
    :returns: the server's reply document.
    :raises OperationFailure: re-raised after publishing a failure event.
    """
    if self . publish :
        # Time already spent before this command (since start_time).
        duration = datetime . datetime . now ( ) - self . start_time
        self . _start ( request_id , docs )
        start = datetime . datetime . now ( )
    try :
        reply = self . sock_info . write_command ( request_id , msg )
        if self . publish :
            # Total duration = prior elapsed time + this command's time.
            duration = ( datetime . datetime . now ( ) - start ) + duration
            self . _succeed ( request_id , reply , duration )
    except OperationFailure as exc :
        if self . publish :
            duration = ( datetime . datetime . now ( ) - start ) + duration
            self . _fail ( request_id , exc . details , duration )
        raise
    finally :
        # Reset the clock for the next command regardless of outcome.
        self . start_time = datetime . datetime . now ( )
    return reply
def nll ( data , model ) :
    """Negative log likelihood given data and a model

    Parameters
    ----------
    data : array_like
        Observations to evaluate under the model.
    model : frozen distribution
        An object exposing ``logpmf`` (discrete) or ``logpdf`` (continuous).

    Returns
    -------
    float
        Negative log likelihood

    Examples
    --------
    >>> import macroeco.models as md
    >>> import macroeco.compare as comp

    >>> # Generate random data
    >>> rand_samp = md.logser.rvs(p=0.9, size=100)

    >>> # Get nll for p = 0.9
    >>> comp.nll(rand_samp, md.logser(p=0.9))
    237.6871819262054

    >>> # Get the nll for the MLE for p
    >>> mle_p = md.logser.fit_mle(rand_samp)
    >>> comp.nll(rand_samp, md.logser(*mle_p))
    235.2841347820297
    """
    try :
        log_lik_vals = model . logpmf ( data )
    except AttributeError :
        # Continuous models expose logpdf instead of logpmf.  The previous
        # bare ``except:`` swallowed *every* error here (masking genuine
        # failures inside logpmf); only the missing-attribute case should
        # trigger the fallback.
        log_lik_vals = model . logpdf ( data )
    return - np . sum ( log_lik_vals )
def convert_cmus_output ( self , cmus_output ) :
    """Turn the newline separated cmus status string into a dictionary.

    cmus_output: A string with information about cmus that is newline
    seperated.  Running cmus-remote -Q in a terminal will show you what
    you're dealing with.

    The resulting dict maps each field name to its value, with the
    'duration' and 'position' fields run through ``self.convert_time``.
    """
    # Strip the 'tag '/'set ' prefixes and drop empty lines in one pass.
    lines = [ line . replace ( 'tag ' , '' ) . replace ( 'set ' , '' )
              for line in cmus_output . split ( '\n' ) if line ]
    # Each remaining line is "<key> <value>"; split on the first space.
    status = dict ( line . partition ( ' ' ) [ : : 2 ] for line in lines )
    for key in ( 'duration' , 'position' ) :
        status [ key ] = self . convert_time ( status [ key ] )
    return status
def plot ( self , normed = False , scale_errors_by = 1.0 , scale_histogram_by = 1.0 ,
           plt = plt , errors = False , ** kwargs ) :
    """Plots the histogram with Poisson (sqrt(n)) error bars.

    - scale_errors_by multiplies the error bars by its argument
    - scale_histogram_by multiplies the histogram AND the error bars
      by its argument
    - plt thing to call .errorbar on (pylab, figure, axes, whatever
      the matplotlib guys come up with next)

    :raises ValueError: if matplotlib failed to import (CAN_PLOT false).
    """
    if not CAN_PLOT :
        raise ValueError ( "matplotlib did not import, so can't plot your histogram..." )
    if errors :
        kwargs . setdefault ( 'linestyle' , 'none' )
        # Poisson statistics: the error on a count n is sqrt(n).
        yerr = np . sqrt ( self . histogram )
        if normed :
            # FIX: copy into a fresh array so the in-place *= below does
            # not silently mutate self.normalized_histogram.
            y = np . array ( self . normalized_histogram , dtype = float )
            yerr /= self . n
        else :
            # FIX: np.float was removed in NumPy 1.24; the builtin float
            # is the documented replacement.
            y = self . histogram . astype ( float )
        yerr *= scale_errors_by * scale_histogram_by
        y *= scale_histogram_by
        plt . errorbar ( self . bin_centers , y , yerr , marker = '.' , ** kwargs )
    else :
        kwargs . setdefault ( 'linestyle' , 'steps-mid' )
        plt . plot ( self . bin_centers , self . histogram , ** kwargs )
def next ( self , vRef ) :
    """Get the reference number of the vdata following a given vdata.

    Args::

      vRef   Reference number of the vdata preceding the one we require.
             Set to -1 to get the first vdata in the HDF file.  Knowing
             its reference number, the vdata can then be opened
             (attached) by passing this reference number to the attach()
             method.

    Returns::

      Reference number of the vdata following the one given by argument
      vref

    An exception is raised if no vdata follows the one given by vRef.

    C library equivalent : VSgetid
    """
    # Thin wrapper over the C API; _checkErr raises if VSgetid failed
    # (i.e. there is no next vdata).
    num = _C . VSgetid ( self . _hdf_inst . _id , vRef )
    _checkErr ( 'next' , num , 'cannot get next vdata' )
    return num
def searchNs ( self , node , nameSpace ) :
    """Search a Ns registered under a given name space for a document.
    recurse on the parents until it finds the defined namespace or
    return None otherwise.  @nameSpace can be None, this is a search
    for the default namespace.  We don't allow to cross entities
    boundaries.  If you don't declare the namespace within those you
    will be in troubles !!! A warning is generated to cover this case.

    :param node: starting node for the lookup (may be None).
    :param nameSpace: namespace prefix to search for, or None for the
        default namespace.
    :returns: an xmlNs wrapper around the found namespace.
    :raises treeError: if the underlying xmlSearchNs call returns NULL.
    """
    # Unwrap the Python node object to its underlying C pointer.
    if node is None :
        node__o = None
    else :
        node__o = node . _o
    ret = libxml2mod . xmlSearchNs ( self . _o , node__o , nameSpace )
    if ret is None :
        raise treeError ( 'xmlSearchNs() failed' )
    # Re-wrap the raw C namespace pointer in the Python xmlNs class.
    __tmp = xmlNs ( _obj = ret )
    return __tmp
def _get_average_time_stamp ( action_set ) : """Return the average time stamp for the rules in this action set ."""
# This is the average value of the iteration counter upon the most # recent update of each rule in this action set . total_time_stamps = sum ( rule . time_stamp * rule . numerosity for rule in action_set ) total_numerosity = sum ( rule . numerosity for rule in action_set ) return total_time_stamps / ( total_numerosity or 1 )
def _new_name ( method , old_name ) : """Return a method with a deprecation warning ."""
# Looks suspiciously like a decorator , but isn ' t ! @ wraps ( method ) def _method ( * args , ** kwargs ) : warnings . warn ( "method '{}' has been deprecated, please rename to '{}'" . format ( old_name , method . __name__ ) , DeprecationWarning , ) return method ( * args , ** kwargs ) deprecated_msg = """ Note: .. deprecated:: 2.2.0 Please use `~{}` """ . format ( method . __name__ ) if getattr ( _method , "__doc__" ) : _method . __doc__ += deprecated_msg return _method
def convert_unicode_2_utf8 ( input ) :
    '''Return a copy of `input` with every str component encoded from
    unicode to utf-8.

    Recurses through dicts and lists; scalar values other than unicode
    strings are returned unchanged.  On Python 3 this is effectively a
    deep copy of dict/list structure with leaves untouched, since the
    ``unicode`` type does not exist there.
    '''
    if isinstance ( input , dict ) :
        try :
            # python-2.6: no dict comprehension, build via generator.
            return dict ( ( convert_unicode_2_utf8 ( key ) , convert_unicode_2_utf8 ( value ) ) for key , value in input . iteritems ( ) )
        except AttributeError :
            # since python-2.7 cf. http://stackoverflow.com/a/1747827
            # [the ugly eval('...') is required for a valid syntax on
            # python-2.6, cf. http://stackoverflow.com/a/25049535]
            return eval ( '''{convert_unicode_2_utf8(key): convert_unicode_2_utf8(value) for key, value in input.items()}''' )
    elif isinstance ( input , list ) :
        return [ convert_unicode_2_utf8 ( element ) for element in input ]
    # elif order relevant: python2 vs. python3
    # cf. http://stackoverflow.com/a/19877309
    elif isinstance ( input , str ) :
        return input
    else :
        try :
            # eval keeps this file importable on python-3 where the
            # bare name ``unicode`` would raise at compile time in
            # some linting setups; at runtime it raises NameError.
            if eval ( '''isinstance(input, unicode)''' ) :
                return input . encode ( 'utf-8' )
        except NameError :
            # unicode does not exist in python-3.x
            pass
        return input
def _dataToString ( self , data ) : """Conversion function used to convert the ( default ) data to the display value ."""
if self . specialValueText is not None and data == self . minValue : return self . specialValueText else : return "{}{:.{}f}{}" . format ( self . prefix , data , self . decimals , self . suffix )
def build_path ( operation , ns ) :
    """Build a path URI for an operation.

    Falls back to URI-template placeholders (``{param}``) for any path
    parameters that are not yet known.
    """
    try :
        return ns . url_for ( operation , _external = False )
    except BuildError as error :
        # We are missing some URI path parameters; substitute each one
        # with a "{name}" template placeholder instead.
        placeholders = { }
        for argument in error . suggested . arguments :
            placeholders [ argument ] = "{{{}}}" . format ( argument )
        # flask will sometimes try to quote '{' and '}' characters, so
        # undo that quoting on the rendered URL.
        return unquote ( ns . url_for ( operation , _external = False , ** placeholders ) )
def _get_base_command ( self ) : """Returns the base command plus command - line options . Handles everything up to and including the classpath . The positional training parameters are added by the _ input _ handler _ decorator method ."""
cd_command = '' . join ( [ 'cd ' , str ( self . WorkingDir ) , ';' ] ) jvm_command = "java" jvm_args = self . _commandline_join ( [ self . Parameters [ k ] for k in self . _jvm_parameters ] ) cp_args = '-cp "%s" %s' % ( self . _get_jar_fp ( ) , self . TrainingClass ) command_parts = [ cd_command , jvm_command , jvm_args , cp_args ] return self . _commandline_join ( command_parts ) . strip ( )
def checktype ( self , val , kind , ** kargs ) :
    """Raise TypeError if val does not satisfy kind."""
    if isinstance ( val , kind ) :
        return
    raise TypeError ( 'Expected {}; got {}' . format (
        self . str_kind ( kind ) , self . str_valtype ( val ) ) )
def solve ( graph , debug = False , anim = None ) :
    # TODO: check docstring
    """Do MV routing for given nodes in `graph`.

    Translate data from node objects to appropriate format before.

    Args
    ----
    graph : :networkx:`NetworkX Graph Obj< >`
        NetworkX graph object with nodes
    debug : bool, defaults to False
        If True, information is printed while routing
    anim : AnimationDing0
        AnimationDing0 object

    Returns
    -------
    :networkx:`NetworkX Graph Obj< >`
        NetworkX graph object with nodes and edges

    See Also
    --------
    ding0.tools.animation.AnimationDing0 : for a more detailed description
        on anim parameter.
    """
    # TODO: Implement debug mode (pass to solver) to get more information
    # while routing (print routes, draw network, ..)

    # translate DING0 graph to routing specs
    specs = ding0_graph_to_routing_specs ( graph )

    # create routing graph using specs
    RoutingGraph = Graph ( specs )

    # solver time limit (milliseconds) — passed to both solvers below
    timeout = 30000

    # create solver objects
    savings_solver = savings . ClarkeWrightSolver ( )
    local_search_solver = local_search . LocalSearchSolver ( )

    start = time . time ( )

    # create initial solution using Clarke and Wright Savings methods
    savings_solution = savings_solver . solve ( RoutingGraph , timeout , debug , anim )

    # OLD, MAY BE USED LATER - Guido, please don't declare a variable
    # later=now() :) :
    # if not savings_solution.is_complete():
    #     print('=== Solution is not a complete solution! ===')

    if debug :
        logger . debug ( 'ClarkeWrightSolver solution:' )
        util . print_solution ( savings_solution )
        logger . debug ( 'Elapsed time (seconds): {}' . format ( time . time ( ) - start ) )
        # savings_solution.draw_network()

    # improve initial solution using local search
    local_search_solution = local_search_solver . solve ( RoutingGraph , savings_solution , timeout , debug , anim )
    # this line is for debug plotting purposes:
    # local_search_solution = savings_solution

    if debug :
        logger . debug ( 'Local Search solution:' )
        util . print_solution ( local_search_solution )
        logger . debug ( 'Elapsed time (seconds): {}' . format ( time . time ( ) - start ) )
        # local_search_solution.draw_network()

    # write the routed edges back onto the original DING0 graph
    return routing_solution_to_ding0_graph ( graph , local_search_solution )
def _sha256 ( path ) : """Calculate the sha256 hash of the file at path ."""
sha256hash = hashlib . sha256 ( ) chunk_size = 8192 with open ( path , "rb" ) as buff : while True : buffer = buff . read ( chunk_size ) if not buffer : break sha256hash . update ( buffer ) return sha256hash . hexdigest ( )
def _pecl ( command , defaults = False ) :
    '''
    Execute the command passed with pecl

    command
        pecl sub-command and arguments to run.
    defaults
        When truthy, pipe empty answers into pecl so interactive
        prompts are auto-accepted with their defaults.
    '''
    cmdline = 'pecl {0}' . format ( command )
    if salt . utils . data . is_true ( defaults ) :
        # The adjacent literals 'yes ' "''" concatenate to ``yes ''`` —
        # i.e. repeatedly answer prompts with an empty string.
        cmdline = 'yes ' "''" + ' | ' + cmdline

    ret = __salt__ [ 'cmd.run_all' ] ( cmdline , python_shell = True )

    if ret [ 'retcode' ] == 0 :
        return ret [ 'stdout' ]
    else :
        # Non-zero exit: log and return an empty string rather than raise.
        log . error ( 'Problem running pecl. Is php-pear installed?' )
        return ''
def asciigraph ( self , values = None , max_height = None , max_width = None , label = False ) :
    '''
    Accepts a list of y values and returns an ascii graph
    Optionally values can also be a dictionary with a key of timestamp, and
    a value of value.  InGraphs returns data in this format for example.

    max_height / max_width bound the rendered graph (width defaults to
    180 columns); label adds upper/lower/mean/stddev annotations and,
    for timestamped input, a start/end time footer.
    '''
    result = ''
    border_fill_char = '*'
    start_ctime = None
    end_ctime = None
    if not max_width :
        max_width = 180

    # If this is a dict of timestamp -> value, sort the data, store the
    # start/end time, and convert values to a list of values
    if isinstance ( values , dict ) :
        time_series_sorted = sorted ( list ( values . items ( ) ) , key = lambda x : x [ 0 ] )  # Sort timestamp/value dict by the timestamps
        start_timestamp = time_series_sorted [ 0 ] [ 0 ]
        end_timestamp = time_series_sorted [ - 1 ] [ 0 ]
        start_ctime = datetime . fromtimestamp ( float ( start_timestamp ) ) . ctime ( )
        end_ctime = datetime . fromtimestamp ( float ( end_timestamp ) ) . ctime ( )
        values = self . _scale_x_values_timestamps ( values = time_series_sorted , max_width = max_width )
        # Time-bucket scaling may leave gaps; drop them before stats.
        values = [ value for value in values if value is not None ]

    if not max_height :
        max_height = min ( 20 , max ( values ) )

    # Statistics are computed on the unscaled values.
    stdev = statistics . stdev ( values )
    mean = statistics . mean ( values )

    # Do value adjustments
    adjusted_values = list ( values )
    adjusted_values = self . _scale_x_values ( values = values , max_width = max_width )

    # Getting upper/lower after scaling x values so we don't label a
    # spike we can't see
    upper_value = max ( adjusted_values )
    lower_value = min ( adjusted_values )

    adjusted_values = self . _scale_y_values ( values = adjusted_values , new_min = 0 , new_max = max_height , scale_old_from_zero = False )
    adjusted_values = self . _round_floats_to_ints ( values = adjusted_values )

    # Obtain Ascii Graph String
    field = self . _get_ascii_field ( adjusted_values )
    graph_string = self . _draw_ascii_graph ( field = field )

    # Label the graph
    if label :
        top_label = 'Upper value: {upper_value:.2f} ' . format ( upper_value = upper_value ) . ljust ( max_width , border_fill_char )
        result += top_label + '\n'
    result += '{graph_string}\n' . format ( graph_string = graph_string )
    if label :
        lower = f'Lower value: {lower_value:.2f} '
        stats = f' Mean: {mean:.2f} *** Std Dev: {stdev:.2f} ******'
        # Pad the middle with '*' so the footer spans max_width columns.
        fill_length = max_width - len ( lower ) - len ( stats )
        stat_label = f'{lower}{"*" * fill_length}{stats}\n'
        result += stat_label
    if start_ctime and end_ctime :
        fill_length = max_width - len ( start_ctime ) - len ( end_ctime )
        result += f'{start_ctime}{" " * fill_length}{end_ctime}\n'
    return result
def save_file ( self , filename = 'StockChart' ) :
    """Save ``self.htmlcontent`` as an ``.html`` file.

    :param filename: base name (or path) for the output file; the
        ``.html`` extension is appended automatically.
    """
    filename = filename + '.html'
    # The context manager flushes and closes the file even if write()
    # raises.  (The original body ended with a stray ``f.closed``
    # attribute access — a no-op, presumably a typo for ``f.close()``,
    # which ``with`` already makes redundant.)
    with open ( filename , 'w' ) as f :
        f . write ( self . htmlcontent )
def get_vep_info ( vep_string , vep_header ) :
    """Make the vep annotations into a dictionaries

    A vep dictionary will have the vep column names as keys and the vep
    annotations as values.  The dictionaries are stored in a list.

    Args:
        vep_string (string): A string with the CSQ annotation
        vep_header (list): A list with the vep header

    Return:
        vep_annotations (list): A list of vep dicts
    """
    vep_annotations = [ ]
    # The CSQ field holds one comma-separated entry per transcript, each
    # entry being a pipe-separated list of values matching the header.
    for raw_annotation in vep_string . split ( ',' ) :
        values = raw_annotation . split ( '|' )
        vep_annotations . append ( dict ( zip ( vep_header , values ) ) )
    return vep_annotations
def computeStatistics ( tped , tfam , snps ) :
    """Computes the completion and concordance of each SNPs.

    :param tped: a representation of the ``tped``.
    :param tfam: a representation of the ``tfam``
    :param snps: the position of the duplicated markers in the ``tped``.

    :type tped: numpy.array
    :type tfam: list
    :type snps: dict

    :returns: a tuple containing the completion of duplicated markers
              (:py:class:`numpy.array`) as first element, and the
              concordance (:py:class:`dict`) of duplicated markers, as
              last element.

    A marker's completion is compute using this formula (where
    :math:`G_i` is the set of genotypes for the marker :math:`i`):

    .. math::
        Completion_i = \\frac{||g \\in G_i \\textrm{ where } g \\neq 0||}
                             {||G_i||}

    The pairwise concordance between duplicated markers is compute as
    follow (where :math:`G_i` and :math:`G_j` are the sets of genotypes
    for markers :math:`i` and :math:`j`, respectively):

    .. math::
        Concordance_{i,j} =
            \\frac{||g \\in G_i \\cup G_j \\textrm{ where } g_i = g_j \\neq 0||}
                  {||g \\in G_i \\cup G_j \\textrm{ where } g \\neq 0||}

    Hence, we only computes the numerators and denominators of the
    completion and concordance, for future reference.

    .. note::
        When the genotypes are not comparable, the function tries to flip
        one of the genotype to see if it becomes comparable.

    .. note::
        This is Python 2 code (``xrange``, ``iteritems``, ``iterkeys``).
    """
    # The completion data type: row 0 holds per-marker counts of
    # non-missing genotypes (numerator); row 1 holds total genotype
    # counts (denominator).
    completion = np . array ( [ [ 0 for i in xrange ( len ( tped ) ) ] ,
                                [ 0 for i in xrange ( len ( tped ) ) ] ] )

    # The concordance data type: per snpID, a pair of nbDup x nbDup
    # matrices holding the numerator and denominator of each pairwise
    # concordance.
    concordance = { }
    for snpID in snps . keys ( ) :
        nbDup = len ( snps [ snpID ] )
        concordance [ snpID ] = [ np . asmatrix ( np . zeros ( ( nbDup , nbDup ) , dtype = int ) ) ,
                                  np . asmatrix ( np . zeros ( ( nbDup , nbDup ) , dtype = int ) ) ]

    # The women and the no sex (tfam column 4: "1"=male, "2"=female,
    # "0"=unknown).
    menIndex = np . where ( tfam [ : , 4 ] == "1" )
    womenIndex = np . where ( tfam [ : , 4 ] == "2" )
    noSexIndex = np . where ( tfam [ : , 4 ] == "0" )

    for snpID , indexes in snps . iteritems ( ) :
        nbDup = len ( indexes )
        # Genotype columns start at offset 4 in the tped rows.
        currGenotypes = tped [ indexes , 4 : ]
        chromosome , position = snpID
        # NOTE(review): sex-chromosome handling (chr 23/24 — dropping
        # women / no-sex samples and heterozygous men) was commented out
        # upstream, which is why menIndex/womenIndex/noSexIndex above are
        # currently unused.

        for i in xrange ( nbDup ) :
            # Compute completion here: non-missing over total genotypes.
            completion [ 0 ] [ indexes [ i ] ] = len ( np . where ( currGenotypes [ i ] != "0 0" ) [ 0 ] )
            completion [ 1 ] [ indexes [ i ] ] = len ( currGenotypes [ i ] )

            for j in xrange ( i + 1 , nbDup ) :
                # Compute concordance here.
                # Removing samples with at least one null genotype
                nullGenotypeIndexes = np . where ( np . any ( currGenotypes [ [ i , j ] ] == "0 0" , 0 ) )
                subGenotypes = np . delete ( currGenotypes , nullGenotypeIndexes , 1 , )

                # Finding the errors in the subseted genotypes
                errorIndexes = np . where ( subGenotypes [ i ] != subGenotypes [ j ] ) [ 0 ]
                nbDiff = len ( errorIndexes )
                for k in errorIndexes :
                    # Getting the genotypes as sets of alleles
                    genotype1 = set ( subGenotypes [ i , k ] . split ( " " ) )
                    genotype2 = set ( subGenotypes [ j , k ] . split ( " " ) )

                    # Checking for flips
                    if len ( genotype1 ) == len ( genotype2 ) :
                        # Both have the same number of different alleles,
                        # so they might be flipped
                        genotype2 = flipGenotype ( genotype2 )
                        if genotype1 == genotype2 :
                            # The genotypes are equivalent after the flip
                            nbDiff -= 1

                # Updating the concordance
                nbTot = len ( subGenotypes [ i ] )
                concordance [ snpID ] [ 0 ] [ i , j ] = nbTot - nbDiff
                concordance [ snpID ] [ 0 ] [ j , i ] = nbTot - nbDiff
                if nbTot == 0 :
                    # We will have a division by 0...
                    nbTot = 1
                concordance [ snpID ] [ 1 ] [ i , j ] = nbTot
                concordance [ snpID ] [ 1 ] [ j , i ] = nbTot

    # A marker is trivially fully concordant with itself.
    for snpID in concordance . iterkeys ( ) :
        for i in range ( len ( concordance [ snpID ] [ 0 ] ) ) :
            concordance [ snpID ] [ 0 ] [ i , i ] = 1
            concordance [ snpID ] [ 1 ] [ i , i ] = 1

    return completion , concordance
def parse_editable ( editable_req , default_vcs = None ) :
    """Parses svn+http://blahblah@rev#egg=Foobar into a requirement
    (Foobar) and a URL.

    :param editable_req: the value of a ``--editable`` option: a local
        directory, a ``file:`` URL, or a ``<vcs>+<url>#egg=<name>`` spec.
    :param default_vcs: VCS name to assume when the URL has no
        ``<vcs>+`` prefix.
    :returns: a ``(requirement_name_or_None, url)`` tuple.
    :raises InstallationError: when the spec is malformed or names an
        unsupported VCS.
    """
    url = editable_req
    # A local directory containing setup.py is treated as code that has
    # already been checked out; convert the path to a file: URL.
    if os . path . isdir ( url ) and os . path . exists ( os . path . join ( url , 'setup.py' ) ) :
        url = path_to_url ( url )
    if url . lower ( ) . startswith ( 'file:' ) :
        # file: URLs carry no VCS and no #egg name.
        return None , url
    # Accept "svn:http://..." style and normalize it to "svn+http://...".
    for version_control in vcs :
        if url . lower ( ) . startswith ( '%s:' % version_control ) :
            url = '%s+%s' % ( version_control , url )
    if '+' not in url :
        if default_vcs :
            url = default_vcs + '+' + url
        else :
            raise InstallationError ( '--editable=%s should be formatted with svn+URL, git+URL, hg+URL or bzr+URL' % editable_req )
    vc_type = url . split ( '+' , 1 ) [ 0 ] . lower ( )
    if not vcs . get_backend ( vc_type ) :
        raise InstallationError ( 'For --editable=%s only svn (svn+URL), Git (git+URL), Mercurial (hg+URL) and Bazaar (bzr+URL) is currently supported' % editable_req )
    # Extract the requirement name from the #egg= fragment, if present.
    match = re . search ( r'(?:#|#.*?&)egg=([^&]*)' , editable_req )
    if ( not match or not match . group ( 1 ) ) and vcs . get_backend ( vc_type ) :
        # No usable #egg=: try to infer the name from standard VCS URL
        # layouts (…/<name>/tags/<x>, …/<name>/branches/<x>, …/<name>/trunk).
        parts = [ p for p in editable_req . split ( '#' , 1 ) [ 0 ] . split ( '/' ) if p ]
        if parts [ - 2 ] in ( 'tags' , 'branches' , 'tag' , 'branch' ) :
            req = parts [ - 3 ]
        elif parts [ - 1 ] == 'trunk' :
            req = parts [ - 2 ]
        else :
            raise InstallationError ( '--editable=%s is not the right format; it must have #egg=Package' % editable_req )
    else :
        req = match . group ( 1 )
    ## FIXME: use package_to_requirement?
    match = re . search ( r'^(.*?)(?:-dev|-\d.*)$' , req )
    if match :
        # Strip off -dev, -0.2, etc.
        req = match . group ( 1 )
    return req , url
def average_hudson_fst ( ac1 , ac2 , blen ) : """Estimate average Fst between two populations and standard error using the block - jackknife . Parameters ac1 : array _ like , int , shape ( n _ variants , n _ alleles ) Allele counts array from the first population . ac2 : array _ like , int , shape ( n _ variants , n _ alleles ) Allele counts array from the second population . blen : int Block size ( number of variants ) . Returns fst : float Estimated value of the statistic using all data . se : float Estimated standard error . vb : ndarray , float , shape ( n _ blocks , ) Value of the statistic in each block . vj : ndarray , float , shape ( n _ blocks , ) Values of the statistic from block - jackknife resampling ."""
# calculate per - variant values num , den = hudson_fst ( ac1 , ac2 , fill = np . nan ) # calculate overall estimate fst = np . nansum ( num ) / np . nansum ( den ) # compute the numerator and denominator within each block num_bsum = moving_statistic ( num , statistic = np . nansum , size = blen ) den_bsum = moving_statistic ( den , statistic = np . nansum , size = blen ) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _ , se , vj = jackknife ( ( num_bsum , den_bsum ) , statistic = lambda n , d : np . sum ( n ) / np . sum ( d ) ) return fst , se , vb , vj
def from_json ( cls , data ) :
    """Create a data type from a dictionary.

    Args:
        data: Data as a dictionary.
            "name": data type name of the data type as a string
            "data_type": the class name of the data type as a string
            "base_unit": the base unit of the data type (required only
            when "data_type" is "GenericType")

    Raises:
        AssertionError: if a required key is missing.  NOTE(review):
            ``assert`` is stripped under ``python -O``, so these checks
            vanish in optimized mode.
        ValueError: if "data_type" names an unknown type.
    """
    assert 'name' in data , 'Required keyword "name" is missing!'
    assert 'data_type' in data , 'Required keyword "data_type" is missing!'
    # Lazily build the registry of known data-type classes on first use.
    if cls . _type_enumeration is None :
        cls . _type_enumeration = _DataTypeEnumeration ( import_modules = False )
    if data [ 'data_type' ] == 'GenericType' :
        # Generic types carry their own base unit and are constructed
        # directly rather than looked up in the registry.
        assert 'base_unit' in data , 'Keyword "base_unit" is missing and is required for GenericType.'
        return cls . _type_enumeration . _GENERICTYPE ( data [ 'name' ] , data [ 'base_unit' ] )
    elif data [ 'data_type' ] in cls . _type_enumeration . _TYPES :
        clss = cls . _type_enumeration . _TYPES [ data [ 'data_type' ] ]
        if data [ 'data_type' ] == data [ 'name' ] . title ( ) . replace ( ' ' , '' ) :
            # Name matches the canonical class name: default instance.
            return clss ( )
        else :
            # Custom display name: keep the class, override the name.
            instance = clss ( )
            instance . _name = data [ 'name' ]
            return instance
    else :
        raise ValueError ( 'Data Type {} could not be recognized' . format ( data [ 'data_type' ] ) )