signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def commit_check():
    '''
    Perform a commit check on the configuration.

    CLI Example:

    .. code-block:: bash

        salt 'device_name' junos.commit_check
    '''
    conn = __proxy__['junos.conn']()
    # Assume success until the check raises.
    ret = {'out': True}
    try:
        conn.cu.commit_check()
        ret['message'] = 'Commit check succeeded.'
    except Exception as exception:
        ret['message'] = 'Commit check failed with {0}'.format(exception)
        ret['out'] = False
    return ret
def _delete_objects_not_in_list(self, cont, object_prefix=""):
    """Delete every object in container *cont* (restricted to
    *object_prefix*) that is absent from ``self._local_files``.
    """
    remote_names = set(cont.get_object_names(prefix=object_prefix,
                                             full_listing=True))
    stale = list(remote_names - set(self._local_files))
    self._sync_summary["deleted"] += len(stale)
    # We don't need to wait around for this to complete.  Store the
    # thread reference in case it is needed at some point.
    self._thread = self.bulk_delete(cont, stale, async_=True)
def run(self, raw_args=None):
    """Parse *raw_args* (argparse falls back to ``sys.argv`` when this is
    None), run a :class:`Core` lint pass with the parsed options, print
    the resulting report, and exit via the parser.
    """
    parsed = self.parser.parse_args(raw_args)
    core = Core()
    try:
        report = core.lint(**vars(parsed))
    except Exception as err:
        # parser.error() prints the message and exits with a usage error.
        self.parser.error(str(err))
    print(report)
    self.parser.exit()
def _to_str(uri: URIRef) -> str:
    """Convert a FHIR style URI into a tag name to be used to retrieve
    data from a JSON representation.

    Example: http://hl7.org/fhir/Provenance.agent.whoReference --> whoReference

    :param uri: URI to convert
    :return: tag name
    """
    local_name = str(uri).replace(str(FHIR), '')
    if '.' not in local_name:
        return local_name
    # Keep only the segment after the final dot.
    return local_name.rsplit('.', 1)[1]
def get_authors(filepath: str) -> List[str]:
    """Open *filepath* and extract its ``__author__`` list, if present.

    Returns an empty list when no ``__author__ = [...]`` assignment is
    found at the start of a line.
    """
    pattern = re.compile(r'(^__author__ = )(\[.*?\])',
                         re.MULTILINE)  # type: Pattern[str]
    with open(filepath) as file_open:
        source = file_open.read()  # type: str
    hits = pattern.findall(source)
    if not hits:
        return list()
    # hits[0][1] is the bracketed list literal captured by the regex.
    return eval_str_to_list(hits[0][1])  # type: List[str]
def list_present(name, acl_type, acl_names=None, perms='', recurse=False, force=False):
    '''
    Ensure a Linux ACL list is present

    Takes a list of acl names and add them to the given path

    name
        The acl path

    acl_type
        The type of the acl is used for it can be 'user' or 'group'

    acl_names
        The list of users or groups

    perms
        Set the permissions eg.: rwx

    recurse
        Set the permissions recursive in the path

    force
        Wipe out old permissions and ensure only the new permissions are set
    '''
    if acl_names is None:
        acl_names = []
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    # Octal digit per permission character; non-rwx- characters pass through
    # unchanged (the dict .get(i, i) fallback).
    _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0}
    _octal_perms = sum([_octal.get(i, i) for i in perms])
    if not os.path.exists(name):
        ret['comment'] = '{0} does not exist'.format(name)
        ret['result'] = False
        return ret

    __current_perms = __salt__['acl.getfacl'](name)

    # Default ACLs ("d:" / "default:" prefix) live under the 'defaults' key of
    # the getfacl result; strip the prefix to get the plain acl type.
    if acl_type.startswith(('d:', 'default:')):
        _acl_type = ':'.join(acl_type.split(':')[1:])
        _current_perms = __current_perms[name].get('defaults', {})
        _default = True
    else:
        _acl_type = acl_type
        _current_perms = __current_perms[name]
        _default = False

    # Owning user/group of the file, used to exclude the implicit owner entry
    # from the set comparison below.
    _origin_group = _current_perms.get('comment', {}).get('group', None)
    _origin_owner = _current_perms.get('comment', {}).get('owner', None)

    _current_acl_types = []
    diff_perms = False
    for key in _current_perms[acl_type]:
        for current_acl_name in key.keys():
            _current_acl_types.append(current_acl_name.encode('utf-8'))
            diff_perms = _octal_perms == key[current_acl_name]['octal']
    if acl_type == 'user':
        try:
            _current_acl_types.remove(_origin_owner)
        except ValueError:
            pass
    else:
        try:
            _current_acl_types.remove(_origin_group)
        except ValueError:
            pass
    # Symmetric difference: names present on disk but not requested, or
    # requested but not present.
    diff_acls = set(_current_acl_types) ^ set(acl_names)
    if not diff_acls and diff_perms and not force:
        ret = {'name': name,
               'result': True,
               'changes': {},
               'comment': 'Permissions and {}s are in the desired state'.format(acl_type)}
        return ret
    # The getfacl execution module lists default with empty names as being
    # applied to the user/group that owns the file, e.g.,
    # default:group::rwx would be listed as default:group:root:rwx
    # In this case, if acl_name is empty, we really want to search for root
    # but still uses '' for other
    # We search through the dictionary getfacl returns for the owner of the
    # file if acl_name is empty.
    if acl_names == '':
        _search_names = __current_perms[name].get('comment').get(_acl_type, '')
    else:
        _search_names = acl_names
    if _current_perms.get(_acl_type, None) or _default:
        try:
            users = {}
            for i in _current_perms[_acl_type]:
                if i and next(six.iterkeys(i)) in _search_names:
                    users.update(i)
        except (AttributeError, KeyError):
            users = None
        if users:
            changes = {}
            for count, search_name in enumerate(_search_names):
                if search_name in users:
                    if users[search_name]['octal'] == sum([_octal.get(i, i) for i in perms]):
                        ret['comment'] = 'Permissions are in the desired state'
                    else:
                        # Existing entry with different permissions: update it.
                        changes.update({'new': {'acl_name': ', '.join(acl_names),
                                                'acl_type': acl_type,
                                                'perms': _octal_perms},
                                        'old': {'acl_name': ', '.join(acl_names),
                                                'acl_type': acl_type,
                                                'perms': six.text_type(users[search_name]['octal'])}})
                        if __opts__['test']:
                            ret.update({'comment': 'Updated permissions will be applied for '
                                                   '{0}: {1} -> {2}'.format(
                                            acl_names,
                                            six.text_type(users[search_name]['octal']),
                                            perms),
                                        'result': None, 'changes': changes})
                            return ret
                        try:
                            if force:
                                __salt__['acl.wipefacls'](name, recursive=recurse, raise_err=True)
                            for acl_name in acl_names:
                                __salt__['acl.modfacl'](acl_type, acl_name, perms, name,
                                                        recursive=recurse, raise_err=True)
                            ret.update({'comment': 'Updated permissions for '
                                                   '{0}'.format(acl_names),
                                        'result': True, 'changes': changes})
                        except CommandExecutionError as exc:
                            ret.update({'comment': 'Error updating permissions for '
                                                   '{0}: {1}'.format(acl_names, exc.strerror),
                                        'result': False})
                else:
                    # Name requested but no ACL entry exists yet: apply fresh.
                    changes = {'new': {'acl_name': ', '.join(acl_names),
                                       'acl_type': acl_type,
                                       'perms': perms}}
                    if __opts__['test']:
                        ret.update({'comment': 'New permissions will be applied for '
                                               '{0}: {1}'.format(acl_names, perms),
                                    'result': None, 'changes': changes})
                        ret['result'] = None
                        return ret
                    try:
                        if force:
                            __salt__['acl.wipefacls'](name, recursive=recurse, raise_err=True)
                        for acl_name in acl_names:
                            __salt__['acl.modfacl'](acl_type, acl_name, perms, name,
                                                    recursive=recurse, raise_err=True)
                        ret.update({'comment': 'Applied new permissions for '
                                               '{0}'.format(', '.join(acl_names)),
                                    'result': True, 'changes': changes})
                    except CommandExecutionError as exc:
                        ret.update({'comment': 'Error updating permissions for {0}: '
                                               '{1}'.format(acl_names, exc.strerror),
                                    'result': False})
        else:
            # No matching entries found at all for this acl type: apply fresh.
            changes = {'new': {'acl_name': ', '.join(acl_names),
                               'acl_type': acl_type,
                               'perms': perms}}
            if __opts__['test']:
                ret.update({'comment': 'New permissions will be applied for '
                                       '{0}: {1}'.format(acl_names, perms),
                            'result': None, 'changes': changes})
                ret['result'] = None
                return ret
            try:
                if force:
                    __salt__['acl.wipefacls'](name, recursive=recurse, raise_err=True)
                for acl_name in acl_names:
                    __salt__['acl.modfacl'](acl_type, acl_name, perms, name,
                                            recursive=recurse, raise_err=True)
                ret.update({'comment': 'Applied new permissions for '
                                       '{0}'.format(', '.join(acl_names)),
                            'result': True, 'changes': changes})
            except CommandExecutionError as exc:
                ret.update({'comment': 'Error updating permissions for {0}: '
                                       '{1}'.format(acl_names, exc.strerror),
                            'result': False})
    else:
        ret['comment'] = 'ACL Type does not exist'
        ret['result'] = False
    return ret
def deprecate_module_attribute(mod, deprecated):
    """Return a wrapped object that warns about deprecated accesses.

    Reading or writing any attribute named in *deprecated* emits a
    warning; all access is still forwarded to the underlying *mod*.
    """
    flagged = set(deprecated)

    class Wrapper(object):
        def __getattr__(self, attr):
            if attr in flagged:
                warnings.warn("Property %s is deprecated" % attr)
            return getattr(mod, attr)

        def __setattr__(self, attr, value):
            if attr in flagged:
                warnings.warn("Property %s is deprecated" % attr)
            return setattr(mod, attr, value)

    return Wrapper()
def dictionary(element_name,  # type: Text
               children,  # type: List[Processor]
               required=True,  # type: bool
               alias=None,  # type: Optional[Text]
               hooks=None  # type: Optional[Hooks]
               ):
    # type: (...) -> RootProcessor
    """Create a processor for dictionary values.

    :param element_name: Name of the XML element containing the dictionary
        value. Can also be specified using supported XPath syntax.
    :param children: List of declxml processor objects for processing the
        children contained within the dictionary.
    :param required: Indicates whether the value is required when parsing
        and serializing.
    :param alias: If specified, then this is used as the name of the value
        when read from XML. If not specified, then the element_name is used
        as the name of the value.
    :param hooks: A Hooks object.

    :return: A declxml processor object.
    """
    return _processor_wrap_if_hooks(
        _Dictionary(element_name, children, required, alias), hooks)
def save_data(self, trigger_id, **data):
    """Save the data as a Reddit submission.

    :param trigger_id: trigger ID from which to save data
    :param data: the data to check to be used and save
    :type trigger_id: int
    :type data: dict
    :return: the status of the save statement
    :rtype: boolean
    """
    status = False
    # convert the format to be released in Markdown
    data['output_format'] = 'md'
    title, content = super(ServiceReddit, self).save_data(trigger_id, **data)

    if not self.token:
        msg = "no token or link provided for trigger ID {} ".format(trigger_id)
        logger.critical(msg)
        update_result(trigger_id, msg=msg, status=False)
        return status

    trigger = Reddit.objects.get(trigger_id=trigger_id)
    subreddit = self.reddit.subreddit(trigger.subreddit)
    # A "share link" trigger posts a URL; otherwise post self-text.
    if trigger.share_link:
        status = subreddit.submit(title=title, url=content)
    else:
        status = subreddit.submit(title=title, selftext=content)
    sentence = str('reddit submission {} created').format(title)
    logger.debug(sentence)
    return status
def get_bundle_list(self, href=None, limit=None, embed_items=None,
                    embed_tracks=None, embed_metadata=None,
                    embed_insights=None):
    """Get a list of available bundles.

    'href' the relative href to the bundle list to retrieve. If None,
        the first bundle list will be returned.
    'limit' the maximum number of bundles to include in the result.
    'embed_items' whether or not to expand the bundle data into the result.
    'embed_tracks' whether or not to expand the bundle track data into
        the result.
    'embed_metadata' whether or not to expand the bundle metadata into
        the result.
    'embed_insights' whether or not to expand the bundle insights into
        the result.

    NB: providing values for 'limit' and 'embed_*' will override either
    the API default or the values in the provided href.

    Returns a data structure equivalent to the JSON returned by the API.
    If the response status is not 2xx, throws an APIException.
    If the JSON to python data struct conversion fails, throws an
    APIDataException.
    """
    # Argument error checking.
    assert limit is None or limit > 0

    if href is None:
        raw = self._get_first_bundle_list(limit, embed_items, embed_tracks,
                                          embed_metadata, embed_insights)
    else:
        raw = self._get_additional_bundle_list(href, limit, embed_items,
                                               embed_tracks, embed_metadata,
                                               embed_insights)

    # Convert the JSON to a python data struct.
    return self._parse_json(raw)
def _compile(self, lines):
    '''Set the correct render method (boolean or function call) and read
    variables from the current line.
    '''
    match = self.__class__.RE_IF.match(lines.current)
    if match is None:
        raise DefineBlockError(
            'Incorrect block definition at line {}, {}\nShould be '
            'something like: #if @foo:'.format(lines.pos, lines.current))
    raw_args = match.group(3)
    self._evaluate = match.group(2).replace('.', '-')
    # No argument group means a plain boolean check.
    self._isbool = raw_args is None
    if not self._isbool:
        stripped = raw_args.strip('() \t')
        self._args = [part.strip('@ \t').replace('.', '-')
                      for part in stripped.split(',')]
def get_profile ( self , name , retry = google . api_core . gapic_v1 . method . DEFAULT , timeout = google . api_core . gapic_v1 . method . DEFAULT , metadata = None , ) : """Gets the specified profile . Example : > > > from google . cloud import talent _ v4beta1 > > > client = talent _ v4beta1 . ProfileServiceClient ( ) > > > name = client . profile _ path ( ' [ PROJECT ] ' , ' [ TENANT ] ' , ' [ PROFILE ] ' ) > > > response = client . get _ profile ( name ) Args : name ( str ) : Required . Resource name of the profile to get . The format is " projects / { project \ _ id } / tenants / { tenant \ _ id } / profiles / { profile \ _ id } " , for example , " projects / api - test - project / tenants / foo / profiles / bar " . retry ( Optional [ google . api _ core . retry . Retry ] ) : A retry object used to retry requests . If ` ` None ` ` is specified , requests will not be retried . timeout ( Optional [ float ] ) : The amount of time , in seconds , to wait for the request to complete . Note that if ` ` retry ` ` is specified , the timeout applies to each individual attempt . metadata ( Optional [ Sequence [ Tuple [ str , str ] ] ] ) : Additional metadata that is provided to the method . Returns : A : class : ` ~ google . cloud . talent _ v4beta1 . types . Profile ` instance . Raises : google . api _ core . exceptions . GoogleAPICallError : If the request failed for any reason . google . api _ core . exceptions . RetryError : If the request failed due to a retryable error and retry attempts failed . ValueError : If the parameters are invalid ."""
# Wrap the transport method to add retry and timeout logic . if "get_profile" not in self . _inner_api_calls : self . _inner_api_calls [ "get_profile" ] = google . api_core . gapic_v1 . method . wrap_method ( self . transport . get_profile , default_retry = self . _method_configs [ "GetProfile" ] . retry , default_timeout = self . _method_configs [ "GetProfile" ] . timeout , client_info = self . _client_info , ) request = profile_service_pb2 . GetProfileRequest ( name = name ) return self . _inner_api_calls [ "get_profile" ] ( request , retry = retry , timeout = timeout , metadata = metadata )
def proxy_image(self, s=0, c=0, z=0, t=0):
    """Return the first contained image matching the given coordinates.

    :param s: series
    :param c: channel
    :param z: zslice
    :param t: timepoint
    :returns: :class:`jicimagelib.image.MicroscopyImage` instance, or
        None when no contained image matches
    """
    matches = (img for img in self if img.is_me(s=s, c=c, z=z, t=t))
    # next(..., None) reproduces the implicit None of a fall-through loop.
    return next(matches, None)
def _set_internal_value(self, new_internal_value):
    """This is supposed to be only used by fitting engines.

    :param new_internal_value: new value in internal representation
    :return: none
    """
    if new_internal_value == self._internal_value:
        # Unchanged value: skip the write and the callbacks.
        return
    self._internal_value = new_internal_value
    # Notify listeners (if any) about the change.
    for callback in self._callbacks:
        callback(self)
def en_last(self):
    """Report the energies from the last SCF present in the output.

    Returns a dict mapping each energy key (those of
    :attr:`~opan.output.OrcaOutput.p_en`) to the final value recorded
    for it, or None when no value is relevant to the parsed output.
    """
    # Take the trailing element of each energy list; empty lists map to None.
    return {key: (values[-1] if values != [] else None)
            for (key, values) in self.en.items()}
def lambda_from_file(python_file):
    """Reads a python file and returns an awslambda.Code object.

    :param python_file: path of the file whose source becomes the
        inline ZipFile body
    :return: troposphere ``awslambda.Code`` object
    """
    with open(python_file, 'r') as source:
        lines = source.read().splitlines()
    return awslambda.Code(ZipFile=(Join('\n', lines)))
def kill(self):
    """Kill the browser.

    This is useful when the browser is stuck.
    """
    proc = self.process
    if proc:
        proc.kill()
        # Reap the process so it does not linger as a zombie.
        proc.wait()
def plot_correlations(self, iabscissa=1):
    """spectrum of correlation matrix and largest correlation"""
    # Lazily load the correlation-spectrum data if it is not present yet.
    if not hasattr(self, 'corrspec'):
        self.load()
    # Need at least two rows to draw a meaningful curve.
    if len(self.corrspec) < 2:
        return self
    x = self.corrspec[:, iabscissa]
    y = self.corrspec[:, 6:]  # principle axes
    ys = self.corrspec[:, :6]  # "special" values
    from matplotlib.pyplot import semilogy, hold, text, grid, axis, title
    # NOTE(review): pyplot.hold was removed in matplotlib 3.0 — confirm the
    # pinned matplotlib version supports this import.
    self._enter_plotting()
    semilogy(x, y, '-c')
    hold(True)
    # Axis ratio: largest over smallest principal-axis value per row.
    semilogy(x[:], np.max(y, 1) / np.min(y, 1), '-r')
    text(x[-1], np.max(y[-1, :]) / np.min(y[-1, :]), 'axis ratio')
    if ys is not None:
        # Shift the correlation extrema by +/-1 so they plot on a log axis.
        semilogy(x, 1 + ys[:, 2], '-b')
        text(x[-1], 1 + ys[-1, 2], '1 + min(corr)')
        semilogy(x, 1 - ys[:, 5], '-b')
        text(x[-1], 1 - ys[-1, 5], '1 - max(corr)')
        semilogy(x[:], 1 + ys[:, 3], '-k')
        text(x[-1], 1 + ys[-1, 3], '1 + max(neg corr)')
        semilogy(x[:], 1 - ys[:, 4], '-k')
        text(x[-1], 1 - ys[-1, 4], '1 - min(pos corr)')
    grid(True)
    ax = array(axis())
    # ax[1] = max(minxend, ax[1])
    axis(ax)
    title('Spectrum (roots) of correlation matrix')
    # pyplot.xticks(xticklocs)
    self._xlabel(iabscissa)
    self._finalize_plotting()
    return self
def handle_arguments ( self , string , root , opening , closing ) : """Handles phrase - arguments . Sets the override and increment flags if found . Also makes sure that the argument sequence is at the start of the phrase and else warns about the unescaped meta characters . If the arguments are indeed at the start but do not match the arguments regular expression , an error is raised . Arguments : string ( str ) : The string being parsed . root ( str ) : The current root phrase . opening ( int ) : The index of the opening paranthese . closing ( int ) : The index of the closing paranthese . Returns : The ( possibly escaped ) string , the root phrase ( if no escaping , then with arguments and flags ) and the next meta match . Raises : errors . ParseError : If the arguments are invalid ."""
# The actual argument string ( ignore whitespace ) args = string [ opening + 1 : closing ] . replace ( " " , "" ) # The argument sequence must be at the start of the phrase # and must match the allowed argument regular expression if opening > 0 or not self . arguments . match ( args ) : if opening == 0 : raise errors . ParseError ( "Invalid argument sequence!" ) # If escape _ meta does indeed escape a character and removes # a backward slash , the positions ' opening ' and ' closing ' are no # longer valid . escape _ meta does a search for the next meta # character though , which is then the closing parantheses , # so we can use its index value ( in the now escaped string ) string , meta = self . escape_meta ( string , opening ) string , meta = self . escape_meta ( string , meta . start ( ) ) return string , root , meta if "!" in args : root . override = True args = args . replace ( "!" , "" ) if "+" in args : root . increment = True args = args . replace ( "+" , "" ) root . arguments = [ int ( i ) for i in args . split ( "," ) if i ] # Remove the argument string including parantheses string = string [ closing + 1 : ] meta = self . meta . search ( string ) return string , root , meta
def pwd(self, **kwargs):
    """Returns the cwd

    Optional kwargs:

        node=<node>

    If specified, return only the directory name at depth <node>.
    """
    want_node = 'node' in kwargs
    depth = int(kwargs['node']) if want_node else 0
    path = self.cwd()
    if want_node:
        components = path.split('/')
        # Only narrow down when the requested depth actually exists;
        # otherwise the full path is returned unchanged.
        if len(components) >= depth + 1:
            path = components[depth]
    return path
def create_file_system(name, performance_mode='generalPurpose', keyid=None,
                       key=None, profile=None, region=None,
                       creation_token=None, **kwargs):
    '''
    Creates a new, empty file system.

    name
        (string) - The name for the new file system

    performance_mode
        (string) - The PerformanceMode of the file system. Can be either
        generalPurpose or maxIO

    creation_token
        (string) - A unique name to be used as reference when creating an
        EFS. This will ensure idempotency. Set to name if not specified
        otherwise

    returns
        (dict) - A dict of the data for the elastic file system

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose
    '''
    if creation_token is None:
        creation_token = name

    client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
    response = client.create_file_system(CreationToken=creation_token,
                                         PerformanceMode=performance_mode)

    if 'FileSystemId' in response:
        client.create_tags(FileSystemId=response['FileSystemId'],
                           Tags={"Key": "Name", "Value": name})

    if 'Name' in response:
        # NOTE(review): this only overwrites an already-present 'Name' key;
        # if the intent was to always report the name the condition looks
        # inverted — confirm against the boto3 EFS response shape.
        response['Name'] = name

    return response
def default_get(self, fields):
    """To get default values for the object.

    @param self: The object pointer.
    @param fields: List of fields for which we want default values
    @return: A dictionary which of fields with values.
    """
    if self._context is None:
        self._context = {}
    res = super(QuickRoomReservation, self).default_get(fields)
    if self._context:
        ctx_keys = self._context.keys()
        # Seed the reservation defaults from the calling context.
        if 'date' in ctx_keys:
            res.update({'check_in': self._context['date']})
        if 'room_id' in ctx_keys:
            res.update({'room_id': int(self._context['room_id'])})
    return res
def _detect_sse3(self):
    "Does this compiler support SSE3 intrinsics?"
    self._print_support_start('SSE3')
    # Probe by compiling a tiny snippet that uses an SSE3 intrinsic.
    supported = self.hasfunction('__m128 v; _mm_hadd_ps(v,v)',
                                 include='<pmmintrin.h>',
                                 extra_postargs=['-msse3'])
    self._print_support_end('SSE3', supported)
    return supported
def get_schema_type(cls, schema):
    """Get schema type for the argument.

    :param schema: Schema to analyze
    :return: COMPILED_TYPE constant
    :rtype: str|None
    """
    schema_type = type(schema)

    # Marker instance
    if issubclass(schema_type, markers.Marker):
        return const.COMPILED_TYPE.MARKER
    # Marker class (not an instance)
    if issubclass(schema_type, six.class_types) and issubclass(schema, markers.Marker):
        return const.COMPILED_TYPE.MARKER
    # Already-compiled schema
    if isinstance(schema, CompiledSchema):
        return const.COMPILED_TYPE.SCHEMA
    # Anything else falls through to primitive detection.
    return primitive_type(schema)
def get_cached_image(self, width, height, zoom, parameters=None, clear=False):
    """Get ImageSurface object, if possible, cached.

    The method checks whether the image was already rendered. This is
    done by comparing the passed size and parameters with those of the
    last image. If they are equal, the cached image is returned.
    Otherwise, a new ImageSurface with the specified dimensions is
    created and returned.

    :param width: The width of the image
    :param height: The height of the image
    :param zoom: The current scale/zoom factor
    :param parameters: The parameters used for the image
    :param clear: If True, the cache is emptied, thus the image won't be
        retrieved from cache
    :returns: The flag is True when the image is retrieved from the
        cache, otherwise False; The cached image surface or a blank one
        with the desired size; The zoom parameter when the image was
        stored
    :rtype: bool, ImageSurface, float
    """
    global MAX_ALLOWED_AREA
    if not parameters:
        parameters = {}
    # Cache hit: identical size/zoom/parameters and no forced clear.
    if self.__compare_parameters(width, height, zoom, parameters) and not clear:
        return True, self.__image, self.__zoom
    # Restrict image surface size to prevent excessive use of memory
    while True:
        try:
            self.__limiting_multiplicator = 1
            area = width * zoom * self.__zoom_multiplicator * height * zoom * self.__zoom_multiplicator
            if area > MAX_ALLOWED_AREA:
                # Scale both dimensions down uniformly so the area fits.
                self.__limiting_multiplicator = sqrt(MAX_ALLOWED_AREA / area)
            image = ImageSurface(self.__format,
                                 int(ceil(width * zoom * self.multiplicator)),
                                 int(ceil(height * zoom * self.multiplicator)))
            break  # If we reach this point, the area was successfully allocated and we can break the loop
        except Error:
            # Allocation failed: shrink the global budget and retry.
            MAX_ALLOWED_AREA *= 0.8
    self.__set_cached_image(image, width, height, zoom, parameters)
    return False, self.__image, zoom
def AddPathIfNotExists(env_dict, key, path, sep=os.pathsep):
    """This function will take 'key' out of the dictionary 'env_dict',
    then add the path 'path' to that key if it is not already there.

    This treats the value of env_dict[key] as if it has a similar format
    to the PATH variable... a list of paths separated by tokens. The
    'path' will get added to the list if it is not already there.
    """
    try:
        current = env_dict[key]
    except KeyError:
        # Key absent: the new path becomes the whole value.
        env_dict[key] = path
        return
    had_list = is_List(current)
    entries = current if had_list else current.split(sep)
    # Case-normalised membership test mirrors PATH semantics on Windows.
    if os.path.normcase(path) not in [os.path.normcase(p) for p in entries]:
        entries = [path] + entries
        env_dict[key] = entries if had_list else sep.join(entries)
def meff_SO(self, **kwargs):
    '''Returns the split-off hole effective mass calculated from
    Eg_Gamma(T), Delta_SO, Ep and F.

    Interpolation of Eg_Gamma(T), Delta_SO, Ep and luttinger1, and then
    calculation of meff_SO is recommended for alloys.
    '''
    gap = self.Eg_Gamma(**kwargs)
    delta = self.Delta_SO(**kwargs)
    ep = self.Ep(**kwargs)
    gamma1 = self.luttinger1(**kwargs)
    # k.p correction term subtracted from the Luttinger parameter.
    correction = (ep * delta) / (3 * gap * (gap + delta))
    return 1. / (gamma1 - correction)
def _retrieve_stack_host_zone_name(awsclient, default_stack_name=None):
    """Use service discovery to get the host zone name from the default stack.

    :return: Host zone name as string
    """
    global _host_zone_name
    if _host_zone_name is not None:
        # Cached from a previous lookup.
        return _host_zone_name

    env = get_env()
    if env is None:
        print("Please set environment...")
        # TODO: why is there a sys.exit in library code used by cloudformation!!!
        sys.exit()

    if default_stack_name is None:
        # TODO why 'dp-<env>'? - this should not be hardcoded!
        default_stack_name = 'dp-%s' % env

    outputs = get_outputs_for_stack(awsclient, default_stack_name)
    if HOST_ZONE_NAME__STACK_OUTPUT_NAME not in outputs:
        print("Please debug why default stack '{}' does not contain '{}'...".format(
            default_stack_name,
            HOST_ZONE_NAME__STACK_OUTPUT_NAME,
        ))
        # TODO: why is there a sys.exit in library code used by cloudformation!!!
        sys.exit()

    _host_zone_name = outputs[HOST_ZONE_NAME__STACK_OUTPUT_NAME] + "."
    return _host_zone_name
def __decompressContent(coding, pgctnt):
    '''Decompress returned HTTP content depending on the specified
    encoding.

    Currently supports identity/none, deflate, and gzip, which should
    cover 99%+ of the content on the internet.
    '''
    log.trace("Decompressing %s byte content with compression type: %s",
              len(pgctnt), coding)

    # Encodings we recognise but do not implement.
    unsupported = {
        "sdch": "SDCH compression is not currently supported",
        "br": "Brotli compression is not currently supported",
        "compress": "LZW compression is not currently supported",
    }

    if coding == 'deflate':
        # Negative wbits: raw deflate stream without zlib header.
        pgctnt = zlib.decompress(pgctnt, -zlib.MAX_WBITS)
    elif coding == 'gzip':
        pgctnt = gzip.GzipFile(fileobj=io.BytesIO(pgctnt)).read()
    elif coding in unsupported:
        raise ValueError(unsupported[coding])
    elif coding == 'identity':
        # No transformation applied.
        pass

    log.trace("Content size after decompression: %s", len(pgctnt))
    return pgctnt
def ValidateChildren ( self , problems ) : """Validate StopTimes and headways of this trip ."""
assert self . _schedule , "Trip must be in a schedule to ValidateChildren" # TODO : validate distance values in stop times ( if applicable ) self . ValidateNoDuplicateStopSequences ( problems ) stoptimes = self . GetStopTimes ( problems ) stoptimes . sort ( key = lambda x : x . stop_sequence ) self . ValidateTripStartAndEndTimes ( problems , stoptimes ) self . ValidateStopTimesSequenceHasIncreasingTimeAndDistance ( problems , stoptimes ) self . ValidateShapeDistTraveledSmallerThanMaxShapeDistance ( problems , stoptimes ) self . ValidateDistanceFromStopToShape ( problems , stoptimes ) self . ValidateFrequencies ( problems )
def __shapeIndex(self, i=None):
    """Returns the offset in a .shp file for a shape based on information
    in the .shx index file.
    """
    shx = self.shx
    if not shx:
        return None
    if not self._offsets:
        # File length (16-bit word * 2 = bytes) - header length
        shx.seek(24)
        shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
        record_count = shxRecordLength // 8
        # Jump to the first record.
        shx.seek(100)
        for _ in range(record_count):
            # Offsets are 16-bit words just like the file length
            self._offsets.append(unpack(">i", shx.read(4))[0] * 2)
            # Skip the 4-byte content-length field of each record.
            shx.seek(shx.tell() + 4)
    if i is not None:
        return self._offsets[i]
def get_maps():
    """Get the full dict of maps {map_name: map_class}."""
    registry = {}
    for map_cls in Map.all_subclasses():
        # Subclasses without a filename are abstract bases: skip them.
        if not map_cls.filename:
            continue
        map_name = map_cls.__name__
        if map_name in registry:
            raise DuplicateMapException("Duplicate map found: " + map_name)
        registry[map_name] = map_cls
    return registry
def apply_order(self):
    '''Naively apply query orders.

    When the query specifies any orders, re-sorts the wrapped iterable
    via ``Order.sorted``; an empty order list leaves it untouched.
    '''
    self._ensure_modification_is_safe()

    # Truthiness check replaces the non-idiomatic `len(...) > 0`.
    if self.query.orders:
        self._iterable = Order.sorted(self._iterable, self.query.orders)
def validate_serializer(serializer, _type):
    """Validates the serializer for given type.

    :param serializer: (Serializer), the serializer class to be validated.
    :param _type: (Type), type to be used for serializer validation.
    :raises ValueError: when ``serializer`` is not a subclass of ``_type``.
    """
    if not issubclass(serializer, _type):
        # The check is issubclass, so the message reports "subclass";
        # the previous "instance of" wording contradicted the check.
        raise ValueError(
            "Serializer should be a subclass of {}".format(_type.__name__))
def check_actually_paused(services=None, ports=None):
    """Check that services listed in the services object and ports are
    actually closed (not listened to), to verify that the unit is
    properly paused.

    @param services: See _extract_services_list_helper
    @returns status, : string for status (None if okay)
             message : string for problem for status_set
    """
    state = None
    message = None
    messages = []
    if services is not None:
        services = _extract_services_list_helper(services)
        services_running, services_states = _check_running_services(services)
        if any(services_states):
            # there shouldn't be any running so this is a problem
            messages.append("these services running: {}".format(
                ", ".join(_filter_tuples(services_running, True))))
            state = "blocked"
        ports_open, ports_open_bools = (
            _check_listening_on_services_ports(services, True))
        if any(ports_open_bools):
            # Build a "service: [ports]" summary for every open port.
            message_parts = {service: ", ".join([str(v) for v in open_ports])
                             for service, open_ports in ports_open.items()}
            message = ", ".join(["{}: [{}]".format(s, sp)
                                 for s, sp in message_parts.items()])
            messages.append(
                "these service:ports are open: {}".format(message))
            state = 'blocked'
    if ports is not None:
        # Explicitly listed ports must not be listened on either.
        ports_open, bools = _check_listening_on_ports_list(ports)
        if any(bools):
            messages.append(
                "these ports which should be closed, but are open: {}".format(
                    ", ".join([str(p) for p, v in ports_open if v])))
            state = 'blocked'
    if messages:
        # Collapse all findings into a single status message.
        message = ("Services should be paused but {}"
                   .format(", ".join(messages)))
    return state, message
def from_str(value: str) -> ulid.ULID:
    """Create a new :class:`~ulid.ulid.ULID` instance from the given
    :class:`~str` value.

    :param value: Base32 encoded string
    :type value: :class:`~str`
    :return: ULID from string value
    :rtype: :class:`~ulid.ulid.ULID`
    :raises ValueError: when the value is not 26 characters or malformed
    """
    # decode_ulid performs the length/alphabet validation and raises ValueError.
    return ulid.ULID(base32.decode_ulid(value))
def run_script ( self , script_id , params = None ) : """Runs a stored script . script _ id : = id of stored script . params : = up to 10 parameters required by the script . s = pi . run _ script ( sid , [ par1 , par2 ] ) s = pi . run _ script ( sid ) s = pi . run _ script ( sid , [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ] )"""
# I p1 script id # I p2 0 # I p3 params * 4 ( 0-10 params ) # ( optional ) extension # I [ ] params if params is not None : ext = bytearray ( ) for p in params : ext . extend ( struct . pack ( "I" , p ) ) nump = len ( params ) extents = [ ext ] else : nump = 0 extents = [ ] res = yield from self . _pigpio_aio_command_ext ( _PI_CMD_PROCR , script_id , 0 , nump * 4 , extents ) return _u2i ( res )
def update_stack(self, name, working_bucket, wait=False, update_only=False,
                 disable_progress=False):
    """Update or create the CF stack managed by Zappa.

    Renders the in-memory CloudFormation template to a JSON file, uploads
    it to ``working_bucket``, then creates or updates the stack named
    ``name``.  When ``wait`` is true, polls the stack status (with a tqdm
    progress bar) until it completes or fails.  The temporary template is
    removed locally and from S3 afterwards.
    """
    capabilities = []
    # Template filename is timestamped so repeated deploys don't collide.
    template = name + '-template-' + str(int(time.time())) + '.json'
    with open(template, 'wb') as out:
        out.write(bytes(self.cf_template.to_json(indent=None,
                                                 separators=(',', ':')),
                        "utf-8"))
    self.upload_to_s3(template, working_bucket,
                      disable_progress=disable_progress)
    # GovCloud uses a region-specific S3 endpoint.
    if self.boto_session.region_name == "us-gov-west-1":
        url = 'https://s3-us-gov-west-1.amazonaws.com/{0}/{1}'.format(
            working_bucket, template)
    else:
        url = 'https://s3.amazonaws.com/{0}/{1}'.format(
            working_bucket, template)
    # Preserve user tags but force the ZappaProject tag to this stack name.
    tags = [{'Key': key, 'Value': self.tags[key]}
            for key in self.tags.keys() if key != 'ZappaProject']
    tags.append({'Key': 'ZappaProject', 'Value': name})
    update = True
    try:
        # Existence probe: describe_stacks raises when the stack is absent.
        self.cf_client.describe_stacks(StackName=name)
    except botocore.client.ClientError:
        update = False
    if update_only and not update:
        print('CloudFormation stack missing, re-deploy to enable updates')
        return
    if not update:
        self.cf_client.create_stack(StackName=name,
                                    Capabilities=capabilities,
                                    TemplateURL=url,
                                    Tags=tags)
        print('Waiting for stack {0} to create (this can take a bit)..'
              .format(name))
    else:
        try:
            self.cf_client.update_stack(StackName=name,
                                        Capabilities=capabilities,
                                        TemplateURL=url,
                                        Tags=tags)
            print('Waiting for stack {0} to update..'.format(name))
        except botocore.client.ClientError as e:
            # A no-op update is not an error; just skip the wait loop.
            if e.response['Error']['Message'] == 'No updates are to be performed.':
                wait = False
            else:
                raise
    if wait:
        total_resources = len(self.cf_template.resources)
        current_resources = 0
        sr = self.cf_client.get_paginator('list_stack_resources')
        progress = tqdm(total=total_resources, unit='res',
                        disable=disable_progress)
        while True:
            time.sleep(3)
            result = self.cf_client.describe_stacks(StackName=name)
            if not result['Stacks']:
                continue  # might need to wait a bit
            if result['Stacks'][0]['StackStatus'] in ['CREATE_COMPLETE',
                                                      'UPDATE_COMPLETE']:
                break
            # Something has gone wrong.
            # Is raising enough? Should we also remove the Lambda function?
            if result['Stacks'][0]['StackStatus'] in [
                    'DELETE_COMPLETE',
                    'DELETE_IN_PROGRESS',
                    'ROLLBACK_IN_PROGRESS',
                    'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
                    'UPDATE_ROLLBACK_COMPLETE']:
                raise EnvironmentError("Stack creation failed. "
                                       "Please check your CloudFormation console. "
                                       "You may also need to `undeploy`.")
            count = 0
            for result in sr.paginate(StackName=name):
                done = (1 for x in result['StackResourceSummaries']
                        if 'COMPLETE' in x['ResourceStatus'])
                count += sum(done)
            if count:
                # We can end up in a situation where we have more resources being created
                # than anticipated.
                if (count - current_resources) > 0:
                    progress.update(count - current_resources)
                current_resources = count
        progress.close()
    try:
        os.remove(template)
    except OSError:
        pass
    self.remove_from_s3(template, working_bucket)
def cmake_setup():
    """attempt to build using CMake >= 3

    Configures the project in BINDIR from SRCDIR, then runs the build and
    passes the completed process to ``result`` for reporting.

    :raises FileNotFoundError: if no ``cmake`` executable is on PATH.
    """
    cmake_exe = shutil.which('cmake')
    if not cmake_exe:
        raise FileNotFoundError('CMake not available')
    # On Windows, force the MinGW generator and suppress sh.exe detection.
    # NOTE(review): the lone double quote in -DCMAKE_SH=" looks unbalanced
    # but is passed through literally — confirm it is intentional.
    wopts = ['-G', 'MinGW Makefiles',
             '-DCMAKE_SH="CMAKE_SH-NOTFOUND'] if os.name == 'nt' else []
    # Configure step (raises CalledProcessError on failure).
    subprocess.check_call([cmake_exe] + wopts + [str(SRCDIR)], cwd=BINDIR)
    # Build step: capture stderr so `result` can report diagnostics.
    ret = subprocess.run([cmake_exe, '--build', str(BINDIR)],
                         stderr=subprocess.PIPE,
                         universal_newlines=True)
    result(ret)
def method_file_cd(f):
    """Decorator that cd's into this object's ROOT directory for the call.

    The original working directory is restored afterwards (useful for any
    calls to TObject.Write).  Decorates methods: the wrapped callable must
    expose ``GetDirectory()``.
    """
    @wraps(f)
    def _cd_then_call(self, *args, **kwargs):
        with preserve_current_directory():
            self.GetDirectory().cd()
            result = f(self, *args, **kwargs)
        return result
    return _cd_then_call
def walk_processes(top, topname='top', topdown=True, ignoreFlag=False):
    """Generator yielding ``(name, process, level)`` for ``top`` and every
    sub-process reachable through its ``subprocess`` dict (os.walk style).

    :param top: process to start walking from
    :param str topname: name reported for ``top`` [default: 'top']
    :param bool topdown: yield a process before (True) or after (False)
        its sub-processes; each child's own ``topdown`` flag is honoured
    :param bool ignoreFlag: force top-down ordering everywhere
    """
    emit_first = True if ignoreFlag else topdown
    level = 0
    if emit_first:
        yield topname, top, level
    if len(top.subprocess) > 0:
        # Children live one level below the current process.
        level += 1
        for child_name, child in top.subprocess.items():
            for sub_name, sub_proc, sub_level in walk_processes(
                    child, topname=child_name, topdown=child.topdown,
                    ignoreFlag=ignoreFlag):
                yield sub_name, sub_proc, level + sub_level
    if not emit_first:
        # NOTE: when sub-processes exist, level is 1 here (matches the
        # original bottom-up behaviour).
        yield topname, top, level
def reboot(self):
    """Requests an autopilot reboot by sending a
    ``MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN`` command.

    Only the autopilot is rebooted (param 1 == 1); onboard computer,
    camera and mount are left untouched.  The message is sent
    asynchronously; no acknowledgement is awaited here.
    """
    reboot_msg = self.message_factory.command_long_encode(
        0, 0,  # target_system, target_component
        mavutil.mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN,  # command
        0,  # confirmation
        1,  # param 1, autopilot (reboot)
        0,  # param 2, onboard computer (do nothing)
        0,  # param 3, camera (do nothing)
        0,  # param 4, mount (do nothing)
        0, 0, 0)  # param 5 ~ 7 not used
    self.send_mavlink(reboot_msg)
def show_support_save_status_output_show_support_save_status_status(self, **kwargs):
    """Auto Generated Code

    Builds the <show_support_save_status>/<output>/
    <show-support-save-status>/<status> XML element tree and hands it to
    the callback (``kwargs['callback']`` or ``self._callback``).
    """
    config = ET.Element("config")
    rpc_root = ET.Element("show_support_save_status")
    # The generated code discards the "config" wrapper and sends the RPC root.
    config = rpc_root
    output_el = ET.SubElement(rpc_root, "output")
    status_parent = ET.SubElement(output_el, "show-support-save-status")
    status_el = ET.SubElement(status_parent, "status")
    status_el.text = kwargs.pop('status')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def send_email_message(self, recipient, subject, html_message, text_message,
                       sender_email, sender_name):
    """Send email message via Flask-Sendmail.

    Args:
        recipient: Email address or tuple of (Name, Email-address).
        subject: Subject line.
        html_message: The message body in HTML.
        text_message: The message body in plain text.
        sender_email: unused here; presumably the mail extension takes the
            sender from app config — confirm.
        sender_name: unused here (see above).
    """
    # Skip sending entirely when the app is in testing mode.
    if not current_app.testing:  # pragma: no cover
        # Prepare email message
        from flask_sendmail import Message
        message = Message(subject, recipients=[recipient],
                          html=html_message, body=text_message)
        # Send email message
        self.mail.send(message)
def convenience_calc_probs(self, params):
    """Calculates the probabilities of the chosen alternative, and the
    long format probabilities for this model and dataset.

    :param params: full parameter vector; split into shape, intercept and
        index coefficients by ``convenience_split_params``.
    :return: whatever ``cc.calc_probabilities`` returns with
        ``return_long_probs=True`` (chosen-alternative and long-format
        probabilities).
    """
    shapes, intercepts, betas = self.convenience_split_params(params)
    # Positional arguments expected by cc.calc_probabilities.
    prob_args = [betas, self.design, self.alt_id_vector,
                 self.rows_to_obs, self.rows_to_alts,
                 self.utility_transform]
    prob_kwargs = {"intercept_params": intercepts,
                   "shape_params": shapes,
                   "chosen_row_to_obs": self.chosen_row_to_obs,
                   "return_long_probs": True}
    prob_results = cc.calc_probabilities(*prob_args, **prob_kwargs)
    return prob_results
def parsimonious_acr(tree, character, prediction_method, states, num_nodes, num_tips):
    """Calculates parsimonious states on the tree and stores them in the
    corresponding feature.

    :param states: numpy array of possible states
    :param prediction_method: str, ACCTRAN (accelerated transformation),
        DELTRAN (delayed transformation) or DOWNPASS (MP runs all of them)
    :param tree: ete3.Tree, the tree of interest
    :param character: str, character for which the parsimonious states are
        reconstructed
    :param num_nodes: total number of nodes (used for per-node averages)
    :param num_tips: total number of tips (reported in the result)
    :return: list of dicts, one per method run, mapping reconstruction
        parameters to values
    """
    initialise_parsimonious_states(tree, character, states)
    uppass(tree, character)
    results = []
    result = {STATES: states, NUM_NODES: num_nodes, NUM_TIPS: num_tips}
    logger = logging.getLogger('pastml')

    def process_result(method, feature):
        # Resolve node states for `method`, record stats, append to results.
        out_feature = get_personalized_feature_name(character, method) \
            if prediction_method != method else character
        res = result.copy()
        res[NUM_SCENARIOS], res[NUM_UNRESOLVED_NODES], res[NUM_STATES_PER_NODE] = \
            choose_parsimonious_states(tree, feature, out_feature)
        res[NUM_STATES_PER_NODE] /= num_nodes
        res[PERC_UNRESOLVED] = res[NUM_UNRESOLVED_NODES] * 100 / num_nodes
        logger.debug('{} node{} unresolved ({:.2f}%) for {} by {}, '
                     'i.e. {:.4f} state{} per node in average.'
                     .format(res[NUM_UNRESOLVED_NODES],
                             's are' if res[NUM_UNRESOLVED_NODES] != 1 else ' is',
                             res[PERC_UNRESOLVED], character, method,
                             res[NUM_STATES_PER_NODE],
                             's' if res[NUM_STATES_PER_NODE] > 1 else ''))
        res[CHARACTER] = out_feature
        res[METHOD] = method
        results.append(res)

    if prediction_method in {ACCTRAN, MP}:
        feature = get_personalized_feature_name(character, PARS_STATES)
        if prediction_method == MP:
            feature = get_personalized_feature_name(feature, ACCTRAN)
        acctran(tree, character, feature)
        result[STEPS] = get_num_parsimonious_steps(tree, feature)
        process_result(ACCTRAN, feature)
        # Clean up the per-node scratch features before the next pass.
        bu_feature = get_personalized_feature_name(character, BU_PARS_STATES)
        for node in tree.traverse():
            if prediction_method == ACCTRAN:
                node.del_feature(bu_feature)
            node.del_feature(feature)
    if prediction_method != ACCTRAN:
        downpass(tree, character, states)
        feature = get_personalized_feature_name(character, PARS_STATES)
        if prediction_method == DOWNPASS:
            result[STEPS] = get_num_parsimonious_steps(tree, feature)
        if prediction_method in {DOWNPASS, MP}:
            process_result(DOWNPASS, feature)
        if prediction_method in {DELTRAN, MP}:
            deltran(tree, character)
            if prediction_method == DELTRAN:
                result[STEPS] = get_num_parsimonious_steps(tree, feature)
            process_result(DELTRAN, feature)
        for node in tree.traverse():
            node.del_feature(feature)
    logger.debug("Parsimonious reconstruction for {} requires {} state changes."
                 .format(character, result[STEPS]))
    return results
def getParameterByType(self, type):
    """Return the first parameter whose type equals *type*, or None."""
    for candidate in self.getParameters():
        if candidate.getType() == type:
            return candidate
    return None
def annotate(self, text, lang=None, customParams=None):
    """Identify the list of entities and nonentities mentioned in the text.

    @param text: input text to annotate
    @param lang: language of the provided document (ISO2 or ISO3 code);
        when None the language is detected server-side
    @param customParams: None or a dict with custom parameters to send to
        the annotation service
    @returns: dict
    """
    payload = {"lang": lang, "text": text}
    if customParams:
        payload.update(customParams)
    return self._er.jsonRequestAnalytics("/api/v1/annotate", payload)
def discover_glitter_apps(self):
    """Find all the Glitter App configurations in the current project.

    Looks for a ``glitter_apps`` module in every installed Django app and
    merges its ``apps`` dict into ``self.glitter_apps``.  Apps without
    such a module are silently skipped.  Sets ``self.discovered`` when done.
    """
    for app_name in settings.INSTALLED_APPS:
        module_name = '{app_name}.glitter_apps'.format(app_name=app_name)
        try:
            glitter_apps_module = import_module(module_name)
            if hasattr(glitter_apps_module, 'apps'):
                self.glitter_apps.update(glitter_apps_module.apps)
        except ImportError:
            # App has no glitter_apps module — nothing to register.
            pass
    self.discovered = True
def predict(self, x_test):
    """Returns the prediction of the model on the given test data.

    Args:
        x_test : array-like, shape = (n_samples, sent_length)
        Test samples.

    Returns:
        y_pred : array-like, shape = (n_samples, sent_length)
        Prediction labels for x.

    Raises:
        OSError: if no model has been loaded yet.
    """
    if self.model:
        # NOTE: `map` is lazy, so the lengths are taken from the original
        # x_test even though the name is rebound on the next line.
        lengths = map(len, x_test)
        x_test = self.p.transform(x_test)
        y_pred = self.model.predict(x_test)
        # Trim padded predictions back to the original sentence lengths.
        y_pred = self.p.inverse_transform(y_pred, lengths)
        return y_pred
    else:
        raise OSError('Could not find a model. Call load(dir_path).')
def debug(self, message, extra=None):
    '''Writes a debug message to the log.

    @param message: The message to write
    @param extra: The extras object to pass in (a fresh dict is used when
        omitted; the previous mutable default ``extra={}`` was shared
        across calls and could accumulate state)
    '''
    # Fix: avoid the shared mutable default argument.
    if extra is None:
        extra = {}
    # Only write when the configured log level admits DEBUG messages.
    if self.level_dict['DEBUG'] >= self.level_dict[self.log_level]:
        extras = self.add_extras(extra, "DEBUG")
        self._write_message(message, extras)
    # Callbacks fire regardless of the log-level filter (as before).
    self.fire_callbacks('DEBUG', message, extra)
def get_obj_doc0(obj, alt="(no doc)"):
    """Returns first line of obj.__doc__, or alternative text."""
    doc = obj.__doc__
    if doc is None:
        return alt
    return doc.strip().split("\n", 1)[0]
def _preprocess_data ( self , data ) : """Converts a data array to the preferred 3D structure . Parameters data : : obj : ` numpy . ndarray ` The data to process . Returns : obj : ` numpy . ndarray ` The data re - formatted ( if needed ) as a 3D matrix Raises ValueError If the data is not 1 , 2 , or 3D to begin with ."""
original_type = data . dtype if len ( data . shape ) == 1 : data = data [ : , np . newaxis , np . newaxis ] elif len ( data . shape ) == 2 : data = data [ : , : , np . newaxis ] elif len ( data . shape ) == 0 or len ( data . shape ) > 3 : raise ValueError ( 'Illegal data array passed to image. Must be 1, 2, or 3 dimensional numpy array' ) return data . astype ( original_type )
def _to_dict ( self ) : """Return a json dictionary representing this model ."""
_dict = { } if hasattr ( self , 'system' ) and self . system is not None : _dict [ 'system' ] = self . system . _to_dict ( ) return _dict
def bootstrap_c_source(scheduler_bindings_path, output_dir, module_name=NATIVE_ENGINE_MODULE):
    """Bootstrap an external CFFI C source file.

    Generates ``<module_name>.c`` (patched for Py2/Py3 and rust linking)
    and a ``<module_name>.cflags`` env script in ``output_dir`` from the
    scheduler bindings header.
    """
    safe_mkdir(output_dir)
    with temporary_dir() as tempdir:
        temp_output_prefix = os.path.join(tempdir, module_name)
        real_output_prefix = os.path.join(output_dir, module_name)
        temp_c_file = '{}.c'.format(temp_output_prefix)
        if PY2:
            temp_c_file = temp_c_file.encode('utf-8')
        c_file = '{}.c'.format(real_output_prefix)
        env_script = '{}.cflags'.format(real_output_prefix)
        # Preprocessor directives won't parse in the .cdef calls, so we have to hide them for now.
        scheduler_bindings_content = read_file(scheduler_bindings_path)
        scheduler_bindings = _hackily_rewrite_scheduler_bindings(scheduler_bindings_content)
        ffibuilder = cffi.FFI()
        ffibuilder.cdef(scheduler_bindings)
        ffibuilder.cdef(_FFISpecification.format_cffi_externs())
        ffibuilder.set_source(module_name, scheduler_bindings)
        ffibuilder.emit_c_code(temp_c_file)
        # Work around https://github.com/rust-lang/rust/issues/36342 by renaming initnative_engine to
        # wrapped_initnative_engine so that the rust code can define the symbol initnative_engine.
        # If we dont do this, we end up at the mercy of the implementation details of rust's stripping
        # and LTO. In the past we have found ways to trick it into not stripping symbols which was handy
        # (it kept the binary working) but inconvenient (it was relying on unspecified behavior, it meant
        # our binaries couldn't be stripped which inflated them by 2~3x, and it reduced the amount of LTO
        # we could use, which led to unmeasured performance hits).
        # We additionally remove the ifdefs that apply conditional `init` logic for Py2 vs Py3, in order
        # to define a module that is loadable by either 2 or 3.
        # TODO: Because PyPy uses the same `init` function name regardless of the python version, this
        # trick does not work there: we leave its conditional in place.
        file_content = read_file(temp_c_file)
        if CFFI_C_PATCH_BEFORE not in file_content:
            raise Exception('The patch for the CFFI generated code will not apply cleanly.')
        file_content = file_content.replace(CFFI_C_PATCH_BEFORE, CFFI_C_PATCH_AFTER)
        # Extract the preprocessor directives we had to hide to get the .cdef call to parse.
        file_content = _hackily_recreate_includes_for_bindings(file_content)
        _replace_file(c_file, file_content)
        # Write a shell script to be sourced at build time that contains inherited CFLAGS.
        _replace_file(env_script, get_build_cflags())
def getClientSSLContext(self):
    '''Returns an ssl.SSLContext appropriate for initiating a TLS session.'''
    context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
    # Add any locally-configured CA certificates to the default trust store.
    self._loadCasIntoSSLContext(context)
    return context
def user_activity_stats_by_date(self, username, date, grouped=None):
    """Retrieve activity information about a specific user on the
    specified date.

    Params:
        username (string): username of the user whose activity you are
            interested in.
        date (string): date of interest, best provided in ISO format:
            YYYY-MM-DD
        grouped (boolean): whether or not to group the commits
    Returns:
        list: activities done by the given user on that date across all
        projects of this Pagure instance.
    """
    endpoint = "{}/api/0/user/{}/activity/{}".format(
        self.instance, username, date)
    query = {}
    if username is not None:
        query['username'] = username
    if date is not None:
        query['date'] = date
    if grouped is not None:
        query['grouped'] = grouped
    response = self._call_api(endpoint, params=query)
    return response['activities']
def calcAcceptanceRatio(self, V, W):
    """Given an order vector V and a proposed order vector W, calculate
    the acceptance ratio for changing to W when using MCMC.

    Uses ``self.wmg`` (weighted majority graph: wmg[a][b] is how often a
    beats b minus how often b beats a) and ``self.phi`` (0 <= phi <= 1).

    :param V: list of candidate ints, current ranking (first to last).
    :param W: list of candidate ints, proposed ranking (first to last).
    :return: float acceptance ratio.
    """
    ratio = 1.0
    for cand_i, cand_j in itertools.combinations(V, 2):
        # 1 when cand_i is ranked above cand_j, else 0, in each ordering.
        v_i_over_j = 0 if V.index(cand_i) > V.index(cand_j) else 1
        w_i_over_j = 0 if W.index(cand_i) > W.index(cand_j) else 1
        ratio *= self.phi ** (self.wmg[cand_i][cand_j] * (v_i_over_j - w_i_over_j))
    return ratio
def as_rest_table(data, full=False):
    """Render *data* (list of rows; first row is the header) as an
    rST/Sphinx table string.  ``full=True`` produces a grid table with
    borders; otherwise a simple table.

    Originally from ActiveState recipes, copy/pasted from GitHub where it
    is listed with an MIT license.
    https://github.com/ActiveState/code/tree/master/recipes/Python/579054_Generate_Sphinx_table
    """
    data = data if data else [["No Data"]]
    table = []
    # max size of each column
    sizes = list(map(max, zip(*[[len(str(elt)) for elt in member]
                                for member in data])))
    num_elts = len(sizes)
    if full:
        start_of_line = "| "
        vertical_separator = " | "
        end_of_line = " |"
        line_marker = "-"
    else:
        start_of_line = ""
        vertical_separator = " "
        end_of_line = ""
        line_marker = "="
    # Build a row template with per-column field widths.
    meta_template = vertical_separator.join(
        ["{{{{{0}:{{{0}}}}}}}".format(i) for i in range(num_elts)])
    template = "{0}{1}{2}".format(start_of_line,
                                  meta_template.format(*sizes),
                                  end_of_line)
    # determine top/bottom borders
    if full:
        to_separator = maketrans("| ", "+-")
    else:
        to_separator = maketrans("|", "+")
    start_of_line = start_of_line.translate(to_separator)
    vertical_separator = vertical_separator.translate(to_separator)
    end_of_line = end_of_line.translate(to_separator)
    separator = "{0}{1}{2}".format(
        start_of_line,
        vertical_separator.join([x * line_marker for x in sizes]),
        end_of_line, )
    # determine header separator
    th_separator_tr = maketrans("-", "=")
    start_of_line = start_of_line.translate(th_separator_tr)
    line_marker = line_marker.translate(th_separator_tr)
    vertical_separator = vertical_separator.translate(th_separator_tr)
    end_of_line = end_of_line.translate(th_separator_tr)
    th_separator = "{0}{1}{2}".format(
        start_of_line,
        vertical_separator.join([x * line_marker for x in sizes]),
        end_of_line, )
    # prepare result
    table.append(separator)
    # set table header
    titles = data[0]
    table.append(template.format(*titles))
    table.append(th_separator)
    for d in data[1:-1]:
        table.append(template.format(*d))
        if full:
            table.append(separator)
    table.append(template.format(*data[-1]))
    table.append(separator)
    return "\n".join(table)
def _get_derivative ( self , C , sa1180 , vs30 ) : """Returns equation 30 page 1047"""
derAmp = np . zeros_like ( vs30 ) n = self . CONSTS [ 'n' ] c = C [ 'c' ] b = C [ 'b' ] idx = vs30 < C [ 'vlin' ] derAmp [ idx ] = ( b * sa1180 [ idx ] * ( - 1. / ( sa1180 [ idx ] + c ) + 1. / ( sa1180 [ idx ] + c * ( vs30 [ idx ] / C [ 'vlin' ] ) ** n ) ) ) return derAmp
def _match_dbname ( self , dbname ) : """Map a database name to the Cluster that holds the database . Args : dbname : A database name . Returns : A dict containing the information about the Cluster that holds the database ."""
for config in self . _clusters : if re . match ( config [ 'pattern' ] , dbname ) : return config raise Exception ( 'No such database %s.' % dbname )
def query_int_attribute(self, target, display_mask, attr):
    """Return the value of an integer attribute.

    Issues an NV-CONTROL QueryAttribute request for *attr* on *target*
    (restricted by *display_mask*) and returns the integer value, or
    None when the reply's flags indicate the attribute is unavailable.
    """
    reply = NVCtrlQueryAttributeReplyRequest(
        display=self.display,
        opcode=self.display.get_extension_major(extname),
        target_id=target.id(),
        target_type=target.type(),
        display_mask=display_mask,
        attr=attr)
    # A zero/absent flags field means the attribute query failed.
    if not reply._data.get('flags'):
        return None
    return int(reply._data.get('value'))
def instruction_PAGE(self, opcode):
    """call op from page 2 or 3

    Reads the second opcode byte from the program counter, combines it
    with the page prefix, and dispatches the resulting paged opcode.
    """
    op_address, second_byte = self.read_pc_byte()
    paged = opcode * 256 + second_byte
    # log.debug("$%x *** call paged opcode $%x" % (
    #     self.program_counter, paged))
    self.call_instruction_func(op_address - 1, paged)
def step(self):
    """Run the next child task and wait for completion (no timeout)."""
    if self.index >= len(self.tasklist):
        raise TaskError("step(): sequential compound task %s finished" % self)
    self.check_state()
    # Select next task from the set and advance the index
    self.task = self.tasklist[self.index]
    self.index = self.index + 1
    return self.runTask(self.task)
def index_humansorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
    """This is a wrapper around ``index_natsorted(seq, alg=ns.LOCALE)``.

    Parameters
    ----------
    seq : iterable
        The input to sort.
    key : callable, optional
        A key used to determine how to sort each element of the sequence.
        It is **not** applied recursively.
    reverse : {True, False}, optional
        Return the list in reversed sorted order. The default is `False`.
    alg : ns enum, optional
        Extra algorithm flags; ``ns.LOCALE`` is always OR-ed in.

    Returns
    -------
    out : tuple
        The ordered indexes of the input.

    Notes
    -----
    Please read :ref:`locale_issues` before using `humansorted`.

    Examples
    --------
    >>> a = ['Apple', 'Banana', 'apple', 'banana']
    >>> index_humansorted(a)
    [2, 0, 3, 1]
    """
    return index_natsorted(seq, key, reverse, alg | ns.LOCALE)
def latencies(self):
    """List[Tuple[:class:`int`, :class:`float`]]: A list of latencies
    between a HEARTBEAT and a HEARTBEAT_ACK in seconds.

    This returns a list of tuples with elements ``(shard_id, latency)``.
    """
    return [(sid, current.ws.latency)
            for sid, current in self.shards.items()]
def _begin_stream(self, command: Command):
    '''Start data stream transfer.

    Sends *command* via the commander, records the server's preliminary
    reply on the current response object, and notifies listeners that the
    transfer has begun.  Generator-based coroutine (``yield from``).
    '''
    begin_reply = yield from self._commander.begin_stream(command)
    self._response.reply = begin_reply
    self.event_dispatcher.notify(self.Event.begin_transfer, self._response)
def shut_down():
    """Closes connection and restores terminal.

    Tears down curses (restoring echo/cbreak and the screen), closes the
    global gpsd socket, prints a goodbye message and exits the process.
    """
    curses.nocbreak()
    curses.echo()
    curses.endwin()
    gpsd_socket.close()
    print('Keyboard interrupt received\nTerminated by user\nGood Bye.\n')
    # NOTE(review): exits with status 1 even for a user-requested quit —
    # confirm a non-zero code is intended here.
    sys.exit(1)
def print_page_cb(self, print_op, print_context, keep_refs={}):
    """Called for printing operation by Gtk.

    Renders this page's image onto the print context: rotates the image
    when its orientation differs from the paper's, scales it to fit the
    printable area, and paints it through cairo.

    NOTE(review): the mutable default ``keep_refs={}`` is shared across
    calls; it appears deliberate (the cairo surface must stay referenced
    until printing finishes) — confirm before changing.
    """
    ORIENTATION_PORTRAIT = 0
    ORIENTATION_LANDSCAPE = 1
    img = self.img
    (width, height) = img.size
    # take care of rotating the image if required
    if print_context.get_width() <= print_context.get_height():
        print_orientation = ORIENTATION_PORTRAIT
    else:
        print_orientation = ORIENTATION_LANDSCAPE
    if width <= height:
        img_orientation = ORIENTATION_PORTRAIT
    else:
        img_orientation = ORIENTATION_LANDSCAPE
    if print_orientation != img_orientation:
        logger.info("Rotating the page ...")
        img = img.rotate(90, expand=True)
        (width, height) = img.size
    # scale the image down
    # XXX(Jflesch): beware that we get floats for the page size ...
    # (The dead initial assignment `scaling = 2.0` from the original was
    # removed: it was always overwritten here.)
    scaling = min(
        print_context.get_width() / width,
        print_context.get_height() / height
    )
    logger.info("DPI: %fx%f" % (print_context.get_dpi_x(),
                                print_context.get_dpi_y()))
    surface = image2surface(img)
    # Keep the surface alive until the print operation completes.
    keep_refs['surface_cache_' + str(self.page_nb)] = surface
    # .. and print!
    cairo_context = print_context.get_cairo_context()
    cairo_context.scale(scaling, scaling)
    cairo_context.set_source_surface(surface, 0, 0)
    cairo_context.paint()
def index_of_nearest(p, hot_points, distance_f=distance):
    """Given a point and a set of hot points, find the hot point nearest
    to the given point.  An arbitrary distance function can be specified.

    :return: the index of the nearest hot point, or None if the list of
        hot points is empty (ties keep the earliest index).
    """
    if not hot_points:
        return None
    # min() keeps the first of equally-near points, matching the original
    # strict-< comparison.
    return min(range(len(hot_points)),
               key=lambda i: distance_f(p, hot_points[i]))
def displayStatusMessage(self, msgObj):
    """Display the last status message and partially completed key
    sequences.

    |Args|
        * ``msgObj`` (**QtmacsMessage**): the data supplied by the hook.
    |Returns|
        **None**
    """
    # Ensure the message ends with a newline character.
    text = msgObj.data
    if not text.endswith('\n'):
        text += '\n'
    # Display the message in the status field.
    self.qteLabel.setText(text)
def clip_datetime(dt, tz=DEFAULT_TZ, is_dst=None):
    """Limit a datetime to a valid range for datetime, datetime64, and
    Timestamp objects.

    >>> from datetime import timedelta
    >>> clip_datetime(MAX_DATETIME + timedelta(100)) == pd.Timestamp(MAX_DATETIME, tz='utc') == MAX_TIMESTAMP
    True
    >>> MAX_TIMESTAMP
    Timestamp('2262-04-11 23:47:16.854775+0000', tz='UTC')
    """
    if isinstance(dt, datetime.datetime):
        # TODO: this gives up a day of datetime range due to assumptions about timezone
        # make MIN/MAX naive and replace dt.replace(tz=None) before comparison
        # set it back when done
        dt = make_tz_aware(dt, tz=tz, is_dst=is_dst)
        try:
            return pd.Timestamp(dt)
        except (ValueError, AttributeError):
            # Out of Timestamp range (or odd tz object); fall through to clip.
            pass
        if dt > MAX_DATETIME:
            return MAX_TIMESTAMP
        elif dt < MIN_DATETIME:
            return MIN_TIMESTAMP
        # NOTE(review): reached only when pd.Timestamp() failed yet dt is
        # within range — returns NaT; confirm this is the intended fallback.
        return NAT
    # Non-datetime inputs are passed through unchanged.
    return dt
def register_segment_dcnm(self, cfg, seg_id_min, seg_id_max):
    """Register segmentation id pool with DCNM.

    Creates the orchestrator's segment-id range on DCNM if absent, or
    updates it when the configured bounds differ from what DCNM reports.
    Exits the process (SystemExit) if the DCNM request fails.
    """
    orch_id = cfg.dcnm.orchestrator_id
    try:
        segid_range = self.dcnm_client.get_segmentid_range(orch_id)
        if segid_range is None:
            self.dcnm_client.set_segmentid_range(orch_id, seg_id_min, seg_id_max)
        else:
            # DCNM reports the range as a "min-max" string.
            conf_min, _, conf_max = segid_range["segmentIdRanges"].partition("-")
            if int(conf_min) != seg_id_min or int(conf_max) != seg_id_max:
                self.dcnm_client.update_segmentid_range(orch_id, seg_id_min, seg_id_max)
    except dexc.DfaClientRequestFailed as exc:
        LOG.error("Segment ID range could not be created/updated"
                  " on DCNM: %s", exc)
        raise SystemExit(exc)
def read_targets(targets):
    """Reads generic key-value pairs from input files.

    Each target file is parsed with the regexer selected for it by
    ``regexer_for_targets``; all extracted pairs are merged into a single
    dict (later files win on duplicate keys).
    """
    collected = {}
    for path, regexer in regexer_for_targets(targets):
        with open(path) as handle:
            collected.update(extract_keypairs(handle.readlines(), regexer))
    return collected
def resolve(self, key):
    """Resolve a key to a factory.

    Attempts to resolve explicit bindings and entry points, preferring
    explicit bindings.

    :raises NotBoundError: if the key cannot be resolved
    """
    try:
        factory = self._resolve_from_binding(key)
    except NotBoundError:
        # No explicit binding — fall back to entry-point lookup.
        factory = self._resolve_from_entry_point(key)
    return factory
def get_server_capabilities ( self ) : """Get hardware properties which can be used for scheduling : return : a dictionary of server capabilities . : raises : IloError , on an error from iLO . : raises : IloCommandNotSupportedError , if the command is not supported on the server ."""
capabilities = self . _call_method ( 'get_server_capabilities' ) # TODO ( nisha ) : Assumption is that Redfish always see the pci _ device # member name field populated similarly to IPMI . # If redfish is not able to get nic _ capacity , we can fall back to # IPMI way of retrieving nic _ capacity in the future . As of now # the IPMI is not tested on Gen10 , hence assuming that # Redfish will always be able to give the data . if ( 'Gen10' not in self . model ) : major_minor = ( self . _call_method ( 'get_ilo_firmware_version_as_major_minor' ) ) # NOTE ( vmud213 ) : Even if it is None , pass it on to get _ nic _ capacity # as we still want to try getting nic capacity through ipmitool # irrespective of what firmware we are using . nic_capacity = ipmi . get_nic_capacity ( self . ipmi_host_info , major_minor ) if nic_capacity : capabilities . update ( { 'nic_capacity' : nic_capacity } ) if capabilities : return capabilities
def qteImportModule(self, fileName: str):
    """Import ``fileName`` at run-time.

    If ``fileName`` has no path prefix then it must be in the standard
    Python module path; otherwise only the given directory is searched.
    Relative path names are possible.

    |Args|

    * ``fileName`` (**str**): file name (with full path) of module to import.

    |Returns|

    * **module**: the imported Python module, or **None** if an error
      occurred.

    |Raises|

    * **None**
    """
    directory, base = os.path.split(fileName)
    modname = os.path.splitext(base)[0]

    # No directory prefix: search Python's default paths; otherwise
    # restrict the search to the supplied directory.
    search_paths = sys.path if directory == '' else [directory]

    try:
        fp, pathname, description = imp.find_module(modname, search_paths)
    except ImportError:
        self.qteLogger.error(
            'Could not find module <b>{}</b>.'.format(fileName))
        return None

    try:
        return imp.load_module(modname, fp, pathname, description)
    except ImportError:
        self.qteLogger.error(
            'Could not import module <b>{}</b>.'.format(fileName))
        return None
    finally:
        # The imp documentation requires the caller to close the file
        # pointer explicitly.
        if fp:
            fp.close()
def AFF4Path ( self , client_urn ) : """Returns the AFF4 URN this pathspec will be stored under . Args : client _ urn : A ClientURN . Returns : A urn that corresponds to this pathspec . Raises : ValueError : If pathspec is not of the correct type ."""
# If the first level is OS and the second level is TSK its probably a mount # point resolution . We map it into the tsk branch . For example if we get : # path : \ \ \ \ . \ \ Volume { 1234 } \ \ # pathtype : OS # mount _ point : / c : / # nested _ path { # path : / windows / # pathtype : TSK # We map this to aff4 : / / client _ id / fs / tsk / \ \ \ \ . \ \ Volume { 1234 } \ \ / windows / if not self . HasField ( "pathtype" ) : raise ValueError ( "Can't determine AFF4 path without a valid pathtype." ) first_component = self [ 0 ] dev = first_component . path if first_component . HasField ( "offset" ) : # We divide here just to get prettier numbers in the GUI dev += ":{}" . format ( first_component . offset // 512 ) if ( len ( self ) > 1 and first_component . pathtype == PathSpec . PathType . OS and self [ 1 ] . pathtype == PathSpec . PathType . TSK ) : result = [ self . AFF4_PREFIXES [ PathSpec . PathType . TSK ] , dev ] # Skip the top level pathspec . start = 1 else : # For now just map the top level prefix based on the first pathtype result = [ self . AFF4_PREFIXES [ first_component . pathtype ] ] start = 0 for p in self [ start ] : component = p . path # The following encode different pathspec properties into the AFF4 path in # such a way that unique files on the client are mapped to unique URNs in # the AFF4 space . Note that this transformation does not need to be # reversible since we always use the PathSpec when accessing files on the # client . if p . HasField ( "offset" ) : component += ":{}" . format ( p . offset // 512 ) # Support ADS names . if p . HasField ( "stream_name" ) : component += ":" + p . stream_name result . append ( component ) return client_urn . Add ( "/" . join ( result ) )
def add_menu_items_for_pages(self, pagequeryset=None, allow_subnav=True):
    """Add menu items to this menu, linking to each page in `pagequeryset`
    (which should be a PageQuerySet instance).

    Sort order continues from the number of items already present, and the
    new items are inserted in a single bulk_create call.
    """
    manager = self.get_menu_items_manager()
    menu_item_cls = manager.model
    first_order = manager.count()
    new_items = [
        menu_item_cls(
            menu=self,
            link_page=page,
            sort_order=first_order + offset,
            allow_subnav=allow_subnav,
        )
        for offset, page in enumerate(pagequeryset.all())
    ]
    manager.bulk_create(new_items)
def extract(self, file_obj, extractOnly=True, handler='update/extract', **kwargs):
    """POSTs a file to the Solr ExtractingRequestHandler so rich content can
    be processed using Apache Tika. See the Solr wiki for details:

        http://wiki.apache.org/solr/ExtractingRequestHandler

    The ExtractingRequestHandler has a very simple model: it extracts
    contents and metadata from the uploaded file and inserts it directly
    into the index. This is rarely useful as it allows no way to store
    additional data or otherwise customize the record. Instead, by default
    we'll use the extract-only mode to extract the data without indexing it
    so the caller has the opportunity to process it as appropriate; call
    with ``extractOnly=False`` if you want to insert with no additional
    processing.

    Returns None if metadata cannot be extracted; otherwise returns a
    dictionary containing at least two keys:

        :contents: Extracted full-text content, if applicable
        :metadata: key:value pairs of text strings
    """
    if not hasattr(file_obj, "name"):
        raise ValueError(
            "extract() requires file-like objects which have a defined name property")
    params = {
        "extractOnly": "true" if extractOnly else "false",
        "lowernames": "true",
        "wt": "json",
    }
    params.update(kwargs)
    # URL-quote the filename so it is safe in the multipart payload.
    filename = quote(file_obj.name.encode('utf-8'))
    try:
        # We'll provide the file using its true name as Tika may use that
        # as a file type hint:
        resp = self._send_request(
            'post', handler, body=params, files={'file': (filename, file_obj)})
    except (IOError, SolrError):
        self.log.exception("Failed to extract document metadata")
        raise
    try:
        data = json.loads(resp)
    except ValueError:
        self.log.exception("Failed to load JSON response")
        raise
    # Solr keys the extracted text and metadata by the uploaded filename.
    data['contents'] = data.pop(filename, None)
    data['metadata'] = metadata = {}
    raw_metadata = data.pop("%s_metadata" % filename, None)
    if raw_metadata:
        # The raw format is somewhat annoying: it's a flat list of
        # alternating keys and value lists. Dict assignment evaluates the
        # right-hand side first, so the trailing value is popped before
        # its preceding key -- the pairing below is therefore correct.
        while raw_metadata:
            metadata[raw_metadata.pop()] = raw_metadata.pop()
    return data
def add_results(self, *rvs, **kwargs):
    """Changes the state to reflect the mutation which yielded the given
    result.

    In order to use the result, the `fetch_mutation_tokens` option must have
    been specified in the connection string, _and_ the result must have been
    successful.

    :param rvs: One or more :class:`~.OperationResult` which have been
        returned from mutations
    :param quiet: Suppress errors if one of the results does not contain a
        convertible state.
    :return: `True` if the result was valid and added, `False` if not added
        (and `quiet` was specified)
    :raise: :exc:`~.MissingTokenError` if `result` does not contain a valid
        token
    """
    if not rvs:
        raise MissingTokenError.pyexc(message='No results passed')
    quiet = kwargs.get('quiet')
    for result in rvs:
        token = result._mutinfo
        if token:
            self._add_scanvec(token)
            continue
        # No usable token on this result: either bail out quietly or raise.
        if quiet:
            return False
        raise MissingTokenError.pyexc(message='Result does not contain token')
    return True
def get_members_of_group(self, gname):
    """Return the contacts belonging to the named contact group.

    :param gname: name of the group
    :type gname: str
    :return: contacts in the group, or an empty list when the group is
        not found
    :rtype: list[alignak.objects.contact.Contact]
    """
    group = self.find_by_name(gname)
    # An unknown (or falsy) group yields an empty member list.
    return group.get_contacts() if group else []
def create(cls, name, md5_password=None, connect_retry=120,
           session_hold_timer=180, session_keep_alive=60):
    """Create a new BGP Connection Profile.

    :param str name: name of profile
    :param str md5_password: optional md5 password
    :param int connect_retry: the connect retry timer, in seconds
    :param int session_hold_timer: the session hold timer, in seconds
    :param int session_keep_alive: the session keep alive timer, in seconds
    :raises CreateElementFailed: failed creating profile
    :return: instance with meta
    :rtype: BGPConnectionProfile
    """
    payload = {
        'name': name,
        'connect': connect_retry,
        'session_hold_timer': session_hold_timer,
        'session_keep_alive': session_keep_alive,
    }
    # The password is only submitted when one was supplied.
    if md5_password:
        payload['md5_password'] = md5_password
    return ElementCreator(cls, payload)
def get_report_rst(self):
    """Format the project into a report in RST format and return it."""
    heading_rule = '-----------------------------------\n'
    section_rule = '===========================================\n\n'
    parts = [
        heading_rule,
        self.nme + '\n',
        heading_rule + '\n',
        self.desc + '\n',
        self.fldr + '\n\n',
        '.. contents:: \n\n\n',
        'Overview\n' + section_rule,
        'This document contains details on the project ' + self.nme + '\n\n',
    ]
    # One bullet per (key, value) detail pair.
    parts.extend(' - {0} = {1}\n\n'.format(key, value)
                 for key, value in self.details)
    parts.append('\nTABLES\n' + section_rule)
    # Each data table contributes its own subsection.
    for table in self.datatables:
        parts.append(table.name + '\n')
        parts.append('-------------------------\n\n')
        parts.append(table.format_rst() + '\n\n')
    return ''.join(parts)
def grid(self, **kw):
    """Position this widget in the parent widget in a grid.

    Keyword options (all optional, forwarded to Tk's grid manager):

    :param column: use cell identified with given column (starting with 0)
    :param columnspan: this widget will span several columns
    :param in_: widget to use as container
    :param ipadx: add internal padding in x direction
    :param ipady: add internal padding in y direction
    :param padx: add padding in x direction
    :param pady: add padding in y direction
    :param row: use cell identified with given row (starting with 0)
    :param rowspan: this widget will span several rows
    :param sticky: "n", "s", "e", "w" or combinations: if cell is larger on
        which sides will this widget stick to the cell boundary
    """
    ttk.Scrollbar.grid(self, **kw)
    # Record which geometry manager was used; presumably read elsewhere to
    # lay out a companion widget consistently -- TODO confirm.
    self._layout = 'grid'
def compare(molecules, ensemble_lookup, options):
    """Compare two scoring ensembles over the same molecule set.

    Computes AUC and enrichment-factor differences between the first two
    ensembles (by sorted key) in ``ensemble_lookup``, writes a summary, and
    optionally writes ROC data and plots.

    :param molecules: molecule records shared by both ensembles
    :param ensemble_lookup: mapping of ensemble file name -> score data;
        exactly the first two (sorted) entries are compared
    :param options: parsed options (read here: write_roc, plot, and
        whatever make_fpfList consumes)
    :return: None (results are written via the output module)
    """
    print ( " Analyzing differences ... " )
    print('')
    sort_order = classification.get_sort_order(molecules)
    # The two compared ensembles are picked deterministically by key order.
    ensemble1 = sorted(ensemble_lookup.keys())[0]
    ensemble2 = sorted(ensemble_lookup.keys())[1]
    stats = {}
    stats['header'] = [' ']
    name = os.path.basename(ensemble1).replace('.csv', '')
    stats['header'].append(name)
    name = os.path.basename(ensemble2).replace('.csv', '')
    stats['header'].append(name)
    stats['header'].append('Difference')
    stats['header'].append('95% CI')
    stats['header'].append('p-value')
    # Deep copies keep per-ensemble scoring from mutating the shared input.
    molecules1 = copy.deepcopy(molecules)
    molecules2 = copy.deepcopy(molecules)
    score_structure1 = classification.make_score_structure(
        molecules1, ensemble_lookup[ensemble1])
    score_structure2 = classification.make_score_structure(
        molecules2, ensemble_lookup[ensemble2])
    auc_structure_1 = classification.make_auc_structure(score_structure1)
    auc_structure_2 = classification.make_auc_structure(score_structure2)
    # calculate auc value differences
    auc_diff = classification.calculate_auc_diff(
        auc_structure_1, auc_structure_2, sort_order)
    stats['AUC'] = auc_diff
    # calculate enrichment factor differences at each false-positive fraction
    fpfList = make_fpfList(options)
    for fpf in fpfList:
        fpf = float(fpf)
        ef_structure1 = classification.make_ef_structure(
            score_structure1, fpf, sort_order)
        ef_structure2 = classification.make_ef_structure(
            score_structure2, fpf, sort_order)
        # Only report an EF difference when both ensembles produced one.
        if ef_structure1 and ef_structure2:
            ef_diff = classification.calculate_ef_diff(
                ef_structure1, ef_structure2, fpf)
            title = 'E%s' % fpf
            stats[title] = ef_diff
    # write results summary
    output.write_diff_summary(stats, options)
    # write roc curves
    if options.write_roc:
        print ( " Writing ROC data ... " )
        print('')
        output.write_roc(auc_structure_1, ensemble1, options)
        output.write_roc(auc_structure_2, ensemble2, options)
    # plot
    if options.plot:
        print ( " Making plots ... " )
        print('')
        plotter(molecules, ensemble_lookup, options)
def _encrypt_assertion(self, encrypt_cert, sp_entity_id, response, node_xpath=None):
    """Encryption of assertions.

    :param encrypt_cert: Certificate to be used for encryption.
    :param sp_entity_id: Entity ID for the calling service provider.
    :param response: A samlp.Response
    :param node_xpath: Unique path to the element to be encrypted.
    :return: A new samlp.Response with the designated assertion encrypted.
    """
    _certs = []
    if encrypt_cert:
        _certs.append(encrypt_cert)
    elif sp_entity_id is not None:
        # No explicit certificate given: look up the SP's encryption
        # certificates from metadata.
        _certs = self.metadata.certs(sp_entity_id, "any", "encryption")
    exception = None
    # Try each candidate certificate until one succeeds; remember the last
    # failure so it can be re-raised if none of them work.
    for _cert in _certs:
        try:
            begin_cert = "-----BEGIN CERTIFICATE-----\n"
            end_cert = "\n-----END CERTIFICATE-----\n"
            # Normalize to full PEM: metadata may hold a bare base64
            # certificate without the PEM armor lines.
            if begin_cert not in _cert:
                _cert = "%s%s" % (begin_cert, _cert)
            if end_cert not in _cert:
                _cert = "%s%s" % (_cert, end_cert)
            _, cert_file = make_temp(_cert.encode('ascii'), decode=False)
            response = self.sec.encrypt_assertion(
                response, cert_file, pre_encryption_part(),
                node_xpath=node_xpath)
            return response
        except Exception as ex:
            exception = ex
            pass
    if exception:
        raise exception
    # NOTE(review): when no certificate is available the response is
    # returned unencrypted -- confirm callers expect this fallback.
    return response
def _merge_similar(loci, loci_similarity):
    """Internal function to reduce loci complexity by merging similar loci.

    :param loci: dict of loci (class cluster values)
    :param loci_similarity: dict mapping a (locus, locus) pair to its
        similarity score
    :return: dict of merged clusters, keyed by new cluster id
    """
    n_cluster = 0
    internal_cluster = {}
    # Maps a locus id to the merged cluster id it already belongs to.
    clus_seen = {}
    # Process pairs from most to least similar (py2 iteritems).
    loci_sorted = sorted(loci_similarity.iteritems(),
                         key=operator.itemgetter(1), reverse=True)
    for pairs, sim in loci_sorted:
        # Only pairs above the configured similarity threshold are merged.
        common = sim > parameters.similar
        n_cluster += 1
        logger.debug("_merge_similar:try new cluster %s" % n_cluster)
        new_c = cluster(n_cluster)
        p_seen, p_unseen = [], []
        size = min(len(_get_seqs(loci[pairs[0]])), len(_get_seqs(loci[pairs[1]])))
        if common:
            consistent = _is_consistent(pairs, common, clus_seen, loci_similarity)
            logger.debug("_merge_similar: clusters seen: %s" % clus_seen)
            logger.debug("_merge_similar: id %s common %s|%s total %s consistent %s" %
                         (pairs, sim, common, size, consistent))
            if not consistent:
                continue
            # Partition the pair into members already merged (p_seen) and
            # members not yet assigned to a cluster (p_unseen).
            if pairs[0] in clus_seen:
                p_seen.append(pairs[0])
                p_unseen.append(pairs[1])
            if pairs[1] in clus_seen:
                p_seen.append(pairs[1])
                p_unseen.append(pairs[0])
            if len(p_seen) == 0:
                # Neither locus was merged before: start a fresh cluster
                # holding both.
                new_c = _merge_cluster(loci[pairs[0]], new_c)
                new_c = _merge_cluster(loci[pairs[1]], new_c)
                [clus_seen.update({p: n_cluster}) for p in pairs]
                internal_cluster[n_cluster] = new_c
            if len(p_seen) == 1:
                # One locus already belongs to a cluster: fold the other
                # one into that existing cluster.
                idc_seen = clus_seen[p_seen[0]]
                internal_cluster[idc_seen] = _merge_cluster(
                    loci[p_unseen[0]], internal_cluster[idc_seen])
                clus_seen[p_unseen[0]] = idc_seen
        else:
            logger.debug("_merge_similar: id %s %s are different" % pairs)
            continue
    # Loci never merged into any cluster get their own entries.
    internal_cluster.update(_add_unseen(loci, clus_seen, n_cluster))
    logger.debug("_merge_similar: total clus %s" %
                 len(internal_cluster.keys()))
    return internal_cluster
def open_stream(self, destination, timeout_ms=None):
    """Opens a new stream to a destination service on the device.

    Not the same as the posix 'open' or any other Open methods: this
    corresponds to the OPEN message described in the ADB protocol
    documentation. It creates a stream (uniquely identified by
    remote/local ids) that connects to a particular service endpoint.

    Args:
      destination: The service:command string, see ADB documentation.
      timeout_ms: Timeout in milliseconds for the Open to succeed (or as a
        PolledTimeout object).

    Raises:
      AdbProtocolError: Wrong local_id sent to us, or we didn't get a ready
        response.

    Returns:
      An AdbStream object that can be used to read/write data to the
      specified service endpoint, or None if the requested service couldn't
      be opened.
    """
    deadline = timeouts.PolledTimeout.from_millis(timeout_ms)
    stream_transport = self._make_stream_transport()
    open_message = adb_message.AdbMessage(
        command='OPEN',
        arg0=stream_transport.local_id,
        arg1=0,
        data=destination + '\0')
    self.transport.write_message(open_message, deadline)
    if stream_transport.ensure_opened(deadline):
        return AdbStream(destination, stream_transport)
    return None
def asset_class(self) -> str:
    """Return the full, colon-separated asset class path for this stock.

    Walks the chain of parent asset classes from this node up to the root
    and joins their names top-down, e.g. ``"Equity:US"``.

    :return: the asset class path, or an empty string when the stock has
        no parent.
    """
    # Collect parent names bottom-up, then reverse so the root comes first.
    # The previous implementation seeded the result with self.parent.name
    # and then walked the chain starting at self.parent again, which
    # duplicated the immediate parent's name (chain A -> B produced
    # "A:B:B") -- fixed here.
    names = []
    cursor = self.parent
    while cursor:
        names.append(cursor.name)
        cursor = cursor.parent
    return ":".join(reversed(names))
def set_property(self, prop, objects):
    """Add a property to the definition and set its ``objects``.

    Every object in ``objects`` gains the (object, prop) pair; any other
    known object has its pair with ``prop`` removed.
    """
    self._properties.add(prop)
    members = set(objects)
    self._objects |= members
    relation = self._pairs
    for obj in self._objects:
        if obj in members:
            relation.add((obj, prop))
        else:
            relation.discard((obj, prop))
# Builds, per survey year, a per-household table of vehicle counts by fuel
# type and stores it in the temporary store.
def build_homogeneisation_vehicules(temporary_store=None, year=None):
    assert temporary_store is not None
    """Compute vehicule numbers by type"""
    assert year is not None
    # Load data
    bdf_survey_collection = SurveyCollection.load(
        collection='budget_des_familles',
        config_files_directory=config_files_directory)
    survey = bdf_survey_collection.get_survey(
        'budget_des_familles_{}'.format(year))
    if year == 1995:
        vehicule = None
        # The 1995 BdF survey contains no information about the fuel type
        # used by the vehicles.
    if year == 2000:
        vehicule = survey.get_values(table="depmen")
        kept_variables = ['ident', 'carbu01', 'carbu02']
        vehicule = vehicule[kept_variables]
        vehicule.rename(columns={'ident': 'ident_men'}, inplace=True)
        vehicule.rename(columns={'carbu01': 'carbu1'}, inplace=True)
        vehicule.rename(columns={'carbu02': 'carbu2'}, inplace=True)
        vehicule["veh_tot"] = 1
        # Count each of the two recorded vehicles by fuel code
        # (1 = petrol, 2 = diesel -- TODO confirm coding).
        vehicule["veh_essence"] = 1 * (vehicule['carbu1'] == 1) + 1 * (vehicule['carbu2'] == 1)
        vehicule["veh_diesel"] = 1 * (vehicule['carbu1'] == 2) + 1 * (vehicule['carbu2'] == 2)
        vehicule.index = vehicule.index.astype(ident_men_dtype)
    if year == 2005:
        vehicule = survey.get_values(table="automobile")
        kept_variables = ['ident_men', 'carbu']
        vehicule = vehicule[kept_variables]
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = (vehicule['carbu'] == 1)
        vehicule["veh_diesel"] = (vehicule['carbu'] == 2)
    if year == 2011:
        # Table name casing differs between survey builds.
        try:
            vehicule = survey.get_values(table="AUTOMOBILE")
        except:
            vehicule = survey.get_values(table="automobile")
        kept_variables = ['ident_me', 'carbu']
        vehicule = vehicule[kept_variables]
        vehicule.rename(columns={'ident_me': 'ident_men'}, inplace=True)
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = (vehicule['carbu'] == 1)
        vehicule["veh_diesel"] = (vehicule['carbu'] == 2)
    # Compute the number of cars by category and save
    if year != 1995:
        # NOTE(review): tuple-style column selection after groupby is
        # legacy pandas syntax; works only on old pandas versions.
        vehicule = vehicule.groupby(by='ident_men')["veh_tot", "veh_essence", "veh_diesel"].sum()
        vehicule["pourcentage_vehicule_essence"] = 0
        vehicule.pourcentage_vehicule_essence.loc[vehicule.veh_tot != 0] = vehicule.veh_essence / vehicule.veh_tot
    # Save in temporary store (None for 1995, where no fuel data exists)
    temporary_store['automobile_{}'.format(year)] = vehicule
def connect(args):
    """
    %prog connect assembly.fasta read_mapping.blast

    Connect contigs using long reads.
    """
    p = OptionParser(connect.__doc__)
    p.add_option("--clip", default=2000, type="int",
                 help="Only consider end of contigs [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    fastafile, blastfile = args
    clip = opts.clip
    sizes = Sizes(fastafile).mapping
    blast = Blast(blastfile)
    blasts = []
    # Keep only hits that touch a contig end (within `clip` bp of either
    # terminus); interior hits cannot inform contig joins.
    for b in blast:
        seqid = b.subject
        size = sizes[seqid]
        start, end = b.sstart, b.sstop
        cstart, cend = min(size, clip), max(0, size - clip)
        if start > cstart and end < cend:
            continue
        blasts.append(b)

    key = lambda x: x.query
    blasts.sort(key=key)
    g = BiGraph()
    # Group hits per long read; a read hitting >= 2 contigs suggests a join.
    for query, bb in groupby(blasts, key=key):
        bb = sorted(bb, key=lambda x: x.qstart)
        nsubjects = len(set(x.subject for x in bb))
        if nsubjects == 1:
            continue
        print("\n".join(str(x) for x in bb))
        for a, b in pairwise(bb):
            astart, astop = a.qstart, a.qstop
            bstart, bstop = b.qstart, b.qstop
            if a.subject == b.subject:
                continue
            arange = astart, astop
            brange = bstart, bstop
            ov = range_intersect(arange, brange)
            alen = astop - astart + 1
            blen = bstop - bstart + 1
            if ov:
                ostart, ostop = ov
                ov = ostop - ostart + 1
            print(ov, alen, blen)
            # Heavily overlapping adjacent hits are ambiguous - skip them.
            if ov and (ov > alen / 2 or ov > blen / 2):
                print("Too much overlap ({0})".format(ov))
                continue
            asub = a.subject
            bsub = b.subject
            # Edge orientation tags follow the hit strands.
            atag = ">" if a.orientation == "+" else "<"
            btag = ">" if b.orientation == "+" else "<"
            g.add_edge(asub, bsub, atag, btag)

    graph_to_agp(g, blastfile, fastafile, verbose=False)
def create_shield_layer(shield, hashcode):
    """Create the pagan layer for the given shield name and hash code."""
    pgn_dir = '%s%spgn%s' % (PACKAGE_DIR, os.sep, os.sep)
    pgn_file = pgn_dir + shield + '.pgn'
    return pgnreader.parse_pagan_file(pgn_file, hashcode, sym=False, invert=False)
def create_plan(existing_users=None, proposed_users=None, purge_undefined=None,
                protected_users=None, allow_non_unique_id=None,
                manage_home=True, manage_keys=True):
    """Determine what changes are required.

    args:
        existing_users (Users): List of discovered users
        proposed_users (Users): List of proposed users
        purge_undefined (bool): Remove discovered users that have not been
            defined in proposed users list
        protected_users (list): List of users' names that should not be
            evaluated as part of the plan creation process
        allow_non_unique_id (bool): Allow more than one user to have the
            same uid
        manage_home (bool): Create/remove users' home directories
        manage_keys (bool): Add/update/remove users' keys (manage_home must
            also be true)

    returns:
        list: Differences between discovered and proposed users with a list
        of operations that will achieve the desired state.
    """
    plan = list()
    proposed_usernames = list()
    # NOTE(review): explicit falsy arguments (False, empty list) are also
    # replaced by the constants defaults below -- confirm this is intended.
    if not purge_undefined:
        purge_undefined = constants.PURGE_UNDEFINED
    if not protected_users:
        protected_users = constants.PROTECTED_USERS
    if not allow_non_unique_id:
        allow_non_unique_id = constants.ALLOW_NON_UNIQUE_ID
    # Create list of modifications to make based on proposed users compared
    # to existing users
    for proposed_user in proposed_users:
        proposed_usernames.append(proposed_user.name)
        user_matching_name = existing_users.describe_users(
            users_filter=dict(name=proposed_user.name))
        user_matching_id = get_user_by_uid(uid=proposed_user.uid,
                                           users=existing_users)
        # uid already taken by a different username: record a failure entry
        if not allow_non_unique_id and user_matching_id and not user_matching_name:
            plan.append(
                dict(action='fail', error='uid_clash',
                     proposed_user=proposed_user, state='existing',
                     result=None))
        # If user does not exist
        elif not user_matching_name:
            plan.append(
                dict(action='add', proposed_user=proposed_user,
                     state='missing', result=None, manage_home=manage_home,
                     manage_keys=manage_keys))
        # If they do, then compare
        else:
            user_comparison = compare_user(passed_user=proposed_user,
                                           user_list=existing_users)
            if user_comparison.get('result'):
                plan.append(
                    dict(action='update', proposed_user=proposed_user,
                         state='existing', user_comparison=user_comparison,
                         manage_home=manage_home, manage_keys=manage_keys))
    # Application of the proposed user list will not result in deletion of
    # users that need to be removed.
    # If 'PURGE_UNDEFINED' then look for existing users that are not defined
    # in proposed usernames and mark for removal
    if purge_undefined:
        for existing_user in existing_users:
            if existing_user.name not in proposed_usernames:
                if existing_user.name not in protected_users:
                    plan.append(
                        dict(action='delete', username=existing_user.name,
                             state='existing', manage_home=manage_home,
                             manage_keys=manage_keys))
    return plan
def _message_callback(self, msg):
    """Callback function to handle incoming MIDI messages.

    Dispatches button presses, fader touch/position updates and rotary
    turns to the corresponding ``on_*`` handlers.
    """
    if msg.type == 'polytouch':
        button = button_from_press(msg.note)
        if button:
            # value != 0 means pressed, 0 means released.
            self.on_button(button, msg.value != 0)
        elif msg.note == 127:
            # Polytouch on note 127 reports the fader touch sensor --
            # presumably device-specific; confirm against the protocol.
            self.on_fader_touch(msg.value != 0)
    elif msg.type == 'control_change' and msg.control == 0:
        # Controller 0 carries the most significant 7 bits of the fader.
        self._msb = msg.value
    elif msg.type == 'control_change' and msg.control == 32:
        # Controller 32 carries the LSB; combine with the stored MSB and
        # scale the 14-bit value down to 10 bits.
        self._fader = (self._msb << 7 | msg.value) >> 4
        self.on_fader(self._fader)
    elif msg.type == 'pitchwheel':
        # The pitch wheel's sign encodes the rotary turn direction.
        self.on_rotary(1 if msg.pitch < 0 else -1)
    else:
        print('Unhandled:', msg)
def _prep_bins():
    """Support for running straight out of a cloned source directory instead
    of an installed distribution.

    Reads the platform-specific prebuilt manifest (if present) and copies
    the listed files next to this package. Missing manifests or files are
    silently ignored.
    """
    from os import path
    from sys import platform, maxsize
    from shutil import copy
    bit_suffix = "-x86_64" if maxsize > 2 ** 32 else "-x86"
    package_root = path.abspath(path.dirname(__file__))
    prebuilt_path = path.join(package_root, "prebuilt", platform + bit_suffix)
    config = {"MANIFEST_DIR": prebuilt_path}
    try:
        execfile(path.join(prebuilt_path, "manifest.pycfg"), config)
    except IOError:
        return  # there are no prebuilts for this platform - nothing to do
    # The manifest lists bare file names; join each with the prebuilt
    # directory exactly once. (The previous version joined twice: once in
    # a map() and again in the copy() call.)
    for prebuilt_file in config["FILES"]:
        try:
            copy(path.join(prebuilt_path, prebuilt_file), package_root)
        except IOError:
            pass
def _ascii2(value):
    """A variant of the `ascii()` built-in function known from Python 3 that
    (1) ensures ASCII-only output, and (2) produces a nicer formatting for
    use in exception and warning messages and other human consumption.

    This function calls `ascii()` and post-processes its output as follows:

    * For unicode strings, a leading 'u' is stripped (u'xxx' becomes 'xxx'),
      if present.
    * For byte strings, a leading 'b' is stripped (b'xxx' becomes 'xxx'),
      if present.
    * For unicode strings, non-ASCII Unicode characters in the range U+0000
      to U+00FF are represented as '/u00hh' instead of the confusing '/xhh'
      ('/' being a backslash, 'hh' being a 2-digit hex number).

    This function correctly handles values of collection types such as
    list, tuple, dict, and set, by producing the usual Python representation
    string for them. If the type is not the standard Python type (i.e.
    OrderedDict instead of dict), the type name is also shown in the result.

    Returns:
      str: ASCII string
    """
    if isinstance(value, Mapping):
        # NocaseDict in current impl. is not a Mapping; it uses
        # its own repr() implementation (via ascii(), called further down)
        items = [_ascii2(k) + ": " + _ascii2(v)
                 for k, v in six.iteritems(value)]
        item_str = "{" + ", ".join(items) + "}"
        # Plain dicts show bare braces; subclasses get their type name.
        if value.__class__.__name__ == 'dict':
            return item_str
        return "{0}({1})".format(value.__class__.__name__, item_str)
    if isinstance(value, Set):
        items = [_ascii2(v) for v in value]
        item_str = "{" + ", ".join(items) + "}"
        if value.__class__.__name__ == 'set':
            return item_str
        return "{0}({1})".format(value.__class__.__name__, item_str)
    if isinstance(value, MutableSequence):
        items = [_ascii2(v) for v in value]
        item_str = "[" + ", ".join(items) + "]"
        if value.__class__.__name__ == 'list':
            return item_str
        return "{0}({1})".format(value.__class__.__name__, item_str)
    if isinstance(value, Sequence) and \
            not isinstance(value, (six.text_type, six.binary_type)):
        items = [_ascii2(v) for v in value]
        # Single-element tuples need the trailing comma.
        if len(items) == 1:
            item_str = "(" + ", ".join(items) + ",)"
        else:
            item_str = "(" + ", ".join(items) + ")"
        if value.__class__.__name__ == 'tuple':
            return item_str
        return "{0}({1})".format(value.__class__.__name__, item_str)
    if isinstance(value, six.text_type):
        ret = ascii(value)  # returns type str in py2 and py3
        if ret.startswith('u'):
            ret = ret[1:]
        # Convert /xhh into /u00hh.
        # The two look-behind patterns address at least some of the cases
        # that should not be converted: Up to 5 backslashes in repr()
        # result are handled correctly. The failure that happens starting
        # with 6 backslashes and even numbers of backslashes above that is
        # not dramatic: The /xhh is converted to /u00hh even though it
        # shouldn't.
        ret = re.sub(r'(?<![^\\]\\)(?<![^\\]\\\\\\)\\x([0-9a-fA-F]{2})',
                     r'\\u00\1', ret)
    elif isinstance(value, six.binary_type):
        ret = ascii(value)  # returns type str in py2 and py3
        if ret.startswith('b'):
            ret = ret[1:]
    elif isinstance(value, (six.integer_types, float)):
        # str() on Python containers calls repr() on the items. PEP 3140
        # that attempted to fix that, has been rejected. See
        # https://www.python.org/dev/peps/pep-3140/.
        # We don't want to make that same mistake, and because ascii()
        # calls repr(), we call str() on the items explicitly. This makes a
        # difference for example for all pywbem.CIMInt values.
        ret = str(value)
    else:
        ret = ascii(value)  # returns type str in py2 and py3
    return ret
def makeringlatticeCIJ ( n , k , seed = None ) : '''This function generates a directed lattice network with toroidal boundary counditions ( i . e . with ring - like " wrapping around " ) . Parameters N : int number of vertices K : int number of edges seed : hashable , optional If None ( default ) , use the np . random ' s global random state to generate random numbers . Otherwise , use a new np . random . RandomState instance seeded with the given value . Returns CIJ : NxN np . ndarray connection matrix Notes The lattice is made by placing connections as close as possible to the main diagonal , with wrapping around . No connections are made on the main diagonal . In / Outdegree is kept approx . constant at K / N .'''
rng = get_rng ( seed ) # initialize CIJ = np . zeros ( ( n , n ) ) CIJ1 = np . ones ( ( n , n ) ) kk = 0 count = 0 seq = range ( 1 , n ) seq2 = range ( n - 1 , 0 , - 1 ) # fill in while kk < k : count += 1 dCIJ = np . triu ( CIJ1 , seq [ count ] ) - np . triu ( CIJ1 , seq [ count ] + 1 ) dCIJ2 = np . triu ( CIJ1 , seq2 [ count ] ) - np . triu ( CIJ1 , seq2 [ count ] + 1 ) dCIJ = dCIJ + dCIJ . T + dCIJ2 + dCIJ2 . T CIJ += dCIJ kk = int ( np . sum ( CIJ ) ) # remove excess connections overby = kk - k if overby : i , j = np . where ( dCIJ ) rp = rng . permutation ( np . size ( i ) ) for ii in range ( overby ) : CIJ [ i [ rp [ ii ] ] , j [ rp [ ii ] ] ] = 0 return CIJ
def cybox_RAW_ft_handler(self, enrichment, fact, attr_info, add_fact_kargs):
    """Handler for facts whose content is to be written to disk rather than
    stored in the database.

    Used for all elements that contain the string 'Raw_' ('Raw_Header',
    'Raw_Artifact', ...).
    """
    raw_value = add_fact_kargs['values'][0]
    # Values below the threshold stay in the database; larger ones are
    # written out and replaced by a (hash, location) reference.
    if len(raw_value) >= RAW_DATA_TO_DB_FOR_LENGTH_LESS_THAN:
        value_hash, storage_location = write_large_value(
            raw_value, dingos.DINGOS_LARGE_VALUE_DESTINATION)
        add_fact_kargs['values'] = [(value_hash, storage_location)]
    return True