signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def _handler(self, sender, setting, value, **kwargs):
    """Handler for the ``setting_changed`` signal.

    @see :ref:`django:setting-changed`_
    """
    # Ignore settings outside this holder's namespace.
    if not setting.startswith(self.prefix):
        return
    self._set_attr(setting, value)
def add_root(cls, **kwargs):
    """Adds a root node to the tree.

    If ``kwargs`` is exactly ``{'instance': obj}``, the passed (unsaved)
    instance is attached as a root node; otherwise a new node is built
    from ``kwargs``.

    :raises NodeAlreadySaved: if the passed instance already has a pk.
    :returns: the saved root node.
    """
    if len(kwargs) == 1 and 'instance' in kwargs:
        # adding the passed (unsaved) instance to the tree
        newobj = kwargs['instance']
        if newobj.pk:
            raise NodeAlreadySaved(
                "Attempted to add a tree node that is "
                "already in the database")
    else:
        newobj = cls(**kwargs)
    newobj._cached_depth = 1
    if not cls.node_order_by:
        # Append after the last existing root.  Renamed from ``max`` to
        # avoid shadowing the builtin.
        try:
            max_sib_order = get_result_class(cls).objects.filter(
                parent__isnull=True).order_by(
                'sib_order').reverse()[0].sib_order
        except IndexError:
            # No roots yet.
            max_sib_order = 0
        newobj.sib_order = max_sib_order + 1
    newobj.save()
    return newobj
def role_search(auth=None, **kwargs):
    '''Search roles

    CLI Example:

    .. code-block:: bash

        salt '*' keystoneng.role_search
        salt '*' keystoneng.role_search name=role1
        salt '*' keystoneng.role_search domain_id=b62e76fbeeff4e8fb77073f591cf211e
    '''
    cleaned_kwargs = _clean_kwargs(**kwargs)
    return get_operator_cloud(auth).search_roles(**cleaned_kwargs)
def is_period_arraylike(arr):
    """Check whether an array-like is a periodical array-like or PeriodIndex.

    Parameters
    ----------
    arr : array-like
        The array-like to check.

    Returns
    -------
    boolean
        Whether or not the array-like is a periodical array-like or
        PeriodIndex instance.

    Examples
    --------
    >>> is_period_arraylike([1, 2, 3])
    False
    >>> is_period_arraylike(pd.Index([1, 2, 3]))
    False
    >>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
    True
    """
    # Period containers are recognized directly by type.
    if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)):
        return True
    # ndarray/Series: decide by dtype.
    if isinstance(arr, (np.ndarray, ABCSeries)):
        return is_period_dtype(arr.dtype)
    # Fall back to pandas' inferred type, if the object exposes one.
    return getattr(arr, 'inferred_type', None) == 'period'
def bsrchd(value, ndim, array):
    """Do a binary search for a key value within a double precision array,
    assumed to be in increasing order. Return the index of the matching
    array entry, or -1 if the key value is not found.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bsrchd_c.html

    :param value: Value to find in array.
    :type value: float
    :param ndim: Dimension of array.
    :type ndim: int
    :param array: Array to be searched.
    :type array: Array of floats
    :return: index
    :rtype: int
    """
    # Marshal the Python values into C types and delegate to CSPICE.
    return libspice.bsrchd_c(ctypes.c_double(value),
                             ctypes.c_int(ndim),
                             stypes.toDoubleVector(array))
def replace_headers(source_pdb_content, target_pdb_content):
    '''Takes the headers from source_pdb_content and adds them to
    target_pdb_content, removing any headers that target_pdb_content had.
    Only the content up to the first structural line are taken from
    source_pdb_content and only the content from the first structural line
    in target_pdb_content are taken.
    '''
    source = PDB(source_pdb_content)
    target = PDB(target_pdb_content)
    # Header = everything before the first structural record of the source.
    header_lines = []
    for line in source.lines:
        if line[:6].strip() in non_header_records:
            break
        header_lines.append(line)
    # Body = everything from the first structural record of the target on.
    body_lines = []
    seen_structure = False
    for line in target.lines:
        if line[:6].strip() in non_header_records:
            seen_structure = True
        if seen_structure:
            body_lines.append(line)
    return '\n'.join(header_lines + body_lines)
def _update_callbacks(self, plot):
    """Iterates over all subplots and updates existing CustomJS callbacks
    with models that were replaced when compositing subplots into a
    CompositePlot and sets the plot id to match the root level bokeh model.
    """
    element_plots = self.traverse(lambda x: x, [GenericElementPlot])
    # Map each merged tool name to the tool models found on the root plot.
    merged_tools = {
        tool: list(plot.select({'type': TOOL_TYPES[tool]}))
        for tool in self._merged_tools
    }
    for element_plot in element_plots:
        for callback in element_plot.callbacks:
            for customjs in callback.callbacks:
                # Rebind callback args to the merged (root-level) tools.
                for tool, models in merged_tools.items():
                    if tool in customjs.args and models:
                        customjs.args[tool] = models[0]
                if self.top_level:
                    customjs.code = customjs.code.replace('PLACEHOLDER_PLOT_ID', self.id)
def from_string(address):
    """Return new object by the given MAC-address

    :param address: address to convert
    :return: WMACAddress
    """
    # Normalize every supported notation to a bare hex string.
    str_address = None
    for pattern, separator in ((WMACAddress.re_dash_format, "-"),
                               (WMACAddress.re_colon_format, ":"),
                               (WMACAddress.re_cisco_format, ".")):
        if pattern.match(address):
            str_address = "".join(address.split(separator))
            break
    else:
        if WMACAddress.re_spaceless_format.match(address):
            str_address = address
    if str_address is None:
        raise ValueError("Invalid MAC address format: " + address)
    result = WMACAddress()
    # Consume the hex string two characters (one octet) at a time.
    for octet_index in range(WMACAddress.octet_count):
        octet = str_address[octet_index * 2: octet_index * 2 + 2]
        result.__address[octet_index] = int(octet, 16)
    return result
def fetch_all_objects_from_db_by_pklist(self,
                                        cls: Type,
                                        table: str,
                                        fieldlist: Sequence[str],
                                        pklist: Sequence[Any],
                                        construct_with_pk: bool,
                                        *args) -> List[T]:
    """Fetches all objects from a table, given a list of PKs."""
    objects = []
    for pk in pklist:
        if construct_with_pk:
            # The class is expected to do its own fetching when handed a PK.
            obj = cls(pk, *args)
        else:
            obj = cls(*args)
            self.fetch_object_from_db_by_pk(obj, table, fieldlist, pk)
        objects.append(obj)
    return objects
def clean_entity(self, ent):
    """Strip out extra words that often get picked up by spaCy's NER.

    To do: preserve info about what got stripped out to help with
    ES/Geonames resolution later.

    Parameters
    ----------
    ent: a spaCy named entity Span

    Returns
    -------
    new_ent: a spaCy Span, with extra words stripped out.
    """
    # frozenset gives O(1) membership tests; the original list also
    # contained a duplicate "prison" entry, removed here.
    dump_words = frozenset([
        'province', 'the', 'area', 'airport', 'district', 'square',
        'town', 'village', 'prison', 'river', 'valley', 'provincial',
        'region', 'municipality', 'state', 'territory', 'of', 'in',
        'county', 'central'])
    # maybe have 'city'? Works differently in different countries
    # also, "District of Columbia". Might need to use cap/no cap
    keep_positions = np.asarray([word.i for word in ent
                                 if word.text.lower() not in dump_words])
    try:
        # can't set directly
        # new_ent.label_.__set__(ent.label_)
        new_ent = ent.doc[keep_positions.min(): keep_positions.max() + 1]
    except ValueError:
        # Every token was a throwaway word: fall back to the original span.
        new_ent = ent
    return new_ent
def open_in_editor(self, cli):
    """Open code in editor.

    :param cli: :class:`~prompt_toolkit.interface.CommandLineInterface`
                instance.
    """
    if self.read_only():
        raise EditReadOnlyBuffer()

    # Write the buffer text to a temporary file.
    descriptor, filename = tempfile.mkstemp(self.tempfile_suffix)
    os.write(descriptor, self.text.encode('utf-8'))
    os.close(descriptor)

    # Open in editor.
    # (We need to use `cli.run_in_terminal`, because not all editors go to
    # the alternate screen buffer, and some could influence the cursor
    # position.)
    succeeded = cli.run_in_terminal(
        lambda: self._open_file_in_editor(filename))

    if succeeded:
        # Read content again.
        with open(filename, 'rb') as f:
            text = f.read().decode('utf-8')
            # Drop trailing newline. (Editors are supposed to add it at the
            # end, but we don't need it.)
            if text.endswith('\n'):
                text = text[:-1]
            self.document = Document(text=text, cursor_position=len(text))

    # Clean up temp file.
    os.remove(filename)
def remove(name=None, pkgs=None, **kwargs):
    '''Remove specified package. Accepts full or partial FMRI. In case of
    multiple match, the command fails and won't modify the OS.

    name
        The name of the package to be deleted.

    Multiple Package Options:

    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.

    Returns a list containing the removed packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove tcsh
        salt '*' pkg.remove pkg://solaris/shell/tcsh
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
    if not targets:
        return {}

    if pkgs:
        log.debug('Removing these packages instead of %s: %s', name, targets)

    # Snapshot the installed packages so the changes can be computed.
    old = list_pkgs()

    # Remove the package(s).
    out = __salt__['cmd.run_all'](
        ['/bin/pkg', 'uninstall', '-v'] + targets,
        output_loglevel='trace')

    # Invalidate the cached package list, re-query, and diff.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if out['retcode'] != 0:
        raise CommandExecutionError(
            'Error occurred removing package(s)',
            info={'changes': ret,
                  'retcode': ips_pkg_return_values[out['retcode']],
                  'errors': [out['stderr']]})

    return ret
def _ParseIdentifierMappingRecord(self, parser_mediator, table_name, esedb_record):
    """Extracts an identifier mapping from a SruDbIdMapTable record.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      table_name (str): name of the table the record is stored in.
      esedb_record (pyesedb.record): record.

    Returns:
      tuple[int, str]: numeric identifier and its string representation or
          None, None if no identifier mapping can be retrieved from the
          record.
    """
    warn = parser_mediator.ProduceExtractionWarning

    record_values = self._GetRecordValues(
        parser_mediator, table_name, esedb_record)

    identifier = record_values.get('IdIndex', None)
    if identifier is None:
        warn('IdIndex value missing from table: SruDbIdMapTable')
        return None, None

    identifier_type = record_values.get('IdType', None)
    if identifier_type not in self._SUPPORTED_IDENTIFIER_TYPES:
        warn('unsupported IdType value: {0!s} in table: SruDbIdMapTable'.format(
            identifier_type))
        return None, None

    mapped_value = record_values.get('IdBlob', None)
    if mapped_value is None:
        warn('IdBlob value missing from table: SruDbIdMapTable')
        return None, None

    if identifier_type == 3:
        # Type 3 blobs hold a Windows NT security identifier (SID).
        try:
            fwnt_identifier = pyfwnt.security_identifier()
            fwnt_identifier.copy_from_byte_stream(mapped_value)
            mapped_value = fwnt_identifier.get_string()
        except IOError:
            warn('unable to decode IdBlob value as Windows NT security identifier')
            return None, None
    else:
        # All other supported types hold a UTF-16 little-endian string.
        try:
            mapped_value = mapped_value.decode('utf-16le').rstrip('\0')
        except UnicodeDecodeError:
            warn('unable to decode IdBlob value as UTF-16 little-endian string')
            return None, None

    return identifier, mapped_value
def _forwardImplementation(self, inbuf, outbuf):
    """Proportional probability method."""
    assert self.module
    propensities = self.module.getActionValues(0)
    # Normalize the propensities into a probability distribution.
    probabilities = propensities / sum(propensities)
    # action = drawIndex(probabilities)
    chosen_action = eventGenerator(probabilities)
    outbuf[:] = scipy.array([chosen_action])
def delete_license(license_id):
    """Delete a License by ID"""
    response = utils.checked_api_call(pnc_api.licenses, 'delete', id=license_id)
    # A falsy response means the call failed; implicitly return None then.
    if not response:
        return None
    return utils.format_json(response.content)
def update_supplier(self, supplier_id, supplier_dict):
    """Updates a supplier

    :param supplier_id: the supplier id
    :param supplier_dict: dict
    :return: dict
    """
    return self._create_put_request(
        resource=SUPPLIERS,
        billomat_id=supplier_id,
        send_data=supplier_dict,
    )
def smart_search(self, *arguments):
    """Perform a smart search on the given keywords or patterns.

    :param arguments: The keywords or patterns to search for.
    :returns: The matched password names (a list of strings).
    :raises: The following exceptions can be raised:

             - :exc:`.NoMatchingPasswordError` when no matching passwords
               are found.
             - :exc:`.EmptyPasswordStoreError` when the password store is
               empty.

    This method first tries :func:`simple_search()` and if that doesn't
    produce any matches it will fall back to :func:`fuzzy_search()`. If no
    matches are found an exception is raised (see above).
    """
    matches = self.simple_search(*arguments)
    if matches:
        return matches

    logger.verbose("Falling back from substring search to fuzzy search ..")
    matches = self.fuzzy_search(*arguments)
    if matches:
        return matches

    # Nothing matched: distinguish "no hits" from "empty store".
    if len(self.filtered_entries) > 0:
        raise NoMatchingPasswordError(format(
            "No passwords matched the given arguments! (%s)",
            concatenate(map(repr, arguments))))
    msg = "You don't have any passwords yet! (no *.gpg files found)"
    raise EmptyPasswordStoreError(msg)
def parse_attr_signature(sig):
    """Parse an attribute signature"""
    match = ATTR_SIG_RE.match(sig.strip())
    if not match:
        raise RuntimeError('Attribute signature invalid, got ' + sig)
    name, _, params = match.groups()
    # A missing or blank parameter list means a plain attribute.
    if params is None or params.strip() == '':
        parsed_params = []
    else:
        parsed_params = [parse_param_signature(p) for p in split_sig(params)]
    return (name, parsed_params)
def create_audit_event(self, code='AUDIT'):
    """Creates a generic auditing Event logging the changes between saves
    and the initial data in creates.

    Kwargs:
        code (str): The code to set the new Event to.

    Returns:
        Event: A new event with relevant info inserted into it
    """
    audit = self._meta.event_model(
        code=code,
        model=self.__class__.__name__,
    )
    # Use the logged in User, if possible
    if current_user:
        audit.created_by = current_user.get_id()
    self.copy_foreign_keys(audit)
    self.populate_audit_fields(audit)
    return audit
def createRoles(self, configFiles, dateTimeFormat=None):
    """Parses a JSON configuration file to create roles.

    Args:
        configFiles (list): A list of JSON files on disk containing
            configuration data for creating roles.
        dateTimeFormat (str): A valid date formatting directive, as
            understood by :py:meth:`datetime.datetime.strftime`. Defaults
            to ``None``, i.e., ``'%Y-%m-%d %H:%M'``.
    """
    if dateTimeFormat is None:
        dateTimeFormat = '%Y-%m-%d %H:%M'
    scriptStartTime = datetime.datetime.now()
    try:
        print("********************Create Roles********************")
        print("Script started at %s" % scriptStartTime.strftime(dateTimeFormat))
        # A valid, logged-in security handler is required before any work.
        if self.securityhandler.valid == False:
            print("Login required")
        else:
            orgTools = orgtools.orgtools(securityinfo=self)
            if orgTools is None:
                print("Error creating orgtools")
            else:
                # Process each configuration file independently.
                for configFile in configFiles:
                    config = common.init_config_json(config_file=configFile)
                    if config is not None:
                        startTime = datetime.datetime.now()
                        print("Processing config %s, starting at: %s" % (configFile, startTime.strftime(dateTimeFormat)))
                        roleInfos = config['Roles']
                        # Create one role per entry in the 'Roles' section.
                        for roleInfo in roleInfos:
                            createRoleResults = orgTools.createRole(roleInfo['Name'], roleInfo['Description'], roleInfo['Privileges'])
                    else:
                        print("Config %s not found" % configFile)
    except (TypeError, ValueError, AttributeError) as e:
        print(e)
    except (common.ArcRestHelperError) as e:
        # ArcRestHelperError carries its details in e[0] as a dict.
        print("error in function: %s" % e[0]['function'])
        print("error on line: %s" % e[0]['line'])
        print("error in file name: %s" % e[0]['filename'])
        print("with error message: %s" % e[0]['synerror'])
        if 'arcpyError' in e[0]:
            print("with arcpy message: %s" % e[0]['arcpyError'])
    except Exception as e:
        # Report-tools errors share the same e[0] dict layout; everything
        # else is reported via the trace() helper.
        if (reportToolsInstalled):
            if isinstance(e, (ReportTools.ReportToolsError, DataPrep.DataPrepError)):
                print("error in function: %s" % e[0]['function'])
                print("error on line: %s" % e[0]['line'])
                print("error in file name: %s" % e[0]['filename'])
                print("with error message: %s" % e[0]['synerror'])
                if 'arcpyError' in e[0]:
                    print("with arcpy message: %s" % e[0]['arcpyError'])
            else:
                line, filename, synerror = trace()
                print("error on line: %s" % line)
                print("error in file name: %s" % filename)
                print("with error message: %s" % synerror)
        else:
            line, filename, synerror = trace()
            print("error on line: %s" % line)
            print("error in file name: %s" % filename)
            print("with error message: %s" % synerror)
    finally:
        print("Script complete, time to complete: %s" % str(datetime.datetime.now() - scriptStartTime))
        print("###############Create Groups Completed#################")
        print("")
        # if orgTools is not None:
        #     orgTools.dispose()
        # Explicit cleanup of locals followed by a forced garbage
        # collection (house style of this module).
        groupInfo = None
        groupFile = None
        iconPath = None
        startTime = None
        thumbnail = None
        result = None
        config = None
        sciptPath = None
        orgTools = None
        del groupInfo
        del groupFile
        del iconPath
        del startTime
        del thumbnail
        del result
        del config
        del sciptPath
        del orgTools
        gc.collect()
def get_assessment_ids_by_banks(self, bank_ids):
    """Gets the list of ``Assessment Ids`` corresponding to a list of
    ``Banks``.

    arg:    bank_ids (osid.id.IdList): list of bank ``Ids``
    return: (osid.id.IdList) - list of bank ``Ids``
    raise:  NullArgument - ``bank_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resource_ids_by_bins
    return IdList([assessment.get_id()
                   for assessment in self.get_assessments_by_banks(bank_ids)])
def get(self, sid):
    """Constructs a EventContext

    :param sid: The sid

    :returns: twilio.rest.taskrouter.v1.workspace.event.EventContext
    :rtype: twilio.rest.taskrouter.v1.workspace.event.EventContext
    """
    workspace_sid = self._solution['workspace_sid']
    return EventContext(self._version, workspace_sid=workspace_sid, sid=sid)
def walk(obj, path='', skiphidden=True):
    """Returns a recursive iterator over all Nodes starting from
    findnode(obj, path).

    If skiphidden is True (the default) then structure branches starting
    with an underscore will be ignored.
    """
    return walknode(findnode(obj, path), skiphidden)
def request(self, *args, **kwargs):
    """Issue the HTTP request capturing any errors that may occur."""
    try:
        response = self._http.request(*args, timeout=TIMEOUT, **kwargs)
    except Exception as exc:
        # Wrap any failure together with the call arguments for context.
        raise RequestException(exc, args, kwargs)
    return response
def pycache_clean(context):
    "Remove __pycache__ directories"
    # pylint: disable=unused-argument
    # Collect every __pycache__ directory under the current directory.
    targets = {
        os.path.join(root, '__pycache__')
        for root, dirnames, _ in os.walk(os.curdir)
        if '__pycache__' in dirnames
    }
    print("Removing __pycache__ directories")
    rmrf(targets, verbose=False)
def yum_install(**kwargs):
    """installs a yum package

    Kwargs:
        packages (iterable): names of the packages to install.
        repo (str): optional; when given, packages are installed with that
            repository explicitly enabled.
    """
    # Look the repo up once with kwargs.get() instead of assigning it
    # conditionally and probing ``locals()`` inside the loop, which was
    # fragile and hard to read.
    repo = kwargs.get('repo')
    for pkg in list(kwargs['packages']):
        # Skip packages that are already installed.
        if is_package_installed(distribution='el', pkg=pkg) is False:
            if repo is not None:
                log_green("installing %s from repo %s ..." % (pkg, repo))
                sudo("yum install -y --quiet --enablerepo=%s %s" % (repo, pkg))
            else:
                log_green("installing %s ..." % pkg)
                sudo("yum install -y --quiet %s" % pkg)
def table_name(self):
    """Get slice name.

    In case of 2D return cube name. In case of 3D, return the combination
    of the cube name with the label of the corresponding slice (nth label
    of the 0th dimension).
    """
    cube = self._cube
    if cube.ndim < 3 and not self.ca_as_0th:
        return None
    # nth label of the 0th dimension identifies this slice.
    slice_label = cube.labels()[0][self._index]
    return "%s: %s" % (cube.name, slice_label)
def dataset_exists(self, dataset):
    """Returns whether the given dataset exists.
    If regional location is specified for the dataset, that is also checked
    to be compatible with the remote dataset, otherwise an exception is
    thrown.

    :param dataset:
    :type dataset: BQDataset
    """
    try:
        response = self.client.datasets().get(
            projectId=dataset.project_id,
            datasetId=dataset.dataset_id).execute()
    except http.HttpError as ex:
        # 404 simply means "does not exist"; anything else is an error.
        if ex.resp.status == 404:
            return False
        raise
    # The dataset exists; verify the regional location is compatible.
    if dataset.location is not None:
        fetched_location = response.get('location')
        if dataset.location != fetched_location:
            raise Exception('''Dataset already exists with regional location {}. Can't use {}.'''.format(
                fetched_location if fetched_location is not None else 'unspecified',
                dataset.location))
    return True
def closeEvent(self, event):
    """Overloads the close event for this widget to make sure that the
    data is properly saved before exiting.

    :param      event | <QCloseEvent>
    """
    # Only allow the close to proceed once saving succeeded.
    if self.saveOnClose() and self.checkForSave():
        super(XScintillaEdit, self).closeEvent(event)
    else:
        event.ignore()
def post(self):
    ''':ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system

    .. http:post:: /login

        :reqheader X-Auth-Token: |req_token|
        :reqheader Accept: |req_accept|
        :reqheader Content-Type: |req_ct|

        :form eauth: the eauth backend configured for the user
        :form username: username
        :form password: password

        :status 200: |200|
        :status 400: |400|
        :status 401: |401|
        :status 406: |406|
        :status 500: |500|

    **Example request:**

    .. code-block:: bash

        curl -si localhost:8000/login \\
            -H "Accept: application/json" \\
            -d username='saltuser' \\
            -d password='saltpass' \\
            -d eauth='pam'

    .. code-block:: text

        POST / HTTP/1.1
        Host: localhost:8000
        Content-Length: 42
        Content-Type: application/x-www-form-urlencoded
        Accept: application/json

        username=saltuser&password=saltpass&eauth=pam

    **Example response:**

    .. code-block:: text

        HTTP/1.1 200 OK
        Content-Type: application/json
        Content-Length: 206
        X-Auth-Token: 6d1b722e
        Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

        {"return": {"token": "6d1b722e",
                    "start": 1363805943.776223,
                    "expire": 1363849143.776224,
                    "user": "saltuser",
                    "eauth": "pam",
                    "perms": ["grains.*", "status.*", "sys.*", "test.*"
    '''
    try:
        if not isinstance(self.request_payload, dict):
            self.send_error(400)
            return
        creds = {'username': self.request_payload['username'],
                 'password': self.request_payload['password'],
                 'eauth': self.request_payload['eauth'],
                 }
    # if any of the args are missing, its a bad request
    except KeyError:
        self.send_error(400)
        return

    token = self.application.auth.mk_token(creds)
    if 'token' not in token:
        # TODO: nicer error message
        # 'Could not authenticate using provided credentials')
        self.send_error(401)
        # return since we don't want to execute any more
        return

    # Grab eauth config for the current backend for the current user
    try:
        eauth = self.application.opts['external_auth'][token['eauth']]
        # Get sum of '*' perms, user-specific perms, and group-specific perms
        _perms = eauth.get(token['name'], [])
        _perms.extend(eauth.get('*', []))

        if 'groups' in token and token['groups']:
            user_groups = set(token['groups'])
            # eauth group entries are suffixed with '%'.
            eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])

            for group in user_groups & eauth_groups:
                _perms.extend(eauth['{0}%'.format(group)])

        # dedup. perm can be a complex dict, so we cant use set
        perms = []
        for perm in _perms:
            if perm not in perms:
                perms.append(perm)
    # If we can't find the creds, then they aren't authorized
    except KeyError:
        self.send_error(401)
        return

    except (AttributeError, IndexError):
        log.debug(
            "Configuration for external_auth malformed for eauth '%s', "
            "and user '%s'.", token.get('eauth'), token.get('name'),
            exc_info=True)
        # TODO better error -- 'Configuration for external_auth could not be read.'
        self.send_error(500)
        return

    ret = {'return': [{
        'token': token['token'],
        'expire': token['expire'],
        'start': token['start'],
        'user': token['name'],
        'eauth': token['eauth'],
        'perms': perms,
    }]}

    self.write(self.serialize(ret))
def filter(self, query, inplace=True):
    """Use a query statement to filter data. Note that you specify the
    data to be removed!

    Parameters
    ----------
    query : string
        The query string to be evaluated. Is directly provided to
        pandas.DataFrame.query
    inplace : bool
        if True, change the container dataframe in place (defaults to True)

    Returns
    -------
    result : :py:class:`pandas.DataFrame`
        DataFrame that contains the result of the filter application
    """
    with LogDataChanges(self, filter_action='filter', filter_query=query):
        # The query selects rows to *remove*, hence the negation.
        return self.data.query('not ({0})'.format(query), inplace=inplace)
def patch_worker_factory():
    """Patches the ``luigi.interface._WorkerSchedulerFactory`` to include
    sandboxing information when create a worker instance.
    """
    def create_worker(self, scheduler, worker_processes, assistant=False):
        # Forward the sandbox worker id and first task from the environment.
        worker = luigi.worker.Worker(
            scheduler=scheduler,
            worker_processes=worker_processes,
            assistant=assistant,
            worker_id=os.getenv("LAW_SANDBOX_WORKER_ID"),
        )
        worker._first_task = os.getenv("LAW_SANDBOX_WORKER_TASK")
        return worker

    luigi.interface._WorkerSchedulerFactory.create_worker = create_worker
def countByWindow(self, windowDuration, slideDuration):
    """Return a new DStream in which each RDD has a single element
    generated by counting the number of elements in a window over this
    DStream. windowDuration and slideDuration are as defined in the
    window() operation.

    This is equivalent to window(windowDuration, slideDuration).count(),
    but will be more efficient if window is large.
    """
    # Count by summing ones, subtracting elements that leave the window.
    ones = self.map(lambda _: 1)
    return ones.reduceByWindow(operator.add, operator.sub,
                               windowDuration, slideDuration)
def draw(data, format='auto', size=(400, 300), drawing_type='ball and stick',
         camera_type='perspective', shader='lambert', display_html=True,
         element_properties=None, show_save=False):
    """Draws an interactive 3D visualization of the inputted chemical.

    Args:
        data: A string or file representing a chemical.
        format: The format of the `data` variable (default is 'auto').
        size: Starting dimensions of visualization, in pixels.
        drawing_type: Specifies the molecular representation. Can be 'ball
            and stick', 'wireframe', or 'space filling'.
        camera_type: Can be 'perspective' or 'orthographic'.
        shader: Specifies shading algorithm to use. Can be 'toon', 'basic',
            'phong', or 'lambert'.
        display_html: If True (default), embed the html in a IPython
            display. If False, return the html as a string.
        element_properties: A dictionary providing color and radius
            information for custom elements or overriding the defaults in
            imolecule.js
        show_save: If True, displays a save icon for rendering molecule as
            an image.

    The `format` can be any value specified by Open Babel
    (http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
    option uses the extension for files (ie. my_file.mol -> mol) and
    defaults to SMILES (smi) for strings.
    """
    # Catch errors on string-based input before getting js involved
    draw_options = ['ball and stick', 'wireframe', 'space filling']
    camera_options = ['perspective', 'orthographic']
    shader_options = ['toon', 'basic', 'phong', 'lambert']
    if drawing_type not in draw_options:
        raise Exception("Invalid drawing type! Please use one of: " + ", ".join(draw_options))
    if camera_type not in camera_options:
        raise Exception("Invalid camera type! Please use one of: " + ", ".join(camera_options))
    if shader not in shader_options:
        raise Exception("Invalid shader! Please use one of: " + ", ".join(shader_options))

    json_mol = generate(data, format)
    if element_properties is None:
        element_properties = dict()
    json_element_properties = to_json(element_properties)
    # Unique id so multiple drawings can coexist in one notebook.
    div_id = uuid.uuid4()
    html = """<div id="molecule_%s"></div> <script type="text/javascript"> require.config({baseUrl: '/', paths: {imolecule: ['%s', '%s']}}); require(['imolecule'], function () { var $d = $('#molecule_%s'); $d.width(%d); $d.height(%d); $d.imolecule = jQuery.extend({}, imolecule); $d.imolecule.create($d, {drawingType: '%s', cameraType: '%s', shader: '%s', showSave: %s}); $d.imolecule.addElements(%s); $d.imolecule.draw(%s); $d.resizable({ aspectRatio: %d / %d, resize: function (evt, ui) { $d.imolecule.renderer.setSize(ui.size.width, ui.size.height); } }); }); </script>""" % (div_id, local_path[:-3], remote_path[:-3], div_id, size[0], size[1], drawing_type, camera_type, shader, 'true' if show_save else 'false', json_element_properties, json_mol, size[0], size[1])

    # Execute js and display the results in a div (see script for more)
    if display_html:
        try:
            __IPYTHON__
        except NameError:
            # We're running outside ipython, let's generate a static HTML and
            # show it in the browser
            import shutil
            import webbrowser
            from tempfile import mkdtemp
            from time import time
            try:
                # Python 3
                from urllib.parse import urljoin
                from urllib.request import pathname2url
            except ImportError:
                # Python 2
                from urlparse import urljoin
                from urllib import pathname2url
            from tornado import template
            t = template.Loader(file_path).load('viewer.template')
            html = t.generate(title="imolecule", json_mol=json_mol, drawing_type=drawing_type, shader=shader, camera_type=camera_type, json_element_properties=json_element_properties)
            # Write the page plus its JS/CSS dependencies to a temp dir.
            tempdir = mkdtemp(prefix='imolecule_{:.0f}_'.format(time()))
            html_filename = os.path.join(tempdir, 'index.html')
            with open(html_filename, 'wb') as f:
                f.write(html)
            libs = (('server', 'css', 'chosen.css'), ('server', 'css', 'server.css'), ('js', 'jquery-1.11.1.min.js'), ('server', 'js', 'chosen.jquery.min.js'), ('js', 'build', 'imolecule.min.js'))
            for lib in libs:
                shutil.copy(os.path.join(file_path, *lib), tempdir)
            html_file_url = urljoin('file:', pathname2url(html_filename))
            print('Opening html file: {}'.format(html_file_url))
            webbrowser.open(html_file_url)
        else:
            # We're running in ipython: display widget
            display(HTML(html))
    else:
        return html
def loader_cls(self):
    """Loader class used in ``JsonRef.replace_refs``."""
    configured = self.app.config['JSONSCHEMAS_LOADER_CLS']
    # The loader may be configured as a dotted import path.
    if isinstance(configured, six.string_types):
        configured = import_string(configured)
    return configured
def genOutputs(self, code, match):
    """Return a list out template outputs based on the triggers found in
    the code and the template they create.
    """
    triggers = self.collectTriggers(match.match, code)
    # Sort by trigger key, then keep only the rendered outputs.
    pairs = sorted((key, match.output(m)) for (key, m) in triggers.items())
    return [output for _, output in pairs]
def lx4dec(string, first):
    """Scan a string from a specified starting position for the end of a
    decimal number.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lx4dec_c.html

    :param string: Any character string.
    :type string: str
    :param first: First character to scan from in string.
    :type first: int
    :return: last and nchar
    :rtype: tuple
    """
    c_string = stypes.stringToCharP(string)
    c_first = ctypes.c_int(first)
    c_last = ctypes.c_int()
    c_nchar = ctypes.c_int()
    libspice.lx4dec_c(c_string, c_first,
                      ctypes.byref(c_last), ctypes.byref(c_nchar))
    return c_last.value, c_nchar.value
def expected_values(self, beta):
    """Expected values of the function given the covariance matrix and
    hyperparameters.

    Parameters
    ----------
    beta : np.ndarray
        Contains untransformed values for latent variables.

    Returns
    -------
    The expected values of the function.
    """
    # Transform each latent variable through its prior before use
    transformed = np.array([
        self.latent_variables.z_list[idx].prior.transform(beta[idx])
        for idx in range(beta.shape[0])
    ])
    chol = self._L(transformed)
    alpha = self._alpha(chol)
    return np.dot(np.transpose(self.kernel.K(transformed)), alpha)
def destroy(self):
    """Unregister from the untwisted reactor.

    Call ``self.terminate()`` first to kill the process; this only
    detaches the object from the reactor pool and clears its bindings.
    """
    reactor_pool = core.gear.pool
    reactor_pool.remove(self)
    self.base.clear()
def process_stdin ( line ) : '''handle commands from user'''
if line is None : sys . exit ( 0 ) # allow for modules to override input handling if mpstate . functions . input_handler is not None : mpstate . functions . input_handler ( line ) return line = line . strip ( ) if mpstate . status . setup_mode : # in setup mode we send strings straight to the master if line == '.' : mpstate . status . setup_mode = False mpstate . status . flightmode = "MAV" mpstate . rl . set_prompt ( "MAV> " ) return if line != '+++' : line += '\r' for c in line : time . sleep ( 0.01 ) mpstate . master ( ) . write ( c ) return if not line : return args = shlex . split ( line ) cmd = args [ 0 ] while cmd in mpstate . aliases : line = mpstate . aliases [ cmd ] args = shlex . split ( line ) + args [ 1 : ] cmd = args [ 0 ] if cmd == 'help' : k = command_map . keys ( ) # k . sort ( ) for cmd in k : ( fn , help ) = command_map [ cmd ] print ( "%-15s : %s" % ( cmd , help ) ) return if cmd == 'exit' and mpstate . settings . requireexit : mpstate . status . exit = True return if cmd == 'velocity' and len ( args ) == 4 : PH_CMDVel = CMDVelI ( args [ 1 ] , args [ 2 ] , args [ 3 ] , 0 , 0 , 0 ) # 1 to avoid indeterminations if not cmd in command_map : for ( m , pm ) in mpstate . modules : if hasattr ( m , 'unknown_command' ) : try : if m . unknown_command ( args ) : return except Exception as e : print ( "ERROR in command: %s" % str ( e ) ) print ( "Unknown command '%s'" % line ) return ( fn , help ) = command_map [ cmd ] try : fn ( args [ 1 : ] ) except Exception as e : print ( "ERROR in command %s: %s" % ( args [ 1 : ] , str ( e ) ) ) if mpstate . settings . moddebug > 1 : traceback . print_exc ( )
def start_session(self, causal_consistency=True, default_transaction_options=None):
    """Start a logical session.

    This method takes the same parameters as
    :class:`~pymongo.client_session.SessionOptions`. See the
    :mod:`~pymongo.client_session` module for details and examples.

    Requires MongoDB 3.6. It is an error to call :meth:`start_session`
    if this client has been authenticated to multiple databases using
    the deprecated method
    :meth:`~pymongo.database.Database.authenticate`.

    A :class:`~pymongo.client_session.ClientSession` may only be used
    with the MongoClient that started it.

    :Returns:
      An instance of :class:`~pymongo.client_session.ClientSession`.

    .. versionadded:: 3.6
    """
    # First positional arg presumably marks the session as implicit;
    # False here for an explicitly requested one — confirm against
    # __start_session.
    session = self.__start_session(
        False,
        causal_consistency=causal_consistency,
        default_transaction_options=default_transaction_options)
    return session
def show_disk(name=None, kwargs=None, call=None):  # pylint: disable=W0613
    '''Show the details of an existing disk.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_disk myinstance disk_name=mydisk
        salt-cloud -f show_disk gce disk_name=mydisk
    '''
    if not kwargs or 'disk_name' not in kwargs:
        log.error('Must specify disk_name.')
        return False

    volume = get_conn().ex_get_volume(kwargs['disk_name'])
    return _expand_disk(volume)
def _not ( condition = None , ** kwargs ) : """Return the opposite of input condition . : param condition : condition to process . : result : not condition . : rtype : bool"""
result = True if condition is not None : result = not run ( condition , ** kwargs ) return result
def list_relations(self):
    '''Yield every relation in the database as ``(src, relation, dst)``.'''
    rows = self._execute('select * from relations').fetchall()
    for src_id, name, dst_id in rows:
        # Resolve each endpoint id to its stored code, then deserialize
        src_obj = self.deserialize(
            next(self._execute('select code from objects where id=?', (src_id,)))[0])
        dst_obj = self.deserialize(
            next(self._execute('select code from objects where id=?', (dst_id,)))[0])
        yield src_obj, name, dst_obj
def msg_curse(self, args=None, max_width=None):
    """Return the dict to display in the curse interface.

    Builds a list of curses "line" fragments: a header row, then one row
    per RAID array with used/available disk counts, plus extra rows for
    inactive arrays (per-component listing) and degraded arrays.
    """
    # Init the return message
    ret = []
    # Only process if stats exist...
    if not self.stats:
        return ret
    # Max size for the interface name
    # NOTE(review): assumes max_width is an int — None would raise here.
    name_max_width = max_width - 12
    # Header
    msg = '{:{width}}'.format('RAID disks', width=name_max_width)
    ret.append(self.curse_add_line(msg, "TITLE"))
    msg = '{:>7}'.format('Used')
    ret.append(self.curse_add_line(msg))
    msg = '{:>7}'.format('Avail')
    ret.append(self.curse_add_line(msg))
    # Data
    arrays = sorted(iterkeys(self.stats))
    for array in arrays:
        # New line
        ret.append(self.curse_new_line())
        # Display the current status
        status = self.raid_alert(self.stats[array]['status'],
                                 self.stats[array]['used'],
                                 self.stats[array]['available'],
                                 self.stats[array]['type'])
        # Data: RAID type name | disk used | disk available
        array_type = self.stats[array]['type'].upper() if self.stats[array]['type'] is not None else 'UNKNOWN'
        # Build the full name = array type + array name
        full_name = '{} {}'.format(array_type, array)
        msg = '{:{width}}'.format(full_name, width=name_max_width)
        ret.append(self.curse_add_line(msg))
        if self.stats[array]['type'] == 'raid0' and self.stats[array]['status'] == 'active':
            # raid0 has no spare capacity: show component count and '-'
            msg = '{:>7}'.format(len(self.stats[array]['components']))
            ret.append(self.curse_add_line(msg, status))
            msg = '{:>7}'.format('-')
            ret.append(self.curse_add_line(msg, status))
        elif self.stats[array]['status'] == 'active':
            msg = '{:>7}'.format(self.stats[array]['used'])
            ret.append(self.curse_add_line(msg, status))
            msg = '{:>7}'.format(self.stats[array]['available'])
            ret.append(self.curse_add_line(msg, status))
        elif self.stats[array]['status'] == 'inactive':
            # Inactive array: show its status and then each component disk
            ret.append(self.curse_new_line())
            msg = '└─ Status {}'.format(self.stats[array]['status'])
            ret.append(self.curse_add_line(msg, status))
            components = sorted(iterkeys(self.stats[array]['components']))
            for i, component in enumerate(components):
                # Use the closing tree glyph for the last component
                if i == len(components) - 1:
                    tree_char = '└─'
                else:
                    tree_char = '├─'
                ret.append(self.curse_new_line())
                msg = ' {} disk {}: '.format(tree_char, self.stats[array]['components'][component])
                ret.append(self.curse_add_line(msg))
                msg = '{}'.format(component)
                ret.append(self.curse_add_line(msg))
        if self.stats[array]['type'] != 'raid0' and (self.stats[array]['used'] < self.stats[array]['available']):
            # Display current array configuration
            ret.append(self.curse_new_line())
            msg = '└─ Degraded mode'
            ret.append(self.curse_add_line(msg, status))
            if len(self.stats[array]['config']) < 17:
                ret.append(self.curse_new_line())
                msg = ' └─ {}'.format(self.stats[array]['config'].replace('_', 'A'))
                ret.append(self.curse_add_line(msg))
    return ret
def parse(fpath, **kwargs):
    """Parse file contents to a dict-like object via parser plugins.

    The plugin whose file regex matches the file name is used; when several
    match, the longest regex wins.

    Parameters
    ----------
    fpath : str or file-like
        A path string, an object with ``open`` and ``name`` attributes, or
        an object with ``readline`` and ``name`` attributes.
    kwargs :
        Passed through to the selected parser plugin.

    Raises
    ------
    ValueError
        If *fpath* is not an accepted type, or no plugin regex matches.
    """
    is_path = isinstance(fpath, basestring)
    if is_path:
        fname = fpath
    elif hasattr(fpath, 'name') and (hasattr(fpath, 'open') or hasattr(fpath, 'readline')):
        fname = fpath.name
    else:
        raise ValueError('fpath should be a str or file_like object: {}'.format(fpath))

    plugins_by_regex = {plugin.file_regex: plugin
                        for plugin in get_plugins('parsers').values()}
    # Try the most specific (longest) regex first
    for regex in sorted(plugins_by_regex, key=len, reverse=True):
        if not fnmatch(fname, regex):
            continue
        reader = plugins_by_regex[regex]
        if is_path:
            with open(fpath, 'r') as file_obj:
                return reader.read_file(file_obj, **kwargs)
        if hasattr(fpath, 'open'):
            with fpath.open('r') as file_obj:
                return reader.read_file(file_obj, **kwargs)
        return reader.read_file(fpath, **kwargs)

    raise ValueError('{} does not match any regex'.format(fname))
def extract(self, url=None, raw_html=None):
    """Extract an Article object from a URL (or raw HTML).

    Pass in a url and get back an Article.
    """
    candidate = CrawlCandidate(self.config, url, raw_html)
    return self.crawl(candidate)
def get_ip_scope(auth, url, scopeid=None):
    """Return the IP address scopes configured on the HPE IMC server.

    :param auth: requests auth object (usually ``auth.creds`` from a
        ``pyhpeimc.auth`` IMCAuth instance)
    :param url: base url of the IMC RS interface (usually ``auth.url``)
    :param scopeid: optional integer id; when given, only that scope is
        returned
    :return: list of dicts, one per IP scope (a single scope is wrapped in
        a one-element list); an error string on request failure; ``None``
        when the server answers with a non-200 status or an unexpected body
    :rtype: list
    """
    if scopeid is None:
        get_ip_scope_url = "/imcrs/res/access/assignedIpScope"
    else:
        get_ip_scope_url = "/imcrs/res/access/assignedIpScope/ip?ipScopeId=" + str(scopeid)
    f_url = url + get_ip_scope_url
    try:
        # The request itself must sit inside the try block: previously it
        # was outside, so the RequestException handler below could never
        # catch the network errors it was written for.
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            ipscopelist = (json.loads(response.text))['assignedIpScope']
            if isinstance(ipscopelist, list):
                return ipscopelist
            elif isinstance(ipscopelist, dict):
                # Single scope: normalize to a one-element list
                return [ipscopelist]
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_ip_scope: An Error has occured"
def _parse_contract_headers ( self , table ) : """Parse the years on the contract . The years are listed as the headers on the contract . The first header contains ' Team ' which specifies the player ' s current team and should not be included in the years . Parameters table : PyQuery object A PyQuery object containing the contract table . Returns list Returns a list where each element is a string denoting the season , such as ' 2017-18 ' ."""
years = [ i . text ( ) for i in table ( 'th' ) . items ( ) ] years . remove ( 'Team' ) return years
def result_report_overall(self):
    """Report overall (micro-averaged) results.

    Returns
    -------
    str
        result report in string format
    """
    results = self.results_overall_metrics()
    output = self.ui.section_header('Overall metrics (micro-average)', indent=2) + '\n'

    if results['f_measure']:
        output += self.ui.line('F-measure', indent=2) + '\n'
        for label, key in (('F-measure (F1)', 'f_measure'),
                           ('Precision', 'precision'),
                           ('Recall', 'recall')):
            output += self.ui.data(field=label,
                                   value=float(results['f_measure'][key]) * 100,
                                   unit='%',
                                   indent=4) + '\n'

    if results['error_rate']:
        output += self.ui.line('Error rate', indent=2) + '\n'
        # Error-rate values are reported as plain ratios (no % unit)
        for label, key in (('Error rate (ER)', 'error_rate'),
                           ('Substitution rate', 'substitution_rate'),
                           ('Deletion rate', 'deletion_rate'),
                           ('Insertion rate', 'insertion_rate')):
            output += self.ui.data(field=label,
                                   value=float(results['error_rate'][key]),
                                   indent=4) + '\n'

    if results['accuracy']:
        output += self.ui.line('Accuracy', indent=2) + '\n'
        for label, key in (('Sensitivity', 'sensitivity'),
                           ('Specificity', 'specificity'),
                           ('Balanced accuracy', 'balanced_accuracy'),
                           ('Accuracy', 'accuracy')):
            output += self.ui.data(field=label,
                                   value=float(results['accuracy'][key] * 100),
                                   unit='%',
                                   indent=4) + '\n'

    return output
def _tls_P_hash(secret, seed, req_len, hm):
    """Implementation of the P_hash function from section 5 of RFC 4346
    (and section 5 of RFC 5246).

    - secret: the HMAC key. Python's hmac handles key formatting.
    - seed: the seed to be used.
    - req_len: total number of bytes to generate by iterating the HMAC,
      so a single call suffices.
    - hm: the hmac class to iterate (Hmac_MD5/Hmac_SHA1 for TLS <= 1.1,
      Hmac_SHA256/Hmac_SHA384 for TLS 1.2).
    """
    digest_size = hm.hash_alg.hash_len
    rounds = (req_len + digest_size - 1) // digest_size  # ceil division
    seed = bytes_encode(seed)
    a_i = hm(secret).digest(seed)  # A(1)
    chunks = []
    for _ in range(rounds):
        chunks.append(hm(secret).digest(a_i + seed))
        a_i = hm(secret).digest(a_i)
    return b"".join(chunks)[:req_len]
def Security_setOverrideCertificateErrors(self, override):
    """Enable/disable overriding certificate errors (Security domain).

    If enabled, all certificate error events need to be handled by the
    DevTools client and should be answered with handleCertificateError
    commands.

    :param override: If true, certificate errors will be overridden.
    :returns: result of the synchronous DevTools command (no documented
        return value).
    """
    assert isinstance(override, (bool,)), "Argument 'override' must be of type '['bool']'. Received type: '%s'" % type(override)
    return self.synchronous_command('Security.setOverrideCertificateErrors',
                                    override=override)
def move_item(self, item, origin, destination):
    """Move an item from one cluster to another cluster.

    :param item: the item to be moved.
    :param origin: the originating cluster.
    :param destination: the target cluster.
    :raises ValueError: if the item is not found in *origin*.
    """
    if self.equality:
        for index, element in enumerate(origin):
            if self.equality(element, item):
                item_index = index
                break
        else:
            # Previously item_index silently defaulted to 0 when nothing
            # matched, moving the wrong element; mirror list.index() and
            # fail loudly instead.
            raise ValueError('%r is not in the origin cluster' % (item,))
    else:
        item_index = origin.index(item)
    destination.append(origin.pop(item_index))
def load_commands ( cli , manage_dict ) : """Loads the commands defined in manage file"""
namespaced = manage_dict . get ( 'namespaced' ) # get click commands commands = manage_dict . get ( 'click_commands' , [ ] ) for command_dict in commands : root_module = import_string ( command_dict [ 'module' ] ) group = cli . manage_groups . get ( command_dict . get ( 'group' ) , cli ) if getattr ( root_module , '__path__' , None ) : # This is a package iter_modules = pkgutil . iter_modules ( root_module . __path__ , prefix = root_module . __name__ + '.' ) submodules_names = [ item [ 1 ] for item in iter_modules ] submodules = [ import_string ( name ) for name in submodules_names ] for module in submodules : add_click_commands ( module , group , command_dict , namespaced ) else : # a single file module add_click_commands ( root_module , group , command_dict , namespaced ) # get inline commands commands = manage_dict . get ( 'inline_commands' , [ ] ) for command_dict in commands : name = command_dict [ 'name' ] help_text = command_dict . get ( 'help_text' ) options = command_dict . get ( 'options' , { } ) arguments = command_dict . get ( 'arguments' , { } ) context = command_dict . get ( 'context' , [ ] ) code = command_dict [ 'code' ] group = cli . manage_groups . get ( command_dict . get ( 'group' ) , cli ) group . add_command ( make_command_from_string ( code = code , cmd_context = get_context ( context ) , options = options , arguments = arguments , help_text = help_text ) , name = name ) # get function commands commands = manage_dict . get ( 'function_commands' , [ ] ) for command_dict in commands : name = command_dict [ 'name' ] help_text = command_dict . get ( 'help_text' ) options = command_dict . get ( 'options' , { } ) arguments = command_dict . get ( 'arguments' , { } ) function = import_string ( command_dict [ 'function' ] ) group = cli . manage_groups . get ( command_dict . get ( 'group' ) , cli ) group . add_command ( make_command_from_function ( function = function , options = options , arguments = arguments , help_text = help_text ) , name = name )
def wr_title(self, worksheet, row_idx=0):
    """Write the optional title row(s); return the next row index."""
    title = self.vars.title
    if title is None:
        # No title configured: nothing written, row index unchanged
        return row_idx
    if isinstance(title, str):
        # Single-line title
        return self.wr_row_mergeall(worksheet, title, self.fmt_hdr, row_idx)
    # Multi-line title: one merged row per line
    next_row = row_idx
    for title_line in title:
        next_row = self.wr_row_mergeall(worksheet, title_line, self.fmt_hdr, next_row)
    return next_row
def compute(self, inputVector, learn, activeArray):
    """Primary public method of the SpatialPooler.

    Takes an input vector and populates ``activeArray`` with the active
    columns. When ``learn`` is True, column permanences, duty cycles and
    boost factors are also updated.

    @param inputVector: A numpy array of 0's and 1's comprising the input.
           It is treated as one-dimensional; only its total size must match
           the number of input bits given to the constructor.
    @param learn: Boolean indicating whether learning should be performed.
           Learning updates synapse permanences and hence modifies model
           state; turning it off freezes the SP (useful e.g. to inspect
           SDRs for various inputs).
    @param activeArray: Array of size numColumns. On return it holds 1's
           at the indices of the active columns and 0's everywhere else.
    """
    if not isinstance(inputVector, numpy.ndarray):
        raise TypeError("Input vector must be a numpy array, not %s" %
                        str(type(inputVector)))

    if inputVector.size != self._numInputs:
        # Fixed: the message previously interpolated (actual, expected) in
        # swapped order, reporting the wrong "expected" value.
        raise ValueError(
            "Input vector dimensions don't match. Expecting %s but got %s" %
            (self._numInputs, inputVector.size))

    self._updateBookeepingVars(learn)
    inputVector = numpy.array(inputVector, dtype=realDType)
    # NOTE(review): reshape(-1) returns a new array which is discarded here;
    # the original probably intended inputVector = inputVector.reshape(-1).
    # Left as-is to preserve behavior.
    inputVector.reshape(-1)
    self._overlaps = self._calculateOverlap(inputVector)
    # self._overlaps[self.deadCols] = 0

    # Apply boosting when learning is on
    if learn:
        self._boostedOverlaps = self._boostFactors * self._overlaps
    else:
        self._boostedOverlaps = self._overlaps

    # Apply inhibition to determine the winning columns
    activeColumns = self._inhibitColumns(self._boostedOverlaps)

    if learn:
        self._adaptSynapses(inputVector, activeColumns)
        self._updateDutyCycles(self._overlaps, activeColumns)
        self._bumpUpWeakColumns()
        self._updateTargetActivityDensity()
        self._updateBoostFactors()
        if self._isUpdateRound():
            self._updateInhibitionRadius()
            self._updateMinDutyCycles()
    # self.growRandomSynapses()

    activeArray.fill(0)
    activeArray[activeColumns] = 1
def tcp_receive(self):
    """Receive data from the TCP connection and return it as ``str``.

    Reads up to ``BUFFER_SIZE`` bytes from ``self.conn``; on Python 3
    the socket yields ``bytes``, which are decoded as UTF-8.
    """
    data = self.conn.recv(self.BUFFER_SIZE)
    # isinstance is the idiomatic type check (was: type(data) != str)
    if not isinstance(data, str):
        # Python 3 specific: sockets return bytes
        data = data.decode("utf-8")
    return str(data)
def downloads_per_day(self):
    """Return the number of downloads per day, averaged over the past
    7 days of data.

    :return: average number of downloads per day
    :rtype: int
    """
    count, num_days = self._downloads_for_num_days(7)
    per_day = ceil(count / num_days)
    logger.debug("Downloads per day = (%d / %d) = %d", count, num_days, per_day)
    return per_day
def ossos_release_parser(table=False, data_release=parameters.RELEASE_VERSION):
    """Parse an OSSOS release detections file.

    Extra fun as this is space-separated, so using CSV parsers is not an
    option.

    :param table: if True return an astropy Table of the detections,
        otherwise a list of TNO objects parsed line by line.
    :param data_release: key into ``parameters.RELEASE_DETECTIONS``
        selecting the release file to read.
    """
    names = ['cl', 'p', 'j', 'k', 'sh', 'object', 'mag', 'e_mag', 'Filt',
             'Hsur', 'dist', 'e_dist', 'Nobs', 'time', 'av_xres', 'av_yres',
             'max_x', 'max_y', 'a', 'e_a', 'e', 'e_e', 'i', 'e_i', 'Omega',
             'e_Omega', 'omega', 'e_omega', 'tperi', 'e_tperi', 'RAdeg',
             'DEdeg', 'JD', 'rate']  # , 'eff', 'm_lim']
    if table:
        retval = Table.read(parameters.RELEASE_DETECTIONS[data_release],
                            format='ascii', guess=False, delimiter=' ',
                            data_start=0, comment='#', names=names,
                            header_start=None)
    else:
        retval = []
        # Fixed: open the release file that data_release maps to (as the
        # table branch does) instead of treating the release key itself as
        # a file path.
        with open(parameters.RELEASE_DETECTIONS[data_release], 'r') as detectionsfile:
            for line in detectionsfile.readlines()[1:]:  # first line is column definitions
                obj = TNO.from_string(
                    line, version=parameters.RELEASE_DETECTIONS[data_release])
                retval.append(obj)
    return retval
def update(self, attributes=values.unset, assignment_status=values.unset,
           reason=values.unset, priority=values.unset,
           task_channel=values.unset):
    """Update the TaskInstance.

    :param unicode attributes: user-defined JSON data describing the
        custom attributes of this task.
    :param TaskInstance.Status assignment_status: a 'pending' or
        'reserved' Task may be canceled by posting
        AssignmentStatus='canceled'.
    :param unicode reason: only required if the Task is canceled or
        completed.
    :param unicode priority: override priority for the Task.
    :param unicode task_channel: the task_channel.
    :returns: updated TaskInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task.TaskInstance
    """
    form = values.of({
        'Attributes': attributes,
        'AssignmentStatus': assignment_status,
        'Reason': reason,
        'Priority': priority,
        'TaskChannel': task_channel,
    })
    payload = self._version.update('POST', self._uri, data=form)
    return TaskInstance(
        self._version,
        payload,
        workspace_sid=self._solution['workspace_sid'],
        sid=self._solution['sid'],
    )
def action(forwards=None, context_class=None):
    """Decorator that builds reversible actions from functions.

    The decorated function becomes the ``forwards`` implementation of the
    action; its first argument is a context object, supplied implicitly,
    that shares state with the ``backwards`` implementation. The
    ``.backwards`` attribute of the decorated function is itself a
    decorator used to register the rollback implementation, which is
    required before the action can be used. Both implementations are
    called with the same arguments; information flowing from forwards to
    backwards must go through the context object.

    :param forwards:
        When given, this function is treated as the ``forwards``
        implementation and a partially constructed action is returned
        (finish it by decorating the rollback with ``.backwards``).
        When omitted, a decorator accepting the ``forwards`` function is
        returned instead.
    :param context_class:
        Constructor for context objects, called with no arguments once
        per action call; defaults to ``dict``.
    """
    context_class = context_class or dict

    def decorator(forwards_func):
        return ActionBuilder(forwards_func, context_class)

    return decorator(forwards) if forwards is not None else decorator
def _build_ssh_config(self, config):
    """Build the ssh config injection snippet for this host entry."""
    fields = {key: config.get(key)
              for key in ('host', 'hostname', 'ssh_key_path', 'user')}
    snippet = ssh_config_template % fields
    if config.has('port'):
        # Port is optional; append it only when configured
        snippet += " Port {0}\n".format(config.get('port'))
    return snippet
def prettyprint(self):
    """Return the hypercat serialized as pretty-printed JSON."""
    payload = self.asJSON()
    return json.dumps(payload, sort_keys=True, indent=4, separators=(',', ': '))
def getActiveCompactions(self, login, tserver):
    """Send a getActiveCompactions request and return the received reply.

    Parameters:
     - login
     - tserver
    """
    self.send_getActiveCompactions(login, tserver)
    reply = self.recv_getActiveCompactions()
    return reply
def list_children(self, **kwargs):
    # type: (str) -> Generator
    '''Generate a list of all of the file/directory objects in the
    specified location on the ISO.

    Parameters:
     iso_path - The absolute path on the ISO to list the children for.
     rr_path - The absolute Rock Ridge path on the ISO to list the children for.
     joliet_path - The absolute Joliet path on the ISO to list the children for.
     udf_path - The absolute UDF path on the ISO to list the children for.
    Yields:
     Children of this path.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')

    # Validate the keyword arguments: exactly one of the four path kinds
    # must be provided (and be non-None); anything else is rejected.
    num_paths = 0
    for key in kwargs:
        if key in ['joliet_path', 'rr_path', 'iso_path', 'udf_path']:
            if kwargs[key] is not None:
                num_paths += 1
        else:
            raise pycdlibexception.PyCdlibInvalidInput("Invalid keyword, must be one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")
    if num_paths != 1:
        raise pycdlibexception.PyCdlibInvalidInput("Must specify one, and only one of 'iso_path', 'rr_path', 'joliet_path', or 'udf_path'")

    if 'udf_path' in kwargs:
        # UDF children come straight from the File Entry's file
        # identifier descriptors.
        udf_rec = self._get_udf_entry(kwargs['udf_path'])
        if not udf_rec.is_dir():
            raise pycdlibexception.PyCdlibInvalidInput('UDF File Entry is not a directory!')
        for fi_desc in udf_rec.fi_descs:
            yield fi_desc.file_entry
    else:
        # ISO9660 / Rock Ridge / Joliet all resolve through _get_entry,
        # differing only in which positional path argument is filled in.
        if 'joliet_path' in kwargs:
            rec = self._get_entry(None, None, self._normalize_joliet_path(kwargs['joliet_path']))
        elif 'rr_path' in kwargs:
            rec = self._get_entry(None, utils.normpath(kwargs['rr_path']), None)
        else:
            rec = self._get_entry(utils.normpath(kwargs['iso_path']), None, None)
        for c in _yield_children(rec):
            yield c
def target_key(self, key):
    """Temporarily retarget the client for one call to route specifically
    to the one host that the given key routes to.

    In that case the result on the promise is just the one host's value
    instead of a dictionary.

    .. versionadded:: 1.3
    """
    cluster_router = self.connection_pool.cluster.get_router()
    host_id = cluster_router.get_host_for_key(key)
    promise = self.target([host_id])
    # Mark the promise so its result resolves to the single host's value
    promise.__resolve_singular_result = True
    return promise
def run ( self ) : """The main run loop for the sender thread ."""
log . debug ( "Starting Kafka producer I/O thread." ) # main loop , runs until close is called while self . _running : try : self . run_once ( ) except Exception : log . exception ( "Uncaught error in kafka producer I/O thread" ) log . debug ( "Beginning shutdown of Kafka producer I/O thread, sending" " remaining records." ) # okay we stopped accepting requests but there may still be # requests in the accumulator or waiting for acknowledgment , # wait until these are completed . while ( not self . _force_close and ( self . _accumulator . has_unsent ( ) or self . _client . in_flight_request_count ( ) > 0 ) ) : try : self . run_once ( ) except Exception : log . exception ( "Uncaught error in kafka producer I/O thread" ) if self . _force_close : # We need to fail all the incomplete batches and wake up the # threads waiting on the futures . self . _accumulator . abort_incomplete_batches ( ) try : self . _client . close ( ) except Exception : log . exception ( "Failed to close network client" ) log . debug ( "Shutdown of Kafka producer I/O thread has completed." )
def discover_master(self, service_name):
    """Ask the configured sentinel servers for the address of the Redis
    master registered under ``service_name``.

    Returns an ``(ip, port)`` pair; raises ``MasterNotFoundError`` when no
    sentinel reports a usable master.
    """
    for idx, sentinel in enumerate(self.sentinels):
        try:
            masters = sentinel.sentinel_masters()
        except (ConnectionError, TimeoutError):
            # Unreachable sentinel: try the next one.
            continue
        state = masters.get(service_name)
        if not state or not self.check_master_state(state, service_name):
            continue
        # Promote the responsive sentinel to the front of the list so the
        # next discovery asks it first.
        self.sentinels[0], self.sentinels[idx] = sentinel, self.sentinels[0]
        return state['ip'], state['port']
    raise MasterNotFoundError("No master found for %r" % (service_name,))
def create_user(self, Name, EmailAddress, **kwargs):
    """Create user (undocumented API feature).

    :param Name: User name (login for privileged, required)
    :param EmailAddress: Email address (required)
    :param kwargs: Optional fields to set (see edit_user)
    :returns: ID of new user or False when create fails
    :raises BadRequest: When user already exists
    :raises InvalidUse: When invalid fields are set
    """
    # Creation is just an edit against the sentinel id 'new'.
    return self.edit_user('new', Name=Name, EmailAddress=EmailAddress,
                          **kwargs)
def update(self, **args):
    """Push local field changes of this project version to the server.

    Primarily used to archive versions.
    """
    # Forward all supplied fields verbatim to the generic resource update.
    payload = dict(args)
    super(Version, self).update(**payload)
def accept(self):
    """Server-side UDP connection establishment

    This method returns a server-side SSLConnection object, connected to
    that peer most recently returned from the listen method and not yet
    connected.  If there is no such peer, then the listen method is
    invoked.

    Return value: SSLConnection connected to a new peer, None if packet
    forwarding only to an existing peer occurred.
    """
    # No unconnected peer queued: service the demux once.  listen()
    # returning falsy means the datagram was only forwarded to an
    # already-established connection, so there is nothing to accept.
    if not self._pending_peer_address:
        if not self.listen():
            _logger.debug("Accept returning without connection")
            return  # implicit None (forwarding-only case)
    # Build the server-side connection, mirroring this listening socket's
    # TLS configuration and user callbacks.
    new_conn = SSLConnection(self,
                             self._keyfile, self._certfile, True,
                             self._cert_reqs, self._ssl_version,
                             self._ca_certs, self._do_handshake_on_connect,
                             self._suppress_ragged_eofs, self._ciphers,
                             cb_user_config_ssl_ctx=self._user_config_ssl_ctx,
                             cb_user_config_ssl=self._user_config_ssl)
    new_peer = self._pending_peer_address
    self._pending_peer_address = None
    if self._do_handshake_on_connect:
        # Note that since that connection's socket was just created in its
        # constructor, the following operation must be blocking; hence
        # handshake-on-connect can only be used with a routing demux if
        # listen is serviced by a separate application thread, or else we
        # will hang in this call
        new_conn.do_handshake()
    _logger.debug("Accept returning new connection for new peer")
    return new_conn, new_peer
def export_data(self, directory, filename, with_md5_hash=False):
    """Save model data in a JSON file.

    Parameters
    ----------
    :param directory: string
        The directory.
    :param filename: string
        The filename.
    :param with_md5_hash: bool
        Whether to append the checksum to the filename or not.
    """
    model_data = {
        'vectors': self.estimator.support_vectors_.tolist(),
        'coefficients': self.estimator.dual_coef_.tolist(),
        'intercepts': self.estimator._intercept_.tolist(),
        'weights': self.estimator.n_support_.tolist(),
        'kernel': self.kernel,
        'gamma': float(self.gamma),
        'coef0': float(self.coef0),
        'degree': float(self.degree),
        'nClasses': int(self.n_classes),
        'nRows': int(self.n_svs_rows),
    }
    # Route float formatting through the model's repr helper.
    # NOTE(review): json.encoder.FLOAT_REPR is ignored by the C encoder on
    # Python 3; kept only for backward compatibility with Python 2 runs.
    encoder.FLOAT_REPR = lambda o: self.repr(o)
    json_data = dumps(model_data, sort_keys=True)
    if with_md5_hash:
        import hashlib
        # FIX: md5() requires bytes — the original passed the str and
        # raised TypeError on Python 3.
        json_hash = hashlib.md5(json_data.encode('utf-8')).hexdigest()
        filename = filename.split('.json')[0] + '_' + json_hash + '.json'
    path = os.path.join(directory, filename)
    with open(path, 'w') as fp:
        fp.write(json_data)
def reload_including_local(module):
    """Reload a module.

    If it isn't found, try to include the local service directory.
    This must be called from a thread that has acquired the import lock.

    :param module: the module to reload.
    """
    try:
        reload(module)
    except ImportError:
        # This can happen if the module was loaded in the immediate script
        # directory.  Add the service path and try again.
        if not hasattr(cherrypy.thread_data, "modulepath"):
            raise
        path = os.path.abspath(cherrypy.thread_data.modulepath)
        root = os.path.abspath(cherrypy.config.get("webroot"))
        # Only trust service paths that live inside the configured webroot
        # and are not already on sys.path.
        if path not in sys.path and (path == root
                                     or path.startswith(root + os.path.sep)):
            oldpath = sys.path
            try:
                # Prepend so the service directory wins the import lookup.
                sys.path = [path] + sys.path
                reload(module)
            finally:
                # Always restore the original path, even on failure.
                sys.path = oldpath
        else:
            raise
def get_jids():
    '''List all the jobs that we have stored.'''
    options = _get_options(ret=None)
    _response = _request(
        "GET",
        options['url'] + options['db'] + "/_all_docs?include_docs=true")
    # A valid CouchDB reply always carries 'total_rows'.
    if 'total_rows' not in _response:
        log.error('Didn\'t get valid response from requesting all docs: %s',
                  _response)
        return {}
    # The view returns every document (including design documents), so keep
    # only ids that parse as salt jids.
    ret = {}
    for row in _response['rows']:
        jid = row['id']
        if salt.utils.jid.is_jid(jid):
            ret[jid] = salt.utils.jid.format_jid_instance(jid, row['doc'])
    return ret
def _pattern_match ( self , item , pattern ) : """Determine whether the item supplied is matched by the pattern ."""
if pattern . endswith ( '*' ) : return item . startswith ( pattern [ : - 1 ] ) else : return item == pattern
def remove_decorators(src):
    """Remove Benchmark decorators from the source code.

    Drops every ``@...Benchmark...`` decorator line, including
    continuation lines of a multi-line decorator call, and returns the
    remaining source joined by newlines.

    :param src: source text to filter
    :returns: the source without Benchmark decorator lines
    """
    src = src.strip()
    kept = []
    in_decorator = False  # inside an unclosed multi-line decorator call
    for line in src.splitlines():
        stripped = line.strip()
        if in_decorator:
            # Continuation line of the decorator: drop it and close the
            # span once the argument list terminates.
            if stripped.endswith(')'):
                in_decorator = False
            continue
        if stripped.startswith('@') and 'Benchmark' in stripped:
            # FIX: only treat the decorator as multi-line when it actually
            # opens an argument list it does not close.  The original set
            # multi_line=True for a bare '@Benchmark' (no parens) and then
            # deleted the following code lines.
            if '(' in stripped and not stripped.endswith(')'):
                in_decorator = True
            continue
        kept.append(line)
    return '\n'.join(kept)
def send_msg(self, connection, data):
    """Send a length-prefixed JSON message.

    Parameters
    ----------
    connection : socket or connection
        Any object exposing ``sendall``.
    data :
        Data that can be serialized to json.
    """
    # Serialize and encode once; the length prefix must count encoded
    # bytes, not str characters (they differ for non-ASCII payloads).
    payload = json.dumps(data).encode()
    # Prefix each message with a 4-byte length (network byte order).
    # FIX: the original called .decode() on the packed bytes, which raises
    # UnicodeDecodeError for lengths whose big-endian bytes are not valid
    # UTF-8 (any byte >= 0x80) and could corrupt the prefix on re-encoding.
    header = struct.pack('>I', len(payload))
    connection.sendall(header + payload)
    return
def list_network_ip_availabilities(self, retrieve_all=True, **_params):
    """Fetch IP availability information for all networks."""
    path = self.network_ip_availabilities_path
    return self.list('network_ip_availabilities', path,
                     retrieve_all, **_params)
def authenticateRequest(self, service_request, username, password, **kwargs):
    """Processes an authentication request.  If no authenticator is
    supplied, then authentication succeeds.

    @return: C{Deferred}.
    @rtype: C{twisted.internet.defer.Deferred}
    """
    authenticator = self.getAuthenticator(service_request)
    if self.logger:
        self.logger.debug('Authenticator expands to: %r' % authenticator)
    if authenticator is None:
        # No authenticator configured: everybody passes.
        return defer.succeed(True)
    call_args = (username, password)
    if hasattr(authenticator, '_pyamf_expose_request'):
        # Authenticator asked to see the raw HTTP request as first arg.
        call_args = (kwargs.get('http_request', None),) + call_args
    return defer.maybeDeferred(authenticator, *call_args)
def set_printoptions(**kwargs):
    """Set printing options (deprecated; use set_option).

    These options determine the way JPEG 2000 boxes are displayed.

    Parameters
    ----------
    short : bool, optional
        When True, only the box ID, offset, and length are displayed.
    xml : bool, optional
        When False, printing of XML box / UUID XMP box contents is
        suppressed.
    codestream : bool, optional
        When False, the codestream segments are not printed.

    See also
    --------
    get_printoptions

    Examples
    --------
    >>> import glymur
    >>> glymur.set_printoptions(short=False, xml=True, codestream=True)
    """
    warnings.warn('Use set_option instead of set_printoptions.',
                  DeprecationWarning)
    valid_keys = ('short', 'xml', 'codestream')
    for key, value in kwargs.items():
        if key not in valid_keys:
            raise KeyError('"{0}" not a valid keyword parameter.'.format(key))
        set_option('print.' + key, value)
def get_type_string(item):
    """Return a short type string for *item*.

    pandas containers get their well-known names; everything else is
    extracted from ``repr(type(item))``, returning None when no match is
    found.
    """
    if isinstance(item, DataFrame):
        return "DataFrame"
    if isinstance(item, Index):
        return type(item).__name__
    if isinstance(item, Series):
        return "Series"
    matches = re.findall(r"<(?:type|class) '(\S*)'>",
                         to_text_string(type(item)))
    return matches[0] if matches else None
def decode(self, hashid):
    """Restore a tuple of numbers from the passed `hashid`.

    :param hashid The hashid to decode

    >>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456')
    >>> hashids.decode('1d6216i30h53elk3')
    (1, 23, 456)
    """
    # Empty / non-string input can never decode.
    if not hashid or not _is_str(hashid):
        return ()
    try:
        numbers = tuple(_decode(hashid, self._salt, self._alphabet,
                                self._separators, self._guards))
        # Guard against tampered hashids: only accept a decode that
        # round-trips back to the exact same string.
        if self.encode(*numbers) != hashid:
            return ()
        return numbers
    except ValueError:
        return ()
def remove_query_parameters(request, query_parameters_to_remove):
    """Wrap replace_query_parameters() for API backward compatibility.

    Removing a parameter is expressed as replacing it with ``None``.
    """
    return replace_query_parameters(
        request, [(name, None) for name in query_parameters_to_remove])
def alterar(self, id_brand, name):
    """Change Brand identified by ``id_brand``.

    :param id_brand: Identifier of the Brand. Integer value and greater than zero.
    :param name: Brand name. String with a minimum 3 and maximum of 100 characters

    :return: None

    :raise InvalidParameterError: The identifier of Brand or name is null and invalid.
    :raise NomeMarcaDuplicadoError: There is already a registered Brand with the value of name.
    :raise MarcaNaoExisteError: Brand not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(id_brand):
        raise InvalidParameterError(
            u'The identifier of Brand is invalid or was not informed.')
    url = 'brand/' + str(id_brand) + '/'
    payload = {'brand': {'name': name}}
    code, xml = self.submit(payload, 'PUT', url)
    return self.response(code, xml)
def _QName(self, typ, defNS):
    """Get fully qualified wsdl name (prefix:name).

    Returns a ``(xmlns-attribute, qualified-name)`` pair; the attribute is
    empty unless a new namespace declaration is needed.
    """
    ns, name = GetQualifiedWsdlName(typ)
    if ns == defNS:
        # Default namespace: no prefix, no declaration needed.
        return '', name
    attr = ''
    if ns in self.nsMap:
        prefix = self.nsMap[ns]
    else:
        # First time this ns is seen: derive a prefix and declare it.
        prefix = ns.split(':', 1)[-1]
        attr = ' xmlns:{0}="{1}"'.format(prefix, ns)
    qualified = '{0}:{1}'.format(prefix, name) if prefix else name
    return attr, qualified
def parse_buffer_to_jpeg(data):
    """Parse a buffer of concatenated JPEG bytes into Pillow Images.

    Splits on the JPEG end-of-image marker and re-appends it so each
    fragment is a complete JPEG stream.
    """
    eoi = b'\xff\xd9'  # JPEG end-of-image marker
    images = []
    # The final split element is the (empty) tail after the last marker.
    for fragment in data.split(eoi)[:-1]:
        images.append(Image.open(BytesIO(fragment + eoi)))
    return images
def log_with_color(level):
    """Build a logging function that colors its output by *level*."""
    def emit(text):
        # Resolve color and log method at call time so config changes
        # after creation are honored.
        color = log_colors_config[level.upper()]
        log_func = getattr(logger, level.lower())
        log_func(coloring(text, color))
    return emit
def Wp(self):
    """Total energy in protons above 1.22 GeV threshold (erg)."""
    from scipy.integrate import quad
    Eth = 1.22e-3  # threshold in TeV
    with warnings.catch_warnings():
        # quad can warn about slow convergence on power-law tails.
        warnings.simplefilter("ignore")
        # FIX: np.Inf alias was removed in NumPy 2.0; np.inf works on all
        # NumPy versions.
        Wp = quad(lambda x: x * self._particle_distribution(x),
                  Eth, np.inf)[0]
    return (Wp * u.TeV).to("erg")
def pop_frame(self):
    """Remove the frame at index 0 and return the frame that becomes
    the new head of the stack.

    NOTE(review): the original summary claimed this "removes and returns
    the frame at the top", but the code discards the popped frame and
    returns the *next* one — confirm which behavior callers rely on
    before changing the code.

    :returns: The frame exposed after the pop
    :rtype: Frame
    :raises Exception: If the stack becomes empty after the pop.  If it
        was already empty, ``list.pop`` raises IndexError instead.
    """
    self.frames.pop(0)
    if len(self.frames) == 0:
        raise Exception("stack is exhausted")
    return self.frames[0]
def register_image(self, name=None, description=None, image_location=None,
                   architecture=None, kernel_id=None, ramdisk_id=None,
                   root_device_name=None, block_device_map=None):
    """Register an image.

    :type name: string
    :param name: The name of the AMI.  Valid only for EBS-based images.

    :type description: string
    :param description: The description of the AMI.

    :type image_location: string
    :param image_location: Full path to your AMI manifest in Amazon S3
        storage.  Only used for S3-based AMI's.

    :type architecture: string
    :param architecture: The architecture of the AMI.  Valid choices are:
        i386 | x86_64

    :type kernel_id: string
    :param kernel_id: The ID of the kernel with which to launch the
        instances

    :type root_device_name: string
    :param root_device_name: The root device name (e.g. /dev/sdh)

    :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
    :param block_device_map: A BlockDeviceMapping data structure describing
        the EBS volumes associated with the Image.

    :rtype: string
    :return: The new image id
    """
    params = {}
    # Only truthy values are marshaled into the request.
    optional = (('Name', name),
                ('Description', description),
                ('Architecture', architecture),
                ('KernelId', kernel_id),
                ('RamdiskId', ramdisk_id),
                ('ImageLocation', image_location),
                ('RootDeviceName', root_device_name))
    for param_name, value in optional:
        if value:
            params[param_name] = value
    if block_device_map:
        block_device_map.build_list_params(params)
    rs = self.get_object('RegisterImage', params, ResultSet, verb='POST')
    return getattr(rs, 'imageId', None)
def listen_tta(self, target, timeout):
    """Listen *timeout* seconds for a Type A activation at 106 kbps.

    The ``sens_res``, ``sdd_res``, and ``sel_res`` response data must be
    provided and ``sdd_res`` must be a 4 byte UID that starts with
    ``08h``.  Depending on ``sel_res`` an activation may return a target
    with a ``tt2_cmd``, ``tt4_cmd`` or ``atr_req`` attribute.  The default
    RATS response sent for a Type 4 Tag activation can be replaced with a
    ``rats_res`` attribute.
    """
    # Pure delegation: this subclass only documents device-specific
    # behavior; the base driver implements the actual listen.
    return super(Device, self).listen_tta(target, timeout)
def iterate(maybe_iter, unless=(string_types, dict)):
    """Always return an iterable.

    Returns ``maybe_iter`` if it is an iterable, otherwise it returns a
    single element iterable containing ``maybe_iter``.  By default,
    strings and dicts are treated as non-iterable.  This can be overridden
    by passing in a type or tuple of types for ``unless``.

    :param maybe_iter: A value to return as an iterable.
    :param unless: A type or tuple of types (same as ``isinstance``) to be
        treated as non-iterable.

    Example::

        >>> iterate('foo')
        ['foo']
        >>> iterate(['foo'])
        ['foo']
        >>> iterate(['foo'], unless=list)
        [['foo']]
        >>> list(iterate(xrange(5)))
        [0, 1, 2, 3, 4]
    """
    return maybe_iter if is_iterable(maybe_iter, unless=unless) \
        else [maybe_iter]
def run(self):  # no "_" in the name: this runs in the forked process
    """After the fork.  Now the process starts running."""
    self.preRun_()
    self.running = True
    while self.running:
        if self.active:
            # A shared-mem client has been reserved: do one work cycle.
            self.cycle_()
        else:
            # Idle: back off briefly instead of busy-waiting.
            if self.verbose:
                print(self.pre, "sleep")
            time.sleep(0.2)
        self.handleSignal_()
    self.postRun_()
def untldict_normalizer(untl_dict, normalizations):
    """Normalize UNTL elements by their qualifier.

    Takes a UNTL descriptive metadata dictionary and a dictionary mapping
    element names to the qualifiers that need normalizing::

        {'element1': ['qualifier1', 'qualifier2'], 'element2': ['qualifier3']}

    and rewrites the content of every matching element in place.
    """
    for element_type, element_list in untl_dict.items():
        # Skip element types with no requested normalization.
        if element_type not in normalizations:
            continue
        norm_qualifiers = normalizations.get(element_type)
        # Registered normalizer functions for this element type (invariant
        # across the inner loop, so looked up once).
        elem_norms = ELEMENT_NORMALIZERS.get(element_type, None)
        for element in element_list:
            qualifier = element.get('qualifier', None)
            if qualifier not in norm_qualifiers:
                continue
            if not elem_norms or qualifier not in elem_norms:
                continue
            content = element.get('content', None)
            # Only rewrite non-empty content.
            if content:
                element['content'] = elem_norms[qualifier](content)
    return untl_dict
def _drop_privs(self):
    """Reduces effective privileges for this process to that of the
    task owner.  The umask and environment variables are also modified
    to recreate the environment of the user.
    """
    uid = self._task.owner
    # get pwd database info for task owner
    # FIX: getpwuid raises KeyError for an unknown uid; the original
    # caught OSError, which never fires here.
    try:
        pwd_info = pwd.getpwuid(uid)
    except KeyError:
        pwd_info = None
    if pwd_info:
        # set secondary group ids for user, must come first
        try:
            gids = [g.gr_gid for g in grp.getgrall()
                    if pwd_info.pw_name in g.gr_mem]
            gids.append(pwd_info.pw_gid)
            os.setgroups(gids)
        except OSError:
            pass
        # set group id, must come before uid
        # FIX: guarded by pwd_info; the original dereferenced pwd_info
        # unconditionally and an AttributeError escaped the OSError
        # handler when the uid had no passwd entry.
        try:
            os.setgid(pwd_info.pw_gid)
        except OSError:
            pass
    # set user id
    try:
        os.setuid(uid)
        # update user env variables
        if pwd_info:
            for k in ('USER', 'USERNAME', 'SHELL', 'HOME'):
                if k in os.environ:
                    if k in ('USER', 'USERNAME'):
                        val = pwd_info.pw_name
                    elif k == 'SHELL':
                        val = pwd_info.pw_shell
                    else:  # HOME
                        val = pwd_info.pw_dir
                    os.environ[k] = val
        # remove unneeded env variables (list() so we can delete while
        # scanning; items()/list() also work on Python 2 unlike the
        # original iteritems()).
        for k in [k for k in list(os.environ)
                  if k.startswith('SUDO_') or k == 'LOGNAME']:
            del os.environ[k]
    except OSError:
        pass
    # set default umask (0o22 spelling is valid on Python 2.6+ and 3,
    # unlike the py2-only literal 022)
    try:
        os.umask(0o22)
    except OSError:
        pass
def max_entropy_distribution(node_indices, number_of_nodes):
    """Return the maximum entropy distribution over a set of nodes.

    This is different from the network's uniform distribution because
    nodes outside ``node_indices`` are fixed and treated as if they have
    only 1 state.

    Args:
        node_indices (tuple[int]): The set of node indices over which to
            take the distribution.
        number_of_nodes (int): The total number of nodes in the network.

    Returns:
        np.ndarray: The maximum entropy distribution over the set of nodes.
    """
    shape = repertoire_shape(node_indices, number_of_nodes)
    uniform = np.ones(shape)
    # Normalize so the repertoire sums to one.
    return uniform / uniform.size
def set_blend_func(self, srgb='one', drgb='zero', salpha=None, dalpha=None):
    """Specify pixel arithmetic for RGB and alpha.

    Parameters
    ----------
    srgb : str
        Source RGB factor.
    drgb : str
        Destination RGB factor.
    salpha : str | None
        Source alpha factor.  If None, ``srgb`` is used.
    dalpha : str
        Destination alpha factor.  If None, ``drgb`` is used.
    """
    # Alpha factors default to their RGB counterparts.
    if salpha is None:
        salpha = srgb
    if dalpha is None:
        dalpha = drgb
    self.glir.command('FUNC', 'glBlendFuncSeparate',
                      srgb, drgb, salpha, dalpha)
def update_metadata(self, resource, keys_vals):
    """Update key-value pairs attached to the given resource.

    Attempts to update all key-value pairs even if some fail; keys must
    already exist.

    Args:
        resource (intern.resource.boss.BossResource)
        keys_vals (dictionary): Key-value pairs to update on the resource.

    Raises:
        HTTPErrorList on failure.
    """
    service = self.metadata_service
    service.set_auth(self._token_metadata)
    service.update(resource, keys_vals)
def main():
    """
    NAME
        core_depthplot.py

    DESCRIPTION
        plots various measurements versus core_depth or age.  plots data
        flagged as 'FS-SS-C' as discrete samples.

    SYNTAX
        core_depthplot.py [command line options]
        # or, for Anaconda users:
        core_depthplot_anaconda [command line options]

    OPTIONS
        -h prints help message and quits
        -f FILE: specify input measurments format file
        -fsum FILE: specify input LIMS database (IODP) core summary csv file
        -fwig FILE: specify input depth,wiggle to plot, in magic format with
            sample_core_depth key for depth
        -fsa FILE: specify input er_samples format file from magic for depth
        -fa FILE: specify input ages format file from magic for age
            NB: must have either -fsa OR -fa (not both)
        -fsp FILE sym size: specify input zeq_specimen format file from
            magic, sym and size
            NB: PCAs will have specified color, while fisher means will be
            white with specified color as the edgecolor
        -fres FILE specify input pmag_results file from magic, sym and size
        -LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot
        -S do not plot blanket treatment data (if this is set, you don't
            need the -LP)
        -sym SYM SIZE, symbol, size for continuous points (e.g., ro 5,
            bs 10, g^ 10 for red dot, blue square, green triangle),
            default is blue dot at 5 pt
        -D do not plot declination
        -M do not plot magnetization
        -log plot magnetization on a log scale
        -L do not connect dots with a line
        -I do not plot inclination
        -d min max [in m] depth range to plot
        -n normalize by weight in er_specimen table
        -Iex: plot the expected inc at lat - only available for results
            with lat info in file
        -ts TS amin amax: plot the GPTS for the time interval between amin
            and amax (numbers in Ma)
            TS: [ck95, gts04, gts12]
        -ds [mbsf,mcd] specify depth scale, mbsf default
        -fmt [svg, eps, pdf, png] specify output format for plot
            (default: svg)
        -sav save plot silently

    DEFAULTS:
        Measurements file: measurements.txt
        Samples file: samples.txt
        NRM step
        Summary file: none
    """
    args = sys.argv
    if '-h' in args:
        print(main.__doc__)
        sys.exit()
    # Declare every recognized flag with (name, required, default) so the
    # extractor can validate and fill in defaults.
    dataframe = extractor.command_line_dataframe(
        [['f', False, 'measurements.txt'], ['fsum', False, ''],
         ['fwig', False, ''], ['fsa', False, ''], ['fa', False, ''],
         ['fsp', False, ''], ['fres', False, ''], ['fmt', False, 'svg'],
         ['LP', False, ''], ['n', False, False], ['d', False, '-1 -1'],
         ['ts', False, ''], ['WD', False, '.'], ['L', False, True],
         ['S', False, True], ['D', False, True], ['I', False, True],
         ['M', False, True], ['log', False, 0],
         ['ds', False, 'sample_core_depth'], ['sym', False, 'bo 5'],
         ['ID', False, '.'], ['sav', False, False], ['DM', False, 3]])
    checked_args = extractor.extract_and_check_args(args, dataframe)
    meas_file, sum_file, wig_file, samp_file, age_file, spc_file, res_file, fmt, meth, norm, depth, timescale, dir_path, pltLine, pltSus, pltDec, pltInc, pltMag, logit, depth_scale, symbol, input_dir, save, data_model_num = extractor.get_vars(
        ['f', 'fsum', 'fwig', 'fsa', 'fa', 'fsp', 'fres', 'fmt', 'LP', 'n',
         'd', 'ts', 'WD', 'L', 'S', 'D', 'I', 'M', 'log', 'ds', 'sym',
         'ID', 'sav', 'DM'], checked_args)
    # format some variables
    # format symbol/size
    try:
        sym, size = symbol.split()
        size = int(size)
    except:
        print('you should provide -sym in this format: ro 5')
        print('using defaults instead')
        sym, size = 'ro', 5
    # format result file, symbol, size
    if res_file:
        try:
            res_file, res_sym, res_size = res_file.split()
        except:
            print('you must provide -fres in this format: -fres filename symbol size')
            print('could not parse {}, defaulting to using no result file'.format(res_file))
            res_file, res_sym, res_size = '', '', 0
    else:
        res_file, res_sym, res_size = '', '', 0
    # format specimen file, symbol, size
    if spc_file:
        try:
            spc_file, spc_sym, spc_size = spc_file.split()
        except:
            print('you must provide -fsp in this format: -fsp filename symbol size')
            print('could not parse {}, defaulting to using no specimen file'.format(spc_file))
            spc_file, spc_sym, spc_size = '', '', 0
    else:
        spc_file, spc_sym, spc_size = '', '', 0
    # format min/max depth
    try:
        dmin, dmax = depth.split()
    except:
        print('you must provide -d in this format: -d dmin dmax')
        print('could not parse {}, defaulting to plotting all depths'.format(depth))
        dmin, dmax = -1, -1
    # format timescale, min/max time
    if timescale:
        try:
            timescale, amin, amax = timescale.split()
            pltTime = True
        except:
            print('you must provide -ts in this format: -ts timescale minimum_age maximum_age')
            print('could not parse {}, defaulting to using no timescale'.format(timescale))
            timescale, amin, amax = None, -1, -1
            pltTime = False
    else:
        timescale, amin, amax = None, -1, -1
        pltTime = False
    # format norm and wt_file (-n with a value means "normalize by this
    # weight file"; bare -n arrives as a bool)
    if norm and not isinstance(norm, bool):
        wt_file = norm
        norm = True
    else:
        norm = False
        wt_file = ''
    # format list of protcols and step
    try:
        method, step = meth.split()
    except:
        print('To use the -LP flag you must provide both the protocol and the step in this format:\n-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot')
        print('Defaulting to using no protocol')
        method, step = 'LT-NO', 0
    # list of varnames
    # ['f', 'fsum', 'fwig', 'fsa', 'fa', 'fsp', 'fres', 'fmt', 'LP', 'n',
    #  'd', 'ts', 'WD', 'L', 'S', 'D', 'I', 'M', 'log', 'ds', 'sym']
    # meas_file, sum_file, wig_file, samp_file, age_file, spc_file,
    # res_file, fmt, meth, norm, depth, timescale, dir_path, pltLine,
    # pltSus, pltDec, pltInc, pltMag, logit, depth_scale, symbol
    # Hand everything to the plotting routine.
    fig, figname = ipmag.core_depthplot(
        input_dir, meas_file, spc_file, samp_file, age_file, sum_file,
        wt_file, depth_scale, dmin, dmax, sym, size, spc_sym, spc_size,
        method, step, fmt, pltDec, pltInc, pltMag, pltLine, pltSus, logit,
        pltTime, timescale, amin, amax, norm, data_model_num)
    if not pmagplotlib.isServer:
        # Colons are not filesystem-safe on all platforms.
        figname = figname.replace(':', '_')
    if fig and save:
        # Silent save requested (-sav): write the file and return without
        # opening a GUI.
        print('-I- Created plot: {}'.format(figname))
        plt.savefig(figname)
        return
    app = wx.App(redirect=False)
    if not fig:
        pw.simple_warning('No plot was able to be created with the data you provided.\nMake sure you have given all the required information and try again')
        return False
    # Size the interactive window from the figure's pixel dimensions.
    dpi = fig.get_dpi()
    pixel_width = dpi * fig.get_figwidth()
    pixel_height = dpi * fig.get_figheight()
    figname = os.path.join(dir_path, figname)
    plot_frame = pmag_menu_dialogs.PlotFrame(
        (int(pixel_width), int(pixel_height + 50)), fig, figname,
        standalone=True)
    app.MainLoop()
def validate(cls, value):
    """Raise |ValueError| if *value* is not an assignable value."""
    if value in cls._valid_settings:
        return
    raise ValueError(
        "%s not a member of %s enumeration" % (value, cls.__name__))