signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def print_archive(self, format=True):
    """Print out the archived WCS keywords for this object.

    Parameters
    ----------
    format : bool
        If True, print a human-readable formatted summary of the archived
        WCS; if False, dump each archived keyword as ``KEY = value`` lines.
    """
    # Only report if a WCS archive has actually been stored.
    if len(list(self.orig_wcs.keys())) > 0:
        block = 'Original WCS keywords for ' + self.rootname + '\n'
        block += ' backed up on ' + repr(self.orig_wcs['WCSCDATE']) + '\n'
        if not format:
            for key in self.wcstrans.keys():
                block += key.upper() + " = " + repr(self.get_archivekw(key)) + '\n'
            # BUG FIX: the original used ``block = ...`` here, which discarded
            # everything accumulated above; append instead.
            block += 'PA_V3: ' + repr(self.pa_obs) + '\n'
        else:
            block += 'CD_11 CD_12: ' + repr(self.get_archivekw('CD1_1')) + ' ' + repr(self.get_archivekw('CD1_2')) + '\n'
            block += 'CD_21 CD_22: ' + repr(self.get_archivekw('CD2_1')) + ' ' + repr(self.get_archivekw('CD2_2')) + '\n'
            block += 'CRVAL : ' + repr(self.get_archivekw('CRVAL1')) + ' ' + repr(self.get_archivekw('CRVAL2')) + '\n'
            block += 'CRPIX : ' + repr(self.get_archivekw('CRPIX1')) + ' ' + repr(self.get_archivekw('CRPIX2')) + '\n'
            block += 'NAXIS : ' + repr(int(self.get_archivekw('NAXIS1'))) + ' ' + repr(int(self.get_archivekw('NAXIS2'))) + '\n'
            block += 'Plate Scale : ' + repr(self.get_archivekw('pixel scale')) + '\n'
            block += 'ORIENTAT : ' + repr(self.get_archivekw('ORIENTAT')) + '\n'
        print(block)
def print_dataset_summary(self):
    """Print information about the BIDS data and the files currently selected.

    Reports subjects, tasks, runs, sessions, the configured derivative
    pipeline, and (if a pipeline is set) the currently selected files.
    """
    print('--- DATASET INFORMATION ---')
    print('--- Subjects ---')
    if self.raw_data_exists:
        if self.BIDS.get_subjects():
            print('Number of subjects (in dataset): ' + str(len(self.BIDS.get_subjects())))
            print('Subjects (in dataset): ' + ', '.join(self.BIDS.get_subjects()))
        else:
            print('NO SUBJECTS FOUND (is the BIDS directory specified correctly?)')
    print('Number of subjects (selected): ' + str(len(self.bids_tags['sub'])))
    print('Subjects (selected): ' + ', '.join(self.bids_tags['sub']))
    if isinstance(self.bad_subjects, list):
        print('Bad subjects: ' + ', '.join(self.bad_subjects))
    else:
        print('Bad subjects: 0')
    print('--- Tasks ---')
    if self.raw_data_exists:
        if self.BIDS.get_tasks():
            print('Number of tasks (in dataset): ' + str(len(self.BIDS.get_tasks())))
            print('Tasks (in dataset): ' + ', '.join(self.BIDS.get_tasks()))
    if 'task' in self.bids_tags:
        print('Number of tasks (selected): ' + str(len(self.bids_tags['task'])))
        print('Tasks (selected): ' + ', '.join(self.bids_tags['task']))
    else:
        print('No task names found')
    print('--- Runs ---')
    if self.raw_data_exists:
        if self.BIDS.get_runs():
            print('Number of runs (in dataset): ' + str(len(self.BIDS.get_runs())))
            print('Runs (in dataset): ' + ', '.join(self.BIDS.get_runs()))
    if 'run' in self.bids_tags:
        print('Number of runs (selected): ' + str(len(self.bids_tags['run'])))
        # BUG FIX: was 'Rubs (selected)'
        print('Runs (selected): ' + ', '.join(self.bids_tags['run']))
    else:
        print('No run names found')
    print('--- Sessions ---')
    if self.raw_data_exists:
        if self.BIDS.get_sessions():
            # BUG FIX: label said 'Number of runs' in the Sessions section
            print('Number of sessions (in dataset): ' + str(len(self.BIDS.get_sessions())))
            print('Sessions (in dataset): ' + ', '.join(self.BIDS.get_sessions()))
    if 'ses' in self.bids_tags:
        print('Number of sessions (selected): ' + str(len(self.bids_tags['ses'])))
        print('Sessions (selected): ' + ', '.join(self.bids_tags['ses']))
    else:
        print('No session names found')
    print('--- PREPROCESSED DATA (Pipelines/Derivatives) ---')
    if not self.pipeline:
        print('Derivative pipeline not set. To set, run TN.set_pipeline()')
    else:
        print('Pipeline: ' + self.pipeline)
        if self.pipeline_subdir:
            print('Pipeline subdirectories: ' + self.pipeline_subdir)
        selected_files = self.get_selected_files(quiet=1)
        if selected_files:
            print('--- SELECTED DATA ---')
            # BUG FIX: was 'Numnber of selected files'
            print('Number of selected files: ' + str(len(selected_files)))
            print('\n - '.join(selected_files))
def Parse(self, stat, file_object, knowledge_base):
    """Parse the History file."""
    _ = knowledge_base
    # TODO(user): Convert this to use the far more intelligent plaso parser.
    parser = ChromeParser(file_object)
    for timestamp, entry_type, url, data1, _, _ in parser.Parse():
        # Fields shared by both entry kinds; data1 is a download path for
        # download entries and a page title for visit entries.
        common = dict(
            url=url,
            domain=urlparse.urlparse(url).netloc,
            access_time=timestamp,
            program_name="Chrome",
            source_path=file_object.Path(),
        )
        if entry_type == "CHROME_DOWNLOAD":
            yield rdf_webhistory.BrowserHistoryItem(download_path=data1, **common)
        elif entry_type == "CHROME_VISIT":
            yield rdf_webhistory.BrowserHistoryItem(title=data1, **common)
def infer_typing_attr(node, context=None):
    """Infer a typing.X[...] subscript."""
    try:
        value = next(node.value.infer())
    except InferenceError as exc:
        raise UseInferenceDefault from exc
    qualified = value.qname()
    # Only subscripts of names from the typing module are handled here.
    if not qualified.startswith("typing."):
        raise UseInferenceDefault
    short_name = qualified.split(".")[-1]
    replacement = extract_node(TYPING_TYPE_TEMPLATE.format(short_name))
    return replacement.infer(context=context)
def find_view_function(module_name, function_name, fallback_app=None, fallback_template=None, verify_decorator=True):
    '''Finds a view function, class-based view, or template view.

    Resolution order: if the view module does not exist, a template-rendering
    view is created from ``fallback_app``/``fallback_template``; otherwise the
    named attribute is loaded from the module and, if it is a
    ``django.views.generic.View`` subclass, converted with ``as_view()``.

    Raises ViewDoesNotExist if not found.
    '''
    dmp = apps.get_app_config('django_mako_plus')
    # I'm first calling find_spec first here because I don't want import_module in
    # a try/except -- there are lots of reasons that importing can fail, and I just want to
    # know whether the file actually exists. find_spec raises AttributeError if not found.
    try:
        spec = find_spec(module_name)
    except ValueError:
        spec = None
    if spec is None:
        # no view module, so create a view function that directly renders the template
        try:
            return create_view_for_template(fallback_app, fallback_template)
        except TemplateDoesNotExist as e:
            raise ViewDoesNotExist('view module {} not found, and fallback template {} could not be loaded ({})'.format(module_name, fallback_template, e))
    # load the module and function
    try:
        module = import_module(module_name)
        func = getattr(module, function_name)
        func.view_type = 'function'
    except ImportError as e:
        raise ViewDoesNotExist('module "{}" could not be imported: {}'.format(module_name, e))
    except AttributeError as e:
        raise ViewDoesNotExist('module "{}" found successfully, but "{}" was not found: {}'.format(module_name, function_name, e))
    # if class-based view, call as_view() to get a view function to it
    if inspect.isclass(func) and issubclass(func, View):
        func = func.as_view()
        func.view_type = 'class'
    # if regular view function, check the decorator
    elif verify_decorator and not view_function.is_decorated(func):
        raise ViewDoesNotExist("view {}.{} was found successfully, but it must be decorated with @view_function or be a subclass of django.views.generic.View.".format(module_name, function_name))
    # attach a converter to the view function
    if dmp.options['PARAMETER_CONVERTER'] is not None:
        try:
            converter = import_qualified(dmp.options['PARAMETER_CONVERTER'])(func)
            setattr(func, CONVERTER_ATTRIBUTE_NAME, converter)
        except ImportError as e:
            raise ImproperlyConfigured('Cannot find PARAMETER_CONVERTER: {}'.format(str(e)))
    # return the function / class
    return func
def _joliet_name_and_parent_from_path(self, joliet_path):
    # type: (bytes) -> Tuple[bytes, dr.DirectoryRecord]
    '''An internal method to find the parent directory record and name for a
    Joliet path.

    Parameters:
     joliet_path - The absolute Joliet path to the entry on the ISO.
    Returns:
     A tuple of (UTF-16-BE-encoded basename, parent DirectoryRecord).
    '''
    components = utils.split_path(joliet_path)
    basename = components.pop()
    # Joliet (UCS-2) limits file identifiers to 64 characters.
    if len(basename) > 64:
        raise pycdlibexception.PyCdlibInvalidInput('Joliet names can be a maximum of 64 characters')
    parent_path = b'/' + b'/'.join(components)
    parent = self._find_joliet_record(parent_path)
    joliet_name = basename.decode('utf-8').encode('utf-16_be')
    return (joliet_name, parent)
def get_data(self):
    "Return one SNMP response list for all status OIDs, and one list for all metric OIDs."
    # Build Varbinds for every alarm/metric OID defined for this modem model.
    alarm_oids = [netsnmp.Varbind(status_mib[alarm_id]['oid']) for alarm_id in self.models[self.modem_type]['alarms']]
    metric_oids = [netsnmp.Varbind(metric_mib[metric_id]['oid']) for metric_id in self.models[self.modem_type]['metrics']]
    # Single SNMP GET for all OIDs at once; the response preserves request order,
    # so it can be split back into the alarm part and the metric part by length.
    response = self.snmp_session.get(netsnmp.VarList(*alarm_oids + metric_oids))
    return (response[0:len(alarm_oids)], response[len(alarm_oids):])
async def execute_command(self, *args: bytes, timeout: NumType = None) -> SMTPResponse:
    """Send an SMTP command (with any arguments) to the server and return its response."""
    # SMTP commands are space-separated and CRLF-terminated.
    line = b" ".join(args) + b"\r\n"
    await self.write_and_drain(line, timeout=timeout)
    return await self.read_response(timeout=timeout)
def http_request(self, path, method='GET', content=None, content_type="application/json", response_format=FMT_JSON):
    """Perform an administrative HTTP request against the cluster's
    Management/REST API.

    This is a fairly low level function; prefer the helper methods in this
    class where possible.

    :param string path: The path portion (not including the host) of the rest
        call to perform, including any encoded arguments.
    :param string method: HTTP method: ``GET``, ``POST``, ``PUT`` or ``DELETE``.
    :param bytes content: Request body; only applicable to ``PUT``/``POST``.
    :param string content_type: Value for the HTTP ``Content-Type`` header.
    :param int response_format: Hint about how to decode the response value;
        defaults to JSON. If conversion fails, content is returned as bytes.
    :raise: :exc:`~.ArgumentError` for an unknown method,
        :exc:`~.ConnectError` on connection problems,
        :exc:`~.HTTPError` on a negative server reply.
    :return: a :class:`~.HttpResult` object.

    .. seealso:: :meth:`bucket_create`, :meth:`bucket_remove`
    """
    # Dropped the dead ``imeth = None`` pre-assignment and used the
    # idiomatic ``not in`` membership test.
    if method not in METHMAP:
        raise E.ArgumentError.pyexc("Unknown HTTP Method", method)
    imeth = METHMAP[method]
    return self._http_request(type=LCB.LCB_HTTP_TYPE_MANAGEMENT,
                              path=path,
                              method=imeth,
                              content_type=content_type,
                              post_data=content,
                              response_format=response_format)
def get(self, bucket=None, versions=missing, uploads=missing):
    """Get list of objects in the bucket.

    :param bucket: A :class:`invenio_files_rest.models.Bucket` instance.
    :returns: The Flask response.
    """
    # An explicit ``uploads`` argument switches to the multipart-upload listing.
    if uploads is not missing:
        return self.multipart_listuploads(bucket)
    return self.listobjects(bucket, versions)
def get_order_columns_list(self, list_columns=None):
    """Return the columns that can be ordered.

    :param list_columns: optional list of column names; if provided, only
        these names are considered.
    """
    orderable = []
    for name in (list_columns or self.get_columns_list()):
        if not hasattr(self.obj, name):
            # Name is not an attribute of the model -- keep it as-is.
            orderable.append(name)
        elif not callable(getattr(self.obj, name)):
            # Plain (non-callable) attributes are orderable columns.
            orderable.append(name)
    return orderable
def write ( self ) : """attempt to get a chunk of data to write to our child process ' s stdin , then write it . the return value answers the questions " are we done writing forever ? " """
# get _ chunk may sometimes return bytes , and sometimes return strings # because of the nature of the different types of STDIN objects we # support try : chunk = self . get_chunk ( ) if chunk is None : raise DoneReadingForever except DoneReadingForever : self . log . debug ( "done reading" ) if self . tty_in : # EOF time try : char = termios . tcgetattr ( self . stream ) [ 6 ] [ termios . VEOF ] except : char = chr ( 4 ) . encode ( ) # normally , one EOF should be enough to signal to an program # that is read ( ) ing , to return 0 and be on your way . however , # some programs are misbehaved , like python3.1 and python3.2. # they don ' t stop reading sometimes after read ( ) returns 0. # this can be demonstrated with the following program : # import sys # sys . stdout . write ( sys . stdin . read ( ) ) # then type ' a ' followed by ctrl - d 3 times . in python # 2.6,2.7,3.3,3.4,3.5,3.6 , it only takes 2 ctrl - d to terminate . # however , in python 3.1 and 3.2 , it takes all 3. # so here we send an extra EOF along , just in case . i don ' t # believe it can hurt anything os . write ( self . stream , char ) os . write ( self . stream , char ) return True except NotYetReadyToRead : self . log . debug ( "received no data" ) return False # if we ' re not bytes , make us bytes if IS_PY3 and hasattr ( chunk , "encode" ) : chunk = chunk . encode ( self . encoding ) for proc_chunk in self . stream_bufferer . process ( chunk ) : self . log . debug ( "got chunk size %d: %r" , len ( proc_chunk ) , proc_chunk [ : 30 ] ) self . log . debug ( "writing chunk to process" ) try : os . write ( self . stream , proc_chunk ) except OSError : self . log . debug ( "OSError writing stdin chunk" ) return True
def _input_as_lines(self, data):
    """Write a sequence of lines to a temp file and return its filename.

    data: a sequence whose elements each become one line in the file.
    Note: a trailing '\n' is stripped from each element before writing, so
    accidental double newlines never reach the file.
    """
    self._input_filename = self.getTmpFilename(self.WorkingDir, suffix='.fasta')
    with open(self._input_filename, 'w') as out:
        # Iterate lazily so a large input sequence is never fully materialized.
        for item in data:
            out.write(str(item).strip('\n'))
            out.write('\n')
    return self._input_filename
def convert(input, width=132, output=None, keep=False):
    """Convert an ASCII trailer file to a FITS file in the same format as
    used by 'stwfits' from IRAF.

    Parameters
    ----------
    input : str
        Filename of input ASCII trailer file.
    width : int
        Number of characters wide to use for defining the output FITS column.
        [Default: 132]
    output : str
        Filename for the converted FITS trailer file. If None, the input
        filename is converted from *.tra -> *_trl.fits. [Default: None]
    keep : bool
        Whether to keep any previously written FITS file (raises instead of
        overwriting). [Default: False]
    """
    # Read and word-wrap all lines; the context manager guarantees the
    # handle is closed even if wrapping raises (the original leaked it).
    with open(input) as trl:
        lines = np.array([i for text in trl.readlines() for i in textwrap.wrap(text, width=width)])
    if output is None:
        # derive the output name: '<root>.tra' -> '<root>_trl.fits'
        rootname, suffix = os.path.splitext(input)
        s = suffix[1:].replace('ra', 'rl')
        fitsname = "{}_{}{}fits".format(rootname, s, os.path.extsep)
    else:
        fitsname = output
    full_name = os.path.abspath(os.path.join(os.path.curdir, fitsname))
    if os.path.exists(full_name):
        if keep:
            print("ERROR: Trailer file already written out as: {}".format(full_name))
            # Give the exception a message instead of raising bare IOError.
            raise IOError("Trailer file already exists: {}".format(full_name))
        os.remove(full_name)
    # Build FITS table and write it out
    line_fmt = "{}A".format(width)
    tbhdu = fits.BinTableHDU.from_columns([fits.Column(name='TEXT_FILE', format=line_fmt, array=lines)])
    tbhdu.writeto(fitsname)
    print("Created output FITS filename for trailer:{} {}".format(os.linesep, full_name))
    os.remove(input)
def _augment ( graph , capacity , flow , val , u , target , visit ) : """Find an augmenting path from u to target with value at most val"""
visit [ u ] = True if u == target : return val for v in graph [ u ] : cuv = capacity [ u ] [ v ] if not visit [ v ] and cuv > flow [ u ] [ v ] : # reachable arc res = min ( val , cuv - flow [ u ] [ v ] ) delta = _augment ( graph , capacity , flow , res , v , target , visit ) if delta > 0 : flow [ u ] [ v ] += delta # augment flow flow [ v ] [ u ] -= delta return delta return 0
def _return_result(self, done):
    """Set the returned future's state to that of the future we yielded,
    and update the iterator's bookkeeping for the just-finished future.
    """
    # Propagate the result/exception of ``done`` into the future that was
    # handed out to the caller.
    chain_future(done, self._running_future)
    # Record which future completed and its original position.
    self.current_future = done
    self.current_index = self._unfinished.pop(done)
def watch(self, key, criteria, callback):
    """Register a new watch under ``key`` (usable with ``unwatch()``) that
    filters messages using ``criteria`` (a predicate or a 'criteria dict';
    see the README). Matching messages are passed to ``callback``, which
    must accept three arguments: the matched incoming message, this
    ``WatchableConnection`` instance, and the registration key.
    """
    if callable(criteria):
        predicate = criteria
    else:
        # Wrap a criteria dict into a predicate over incoming messages.
        def predicate(incoming):
            return _match_criteria(criteria, incoming)
    with self._watches_lock:
        self._watches[key] = (predicate, callback)
def stopDtmfAcknowledge():
    """STOP DTMF ACKNOWLEDGE Section 9.3.30"""
    header = TpPd(pd=0x3)
    body = MessageType(mesType=0x32)  # 0b00110010
    return header / body
def required_for_output(inputs, outputs, connections):
    """Collect the nodes whose state is required to compute the final
    network output(s).

    :param inputs: list of the input identifiers
    :param outputs: list of the output node identifiers
    :param connections: list of (input, output) connections in the network.

    NOTE: assumes the input identifier set and the node identifier set are
    disjoint. Returns a set of identifiers of required nodes.
    """
    required = set(outputs)
    seen = set(outputs)
    while True:
        # Nodes not yet seen whose output feeds something already seen.
        frontier = {a for (a, b) in connections if b in seen and a not in seen}
        if not frontier:
            break
        new_nodes = {x for x in frontier if x not in inputs}
        if not new_nodes:
            break
        required |= new_nodes
        seen |= frontier
    return required
def VictoryEnum(ctx):
    """Victory Type Enumeration."""
    # Names map directly onto the recorded victory-type codes.
    mapping = dict(
        standard=0, conquest=1, exploration=2, ruins=3,
        artifacts=4, discoveries=5, gold=6, time_limit=7,
        score=8, standard2=9, regicide=10, last_man=11,
    )
    return Enum(ctx, **mapping)
def connect(self):
    """Connect to host and get meta information."""
    self.urlobj = getImageObject(self.url, self.referrer, self.session)
    # Strip any parameters (e.g. "; charset=...") from the Content-Type value.
    content_type = unquote(self.urlobj.headers.get('content-type', 'application/octet-stream'))
    content_type = content_type.split(';', 1)[0]
    if '/' in content_type:
        maintype, subtype = content_type.split('/', 1)
    else:
        maintype = content_type
        subtype = None
    # Reject non-image responses, but tolerate generic/flash content types
    # that some servers use for images.
    if maintype != 'image' and content_type not in ('application/octet-stream', 'application/x-shockwave-flash'):
        raise IOError('content type %r is not an image at %s' % (content_type, self.url))
    # Always use mime type for file extension if it is sane.
    if maintype == 'image':
        self.ext = '.' + subtype.replace('jpeg', 'jpg')
    self.contentLength = int(self.urlobj.headers.get('content-length', 0))
    out.debug(u'... filename = %r, ext = %r, contentLength = %d' % (self.filename, self.ext, self.contentLength))
def _seconds_to_days ( cls , val , ** kwargs ) : '''converts a number of seconds to days'''
zero_value = kwargs . get ( 'zero_value' , 0 ) if val is not None : if val == zero_value : return 0 return val / 86400 else : return 'Not Defined'
def load_symbols_elf(filename):
    """Load the symbol tables contained in the file.

    Returns a dict mapping address -> (name, size, True) for every defined
    FUNC symbol found in the file's symbol table sections.
    """
    symbols = []
    # Use a context manager so the handle is closed even if parsing raises
    # (the original leaked the file on any exception).
    with open(filename, 'rb') as f:
        elffile = ELFFile(f)
        for section in elffile.iter_sections():
            if not isinstance(section, SymbolTableSection):
                continue
            if section['sh_entsize'] == 0:
                # logger.warn() is deprecated; warning() is the supported name.
                logger.warning("Symbol table {} has a sh_entsize of zero.".format(section.name))
                continue
            logger.info("Symbol table {} contains {} entries.".format(section.name, section.num_symbols()))
            # The index from enumerate() was never used; iterate directly.
            for symbol in section.iter_symbols():
                if (describe_symbol_shndx(symbol['st_shndx']) != "UND"
                        and describe_symbol_type(symbol['st_info']['type']) == "FUNC"):
                    symbols.append((symbol['st_value'], symbol['st_size'], symbol.name))
    return {addr: (name, size, True) for addr, size, name in symbols}
def _hasViewChangeQuorum(self):
    # This method should just be present for master instance.
    """Check whether n-f nodes completed view change and report progress."""
    ready = len(self._view_change_done)
    remaining = self.quorum - ready
    if remaining > 0:
        logger.info('{} needs {} ViewChangeDone messages'.format(self, remaining))
        return False
    logger.info("{} got view change quorum ({} >= {})".format(self.name, ready, self.quorum))
    return True
def _volume ( shape ) : """Return the volume of a shape ."""
prod = 1 for start , stop in shape : prod *= stop - start return prod
def IsTensorFlowEventsFile(path):
    """Check the path name to see if it is probably a TF Events file.

    Args:
      path: A file path to check if it is an event file.
    Raises:
      ValueError: If the path is an empty string.
    Returns:
      Whether path is formatted like a TensorFlow events file.
    """
    if not path:
        raise ValueError('Path must be a nonempty string')
    basename = os.path.basename(path)
    return 'tfevents' in tf.compat.as_str_any(basename)
def dprintx(passeditem, special=False):
    """Print text if DEBUGALL is set, optionally with pretty-printing.

    Args:
        passeditem (str): item to print
        special (bool): pretty-print the item instead of a plain print.
    """
    # Guard clause: silent no-op unless debugging is enabled.
    if not DEBUGALL:
        return
    if special:
        from pprint import pprint
        pprint(passeditem)
    else:
        print("%s%s%s" % (C_TI, passeditem, C_NORM))
def to_bytes(x, blocksize=0):
    """Converts input to a byte string, left-zero-padded to ``blocksize``.

    Typically used in PyCrypto as an argument (e.g., key, iv).

    NOTE(review): this relies on the Python 2 names ``basestring`` and
    ``long`` -- presumably a compat shim defines them on Python 3; confirm.

    :param x: string (does nothing), bytearray, or sequence of numbers
    :return: padded byte string
    """
    if isinstance(x, bytearray):
        return left_zero_pad(bytes(x), blocksize)
    elif isinstance(x, basestring):
        return left_zero_pad(x, blocksize)
    elif isinstance(x, (list, tuple)):
        # Sequence of ints -> raw bytes.
        return left_zero_pad(bytes(bytearray(x)), blocksize)
    elif isinstance(x, (long, int)):
        return long_to_bytes(x, blocksize)
    else:
        raise ValueError('Unknown input argument type')
def windings_aligned(triangles, normals_compare):
    """Given a list of triangles and a list of normals, determine whether
    the two are aligned.

    Parameters
    ----------
    triangles : (n, 3, 3) float
        Vertex locations in space
    normals_compare : (n, 3) float
        List of normals to compare

    Returns
    -------
    aligned : (n,) bool
        Are normals aligned with triangles
    """
    triangles = np.asanyarray(triangles, dtype=np.float64)
    if not util.is_shape(triangles, (-1, 3, 3)):
        raise ValueError('Triangles must be (n,3,3)!')
    calculated, valid = normals(triangles)
    difference = util.diagonal_dot(calculated, normals_compare[valid])
    # BUG FIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``bool`` is the supported dtype spelling.
    aligned = np.zeros(len(triangles), dtype=bool)
    aligned[valid] = difference > 0.0
    return aligned
def get_schema_from_list(table_name, frum):
    """SCAN THE LIST FOR COLUMN TYPES"""
    # Accumulate discovered columns (unique by name) while walking the list.
    column_index = UniqueIndex(keys=("name",))
    _get_schema_from_list(
        frum, ".",
        parent=".",
        nested_path=ROOT_PATH,
        columns=column_index,
    )
    return Schema(table_name=table_name, columns=list(column_index))
def remove ( cls , resource_id , parent_id = None , grandparent_id = None , wait = True , timeout = None ) : """Delete the required resource ."""
raise exception . NotSupported ( feature = "DELETE" , context = "VirtualSwitchManager" )
def itervalues(self, key=_absent):
    """Parity with dict.itervalues(), plus an optional <key> parameter.

    If <key> is provided, only values from items with that key are iterated
    over; KeyError is raised if <key> is provided and not in the dictionary.

    Example:
      omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)])
      omd.itervalues(1) -> 1 -> 11 -> 111
      omd.itervalues() -> 1 -> 11 -> 111 -> 2 -> 3

    Raises: KeyError if <key> is provided and isn't in the dictionary.
    Returns: An iterator over the dictionary's values, or only the values of
    <key> if <key> is provided.
    """
    # No key: yield the first value stored for every key, in order.
    if key is _absent:
        return iter([nodes[0].value for nodes in six.itervalues(self._map)])
    if key in self:
        return iter([node.value for node in self._map[key]])
    raise KeyError(key)
def sort_tuples(tuples_list, order_list):
    """Sort tuples according to the key order given in ``order_list``.

    Examples:
        >>> sort_tuples([(4, 3), (1, 9), (2, 10), (3, 2)], [1, 4, 2, 3])
        [(1, 9), (4, 3), (2, 10), (3, 2)]

    Args:
        tuples_list: A list of (key, value) tuples to be sorted.
        order_list: The key order in which to arrange the tuples.

    Returns:
        A list of tuples arranged as per order_list.
    """
    lookup = dict(tuples_list)
    return [(key, lookup[key]) for key in order_list]
def inputindex(input):
    """Handler for showing keyboard or mouse page with day and total links."""
    stats = {}
    # Aggregate columns for the summary row of each event table.
    countminmax = "SUM(count) AS count, MIN(day) AS first, MAX(day) AS last"
    # Mouse pages cover move/click/scroll tables; keyboard pages cover keys/combos.
    tables = ("moves", "clicks", "scrolls") if "mouse" == input else ("keys", "combos")
    for table in tables:
        stats[table] = db.fetchone("counts", countminmax, type=table)
        # Per-day breakdown, newest first.
        stats[table]["days"] = db.fetch("counts", order="day DESC", type=table)
    # locals() exposes stats/tables/input to the template.
    return bottle.template("input.tpl", locals(), conf=conf)
def run(command, **kwargs):
    """Execute ``command``, blocking until execution is complete.

    Parameters
    ----------
    command : str
        Shell-style command line; split with shlex before execution.
    **kwargs
        Extra keyword arguments forwarded to ``subprocess.Popen``.

    Returns
    -------
    int
        The child process's exit code.
    """
    # BUG FIX: the original used the Python 2 ``print command`` statement,
    # which is a SyntaxError on Python 3; print() works on both.
    print(command)
    p = Popen(shlex.split(command), **kwargs)
    p.wait()
    return p.returncode
def bake(self):
    """Find absolute times for all keys.

    Absolute time is stored in the KeyFrame dictionary as the variable
    __abs_time__.
    """
    # Reset to a clean state so stale absolute times never survive a re-bake.
    self.unbake()
    for key in self.dct:
        self.get_absolute_time(key)
    self.is_baked = True
def _formatVals ( self , val_list ) : """Formats value list from Munin Graph and returns multi - line value entries for the plugin fetch cycle . @ param val _ list : List of name - value pairs . @ return : Multi - line text ."""
vals = [ ] for ( name , val ) in val_list : if val is not None : if isinstance ( val , float ) : vals . append ( "%s.value %f" % ( name , val ) ) else : vals . append ( "%s.value %s" % ( name , val ) ) else : vals . append ( "%s.value U" % ( name , ) ) return "\n" . join ( vals )
def make_interactive_tree(matrix=None, labels=None):
    '''make interactive tree will return complete html for an interactive tree

    Builds a Ward-linkage hierarchy from a pandas DataFrame and converts it
    into a nested dict (children/name) suitable for a d3 tree; returns None
    if the input is not a DataFrame.

    :param title: a title for the plot, if not defined, will be left out.
    '''
    from scipy.cluster.hierarchy import (dendrogram, linkage, to_tree)
    d3 = None
    from scipy.cluster.hierarchy import cophenet
    from scipy.spatial.distance import pdist
    if isinstance(matrix, pandas.DataFrame):
        Z = linkage(matrix, 'ward')  # clusters
        T = to_tree(Z, rd=False)
        # Default the leaf labels to the DataFrame's index.
        if labels == None:
            labels = matrix.index.tolist()
        lookup = dict(zip(range(len(labels)), labels))
        # Create a dendrogram object without plotting
        dend = dendrogram(Z, no_plot=True,
                          orientation="right",
                          leaf_rotation=90.,  # rotates the x axis labels
                          leaf_font_size=8.,  # font size for the x axis labels
                          labels=labels)
        # Convert the scipy tree to a nested children/name dict and attach
        # the human-readable labels (add_node/label_tree are module helpers).
        d3 = dict(children=[], name="root")
        add_node(T, d3)
        label_tree(d3["children"][0], lookup)
    else:
        bot.warning('Please provide data as pandas Data Frame.')
    return d3
def read_dict_from_properties ( desired_type : Type [ dict ] , file_object : TextIOBase , logger : Logger , conversion_finder : ConversionFinder , ** kwargs ) -> Dict [ str , Any ] : """Helper method to read a dictionary from a . properties file ( java - style ) using jprops . Since jprops does not provide automatic handling for boolean and numbers , this tries to add the feature . : param file _ object : : return :"""
# right now jprops relies on a byte stream . So we convert back our nicely decoded Text stream to a unicode # byte stream ! ( urgh ) class Unicoder : def __init__ ( self , file_object ) : self . f = file_object def __iter__ ( self ) : return self def __next__ ( self ) : line = self . f . __next__ ( ) return line . encode ( encoding = 'utf-8' ) res = jprops . load_properties ( Unicoder ( file_object ) ) # first automatic conversion of strings > numbers res = { key : try_parse_num_and_booleans ( val ) for key , val in res . items ( ) } # further convert if required return ConversionFinder . convert_collection_values_according_to_pep ( res , desired_type , conversion_finder , logger , ** kwargs )
def _compile_device_specific_prims(self, debug=False, stages=None,
                                   stagenames=None):
    """Align and expand device-specific primitives into device-agnostic ones.

    Using the data stored in the CommandQueue, extract and align compatible
    sequences of Primitives, then compile/optimize them down into a stream
    of Level 2 device-agnostic primitives.

    Device-specific primitives present a special optimization opportunity:
    instead of configuring one device on the JTAG chain at a time (padding
    the others with NO-OPs), instructions for all devices are collected,
    aligned, and expanded so multiple devices are configured simultaneously.

    Args:
        debug: A boolean for if debug information should be generated.
        stages: A list to be edited by this method to store snapshots of
            the compilation state. Used if debug is True.
        stagenames: A list of strings describing each debug snapshot of
            the compilation process. Used if debug is True.

    Returns:
        A flat list of device-agnostic primitives.
    """
    ###### GROUPING BY EXEC BOUNDARIES!######
    # A "fence" is a maximal run of primitives sharing the same layer and
    # the same DeviceTarget-ness; execution cannot be reordered across them.
    fences = []
    fence = [self[0]]
    for p in self[1:]:
        if type(fence[0])._layer == type(p)._layer and \
                isinstance(fence[0], DeviceTarget) == isinstance(p, DeviceTarget):
            fence.append(p)
        else:
            fences.append(fence)
            fence = [p]
    fences.append(fence)

    if debug:  # pragma: no cover
        formatted_fences = []
        for fence in fences:
            formatted_fence = [p.snapshot() for p in fence]
            formatted_fences.append(formatted_fence)
            formatted_fences.append([])
        stages.append(formatted_fences[:-1])  # Ignore trailing []
        stagenames.append("Fencing off execution boundaries")

    ###### SPLIT GROUPS BY DEVICE TARGET!######
    split_fences = []
    for fence in fences:
        tmp_chains = {}
        for p in fence:
            k = p._device_index if isinstance(p, DeviceTarget) else "chain"
            # FIX: the old code bound the None returned by list.append() to
            # an unused local (`subchain = ...append(p)`); the dead
            # assignment is dropped.
            tmp_chains.setdefault(k, []).append(p)
        split_fences.append(list(tmp_chains.values()))

    if debug:  # pragma: no cover
        formatted_split_fences = []
        for fence in split_fences:
            for group in fence:
                formatted_split_fences.append([p.snapshot() for p in group])
            formatted_split_fences.append([])
        stages.append(formatted_split_fences[:-1])
        stagenames.append("Grouping prims of each boundary by "
                          "target device")

    ###### ALIGN SEQUENCES AND PAD FRAMES######
    # FIRST DEV REQUIRED LINE
    # FIX: unused `f_i` enumerate index removed.
    grouped_fences = [
        FrameSequence(self._chain, *fence).finalize()
        for fence in split_fences
    ]

    if debug:  # pragma: no cover
        formatted_grouped_fences = []
        for fence in grouped_fences:
            formatted_grouped_fences += fence.snapshot() + [[]]
        stages.append(formatted_grouped_fences[:-1])
        stagenames.append("Aligning and combining each group dev "
                          "prim stream")

    ###### RECOMBINE FRAME GROUPS######
    ingested_chain = grouped_fences[0]
    for fence in grouped_fences[1:]:
        ingested_chain += fence

    if debug:  # pragma: no cover
        stages.append(ingested_chain.snapshot())
        stagenames.append("Recombining sanitized exec boundaries")

    ###### POST INGESTION ######
    ###### Flatten out LV3 Primitives ######
    while any(f._layer == 3 for f in ingested_chain):
        ###### COMBINE COMPATIBLE PRIMS ######
        ingested_chain = _merge_prims(ingested_chain)

        if debug:  # pragma: no cover
            stages.append(ingested_chain.snapshot())
            stagenames.append("Combining compatible lv3 prims.")

        ###### TRANSLATION TO LOWER LAYER ######
        sm = JTAGStateMachine(self._chain._sm.state)
        expanded_prims = FrameSequence(self._chain)
        for f in ingested_chain:
            if f._layer == 3:
                expanded_prims += f.expand_macro(sm)
            else:
                expanded_prims.append(f)
        expanded_prims.finalize()
        ingested_chain = expanded_prims
        if self._fsm is None:
            self._fsm = sm
        assert self._fsm == sm, "Target %s != Actual %s" % \
            (self._fsm.state, sm.state)

        if debug:  # pragma: no cover
            stages.append(ingested_chain.snapshot())
            stagenames.append("Expanding lv3 prims")

    ###### Flatten out Dev LV2 Primitives ######
    while any(isinstance(f._valid_prim, DeviceTarget) for f in ingested_chain):
        ###### COMBINE COMPATIBLE PRIMS ######
        ingested_chain = _merge_prims(ingested_chain)

        if debug:  # pragma: no cover
            stages.append(ingested_chain.snapshot())
            stagenames.append("Merging Device Specific Prims")

        ###### TRANSLATION TO LOWER LAYER ######
        sm = JTAGStateMachine(self._chain._sm.state)
        expanded_prims = FrameSequence(self._chain)
        for f in ingested_chain:
            if issubclass(f._prim_type, DeviceTarget):
                expanded_prims += f.expand_macro(sm)
            else:
                # Chain-level prim: still advances the TAP state machine.
                f[0].apply_tap_effect(sm)
                expanded_prims.append(f)
        expanded_prims.finalize()
        ingested_chain = expanded_prims
        if self._fsm is None:
            self._fsm = sm
        assert self._fsm == sm, "Target %s != Actual %s" % \
            (self._fsm.state, sm.state)

        if debug:  # pragma: no cover
            stages.append(ingested_chain.snapshot())
            stagenames.append("Expanding Device Specific Prims")

    ###### Convert FrameSequence to flat array ######
    flattened_prims = [f._valid_prim for f in ingested_chain]
    if debug:  # pragma: no cover
        stages.append([[p.snapshot() for p in flattened_prims]])
        stagenames.append("Converting format to single stream.")
    return flattened_prims
def v1_tag_list(tags, tag=''):
    '''List all direct children tags of the given parent.

    If no parent is specified, then list all top-level tags. The JSON
    returned for ``/dossier/v1/tags/list/foo/bar`` might look like this:

    .. code-block:: python

        'children': [{'name': 'baz', 'parent': 'bar', 'tag': 'foo/bar/baz'}]
    '''
    # The path segment arrives as utf-8 bytes; normalize to a stripped string.
    parent = tag.decode('utf-8').strip()
    return {'children': tags.list(parent)}
def remove_group_by_id(self, group_id, recursive=True):
    """Remove the group matching the given group_id.

    If recursive is True, all descendants of this group are also removed.

    :param group_id: The group id to be removed
    :param recursive: All descendants should be removed as well
    :return: True if the element was removed successfully, False if an
        error occured or there was nothing to remove.
    """
    target = self.objects[group_id]
    if target is None:
        return False

    ok = True
    # Iterate a snapshot of the children: removals mutate the structure.
    for child_id in list(target.children):
        child = self.objects[child_id]
        if child is None:
            return False
        # Dispatch on the child's kind: groups may recurse, files are
        # removed directly.
        if recursive and isinstance(child, PBXGroup):
            ok &= self.remove_group_by_id(child.get_id(), recursive)
        if isinstance(child, PBXFileReference):
            ok &= self.remove_file_by_id(child.get_id())
        if not ok:
            return False

    del self.objects[target.get_id()]

    # Remove the reference from any other group object that could be
    # containing it.
    for container in self.objects.get_objects_in_section(u'PBXGroup'):
        container.remove_child(target)

    return True
def add_from_xmlnode(self, element):
    """Load ui definition from xml.etree.Element node."""
    if self.tree is not None:
        # TODO: append to current tree
        return
    # First node seen: wrap it in a fresh <interface> root document.
    container = ET.Element('interface')
    container.append(element)
    self.tree = ET.ElementTree(container)
    self.root = self.tree.getroot()
    self.objects = {}
def get_compound_pd(self):
    """Get the CompoundPhaseDiagram object, which can then be used for
    plotting.

    Returns:
        (CompoundPhaseDiagram)
    """
    # The reactions are reported in formation energies, so the terminal
    # compositions must sit at 0 energy: build zero-energy copies.
    terminal1 = PDEntry(self.entry1.composition, 0)
    terminal2 = PDEntry(self.entry2.composition, 0)
    terminal_comps = [
        Composition(terminal1.composition.reduced_formula),
        Composition(terminal2.composition.reduced_formula),
    ]
    return CompoundPhaseDiagram(
        self.rxn_entries + [terminal1, terminal2],
        terminal_comps,
        normalize_terminal_compositions=False,
    )
def extract(self, item):
    """Run the HTML response through the extractors, clean and compare.

    :param item: NewscrawlerItem to be processed.
    :return: An updated NewscrawlerItem including the results of the
        extraction
    """
    candidates = [extractor.extract(item) for extractor in self.extractor_list]
    candidates = self.cleaner.clean(candidates)
    best = self.comparer.compare(item, candidates)

    # Copy the winning article's fields back onto the item.
    item['article_title'] = best.title
    item['article_description'] = best.description
    item['article_text'] = best.text
    item['article_image'] = best.topimage
    item['article_author'] = best.author
    item['article_publish_date'] = best.publish_date
    item['article_language'] = best.language
    return item
def update_gradients_full(self, dL_dK, X, X2=None):
    """derivative of the covariance matrix with respect to the parameters
    (shape is N x num_inducing x num_params)

    Accumulates the gradients of the marginal likelihood w.r.t. variance,
    lengthscale and period into the respective ``.gradient`` attributes.
    # NOTE(review): assumes X/X2 are 2-D design matrices as used by the
    # kernel's basis-function machinery -- confirm against callers.
    """
    if X2 is None:
        X2 = X
    # Evaluate the cosine basis functions at the inputs.
    FX = self._cos(self.basis_alpha[None, :], self.basis_omega[None, :], self.basis_phi[None, :])(X)
    FX2 = self._cos(self.basis_alpha[None, :], self.basis_omega[None, :], self.basis_phi[None, :])(X2)

    # Factorize the cosine products into amplitude/frequency/phase form.
    La = np.column_stack((self.a[0] * np.ones((self.n_basis, 1)), self.a[1] * self.basis_omega))
    Lo = np.column_stack((self.basis_omega, self.basis_omega))
    Lp = np.column_stack((self.basis_phi, self.basis_phi + np.pi / 2))
    r, omega, phi = self._cos_factorization(La, Lo, Lp)
    Gint = self._int_computation(r, omega, phi, r, omega, phi)
    Flower = np.array(self._cos(self.basis_alpha, self.basis_omega, self.basis_phi)(self.lower))[:, None]

    # dK_dvar
    dK_dvar = 1. / self.variance * mdot(FX, self.Gi, FX2.T)

    # dK_dlen
    da_dlen = [-1. / self.lengthscale ** 2, 0.]
    dLa_dlen = np.column_stack((da_dlen[0] * np.ones((self.n_basis, 1)), da_dlen[1] * self.basis_omega))
    r1, omega1, phi1 = self._cos_factorization(dLa_dlen, Lo, Lp)
    dGint_dlen = self._int_computation(r1, omega1, phi1, r, omega, phi)
    dGint_dlen = dGint_dlen + dGint_dlen.T
    dG_dlen = 1. / 2 * Gint + self.lengthscale / 2 * dGint_dlen
    dK_dlen = -mdot(FX, self.Gi, dG_dlen / self.variance, self.Gi, FX2.T)

    # dK_dper
    dFX_dper = self._cos(-self.basis_alpha[None, :] * self.basis_omega[None, :] / self.period * X, self.basis_omega[None, :], self.basis_phi[None, :] + np.pi / 2)(X)
    dFX2_dper = self._cos(-self.basis_alpha[None, :] * self.basis_omega[None, :] / self.period * X2, self.basis_omega[None, :], self.basis_phi[None, :] + np.pi / 2)(X2)
    dLa_dper = np.column_stack((-self.a[0] * self.basis_omega / self.period, -self.a[1] * self.basis_omega ** 2 / self.period))
    dLp_dper = np.column_stack((self.basis_phi + np.pi / 2, self.basis_phi + np.pi))
    r1, omega1, phi1 = self._cos_factorization(dLa_dper, Lo, dLp_dper)

    # Primitive of the product of cosines, evaluated between lower and upper.
    IPPprim1 = self.upper * (1. / (omega + omega1.T) * np.cos((omega + omega1.T) * self.upper + phi + phi1.T - np.pi / 2) + 1. / (omega - omega1.T) * np.cos((omega - omega1.T) * self.upper + phi - phi1.T - np.pi / 2))
    IPPprim1 -= self.lower * (1. / (omega + omega1.T) * np.cos((omega + omega1.T) * self.lower + phi + phi1.T - np.pi / 2) + 1. / (omega - omega1.T) * np.cos((omega - omega1.T) * self.lower + phi - phi1.T - np.pi / 2))
    # SIMPLIFY!!!
    # NOTE(review): the next line is unbalanced (one extra closing paren)
    # and re-assigns the IPPprim1 just computed above -- this looks like an
    # extraction/transcription artifact. Confirm against the upstream
    # source before relying on this block.
    IPPprim1 = (self.upper - self.lower) * np.cos((omega + omega1.T) * self.upper + phi + phi1.T - np.pi / 2) + 1. / (omega - omega1.T) * np.cos((omega - omega1.T) * self.upper + phi - phi1.T - np.pi / 2))
    # Alternative form used where omega == omega1 makes the 1/(omega-omega1)
    # terms degenerate (NaN/inf); selected below via np.where.
    IPPprim2 = self.upper * (1. / (omega + omega1.T) * np.cos((omega + omega1.T) * self.upper + phi + phi1.T - np.pi / 2) + self.upper * np.cos(phi - phi1.T))
    IPPprim2 -= self.lower * (1. / (omega + omega1.T) * np.cos((omega + omega1.T) * self.lower + phi + phi1.T - np.pi / 2) + self.lower * np.cos(phi - phi1.T))
    IPPprim = np.where(np.logical_or(np.isnan(IPPprim1), np.isinf(IPPprim1)), IPPprim2, IPPprim1)

    IPPint1 = 1. / (omega + omega1.T) ** 2 * np.cos((omega + omega1.T) * self.upper + phi + phi1.T - np.pi) + 1. / (omega - omega1.T) ** 2 * np.cos((omega - omega1.T) * self.upper + phi - phi1.T - np.pi)
    IPPint1 -= 1. / (omega + omega1.T) ** 2 * np.cos((omega + omega1.T) * self.lower + phi + phi1.T - np.pi) + 1. / (omega - omega1.T) ** 2 * np.cos((omega - omega1.T) * self.lower + phi - phi1.T - np.pi)
    IPPint2 = 1. / (omega + omega1.T) ** 2 * np.cos((omega + omega1.T) * self.upper + phi + phi1.T - np.pi) + 1. / 2 * self.upper ** 2 * np.cos(phi - phi1.T)
    IPPint2 -= 1. / (omega + omega1.T) ** 2 * np.cos((omega + omega1.T) * self.lower + phi + phi1.T - np.pi) + 1. / 2 * self.lower ** 2 * np.cos(phi - phi1.T)
    # IPPint2[0,0] = (self.upper**2-self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])
    IPPint = np.where(np.isnan(IPPint1), IPPint2, IPPint1)

    dLa_dper2 = np.column_stack((-self.a[1] * self.basis_omega / self.period))
    dLp_dper2 = np.column_stack((self.basis_phi + np.pi / 2))
    r2, omega2, phi2 = dLa_dper2.T, Lo[:, 0:1], dLp_dper2.T

    dGint_dper = np.dot(r, r1.T) / 2 * (IPPprim - IPPint) + self._int_computation(r2, omega2, phi2, r, omega, phi)
    dGint_dper = dGint_dper + dGint_dper.T

    dFlower_dper = np.array(self._cos(-self.lower * self.basis_alpha * self.basis_omega / self.period, self.basis_omega, self.basis_phi + np.pi / 2)(self.lower))[:, None]

    dG_dper = 1. / self.variance * (self.lengthscale / 2 * dGint_dper + self.b[0] * (np.dot(dFlower_dper, Flower.T) + np.dot(Flower, dFlower_dper.T)))

    dK_dper = mdot(dFX_dper, self.Gi, FX2.T) - mdot(FX, self.Gi, dG_dper, self.Gi, FX2.T) + mdot(FX, self.Gi, dFX2_dper.T)

    # Accumulate the final parameter gradients via the chain rule with dL_dK.
    self.variance.gradient = np.sum(dK_dvar * dL_dK)
    self.lengthscale.gradient = np.sum(dK_dlen * dL_dK)
    self.period.gradient = np.sum(dK_dper * dL_dK)
def account_setup(remote, token, resp):
    """Perform additional setup after user have been logged in.

    :param remote: The remote application.
    :param token: The token value.
    :param resp: The response.
    """
    gh = github3.login(token=resp['access_token'])
    with db.session.begin_nested():
        me = gh.me()
        # Remember the GitHub login/id on the linked remote account.
        token.remote_account.extra_data = {'login': me.login, 'id': me.id}
        # Create user <-> external id link.
        oauth_link_external_id(
            token.remote_account.user,
            dict(id=str(me.id), method='github'),
        )
def ignore(self, event):
    """Ignores events.

    Returns True when the event happened outside our axes, False when it
    is inside, and None for events that carry no ``inaxes`` attribute
    (matching the original implicit fall-through).
    """
    if not hasattr(event, 'inaxes'):
        return None
    return event.inaxes != self.ax
def get_raster_array(image):
    """Return the array data from any Raster or Image type"""
    if isinstance(image, RGB):
        # Stack the per-channel value arrays (flipped vertically) into
        # an (H, W, C) array.
        rgb = image.rgb
        channels = [np.flipud(rgb.dimension_values(d, flat=False))
                    for d in rgb.vdims]
        return np.dstack(channels)

    data = image.dimension_values(2, flat=False)
    # Exact-type check: Raster stores data transposed relative to Image.
    if type(image) is Raster:
        return data.T
    return np.flipud(data)
def filterRead(self, read):
    """Filter a read, according to our set of filters.

    @param read: A C{Read} instance or one of its subclasses.
    @return: C{False} if the read fails any of our filters, else the
        C{Read} instance returned by our list of filters.
    """
    current = read
    for apply_filter in self._filters:
        outcome = apply_filter(current)
        # A literal False means rejection; anything else (possibly a
        # transformed read) is threaded into the next filter.
        if outcome is False:
            return False
        current = outcome
    return current
def get_azm_dip(inp, iz, ninpz, intpts, isdipole, strength, name, verb):
    r"""Get angles, interpolation weights and normalization weights.

    This check-function is called from one of the modelling routines in
    :mod:`model`. Consult these modelling routines for a detailed
    description of the input parameters.

    Parameters
    ----------
    inp : list of floats or arrays
        Input coordinates (m):
        - [x0, x1, y0, y1, z0, z1] (bipole of finite length)
        - [x, y, z, azimuth, dip] (dipole, infinitesimal small)
    iz : int
        Index of current di-/bipole depth (-).
    ninpz : int
        Total number of di-/bipole depths (ninpz = 1 or npinz = nsrc) (-).
    intpts : int
        Number of integration points for bipole (-).
    isdipole : bool
        Boolean if inp is a dipole.
    strength : float, optional
        Source strength (A):
        - If 0, output is normalized to source and receiver of 1 m length,
          and source strength of 1 A.
        - If != 0, output is returned for given source and receiver length,
          and source strength.
    name : str, {'src', 'rec'}
        Pole-type.
    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    tout : list of floats or arrays
        Dipole coordinates x, y, and z (m).
    azm : float or array of floats
        Horizontal angle (azimuth).
    dip : float or array of floats
        Vertical angle (dip).
    g_w : float or array of floats
        Factors from Gaussian interpolation.
    intpts : int
        As input, checked.
    inp_w : float or array of floats
        Factors from source/receiver length and source strength.
    """
    # Module-level minimum offset; used below to pick a rounding precision.
    global _min_off

    # Get this di-/bipole
    if ninpz == 1:  # If there is only one distinct depth, all at once
        tinp = inp
    else:  # If there are several depths, we take the current one
        if isdipole:
            tinp = [np.atleast_1d(inp[0][iz]), np.atleast_1d(inp[1][iz]),
                    np.atleast_1d(inp[2][iz]), np.atleast_1d(inp[3]),
                    np.atleast_1d(inp[4])]
        else:
            tinp = [inp[0][iz], inp[1][iz], inp[2][iz],
                    inp[3][iz], inp[4][iz], inp[5][iz]]

    # Check source strength variable
    strength = _check_var(strength, float, 0, 'strength', ())

    # Dipole/Bipole specific
    if isdipole:
        # If input is a dipole, set intpts to 1
        intpts = 1

        # Check azm
        azm = _check_var(np.deg2rad(tinp[3]), float, 1, 'azimuth')
        # Check dip
        dip = _check_var(np.deg2rad(tinp[4]), float, 1, 'dip')

        # If dipole, g_w are ones
        g_w = np.ones(tinp[0].size)

        # If dipole, inp_w are once, unless strength > 0
        inp_w = np.ones(tinp[0].size)
        if name == 'src' and strength > 0:
            inp_w *= strength

        # Collect output
        tout = tinp
    else:
        # Get lengths in each direction
        dx = np.squeeze(tinp[1] - tinp[0])
        dy = np.squeeze(tinp[3] - tinp[2])
        dz = np.squeeze(tinp[5] - tinp[4])

        # Length of bipole
        dl = np.atleast_1d(np.linalg.norm([dx, dy, dz], axis=0))

        # Horizontal deviation from x-axis
        azm = np.atleast_1d(np.arctan2(dy, dx))

        # Vertical deviation from xy-plane down
        dip = np.atleast_1d(np.pi / 2 - np.arccos(dz / dl))

        # Check intpts
        intpts = _check_var(intpts, int, 0, 'intpts', ())

        # Gauss quadrature if intpts > 2; else set to center of tinp
        if intpts > 2:  # Calculate the dipole positions
            # Get integration positions and weights
            g_x, g_w = special.p_roots(intpts)
            g_x = np.outer(g_x, dl / 2.0)  # Adjust to tinp length
            g_w /= 2.0  # Adjust to tinp length (dl/2), normalize (1/dl)

            # Coordinate system is left-handed, positive z down
            # (East-North-Depth).
            xinp = tinp[0] + dx / 2 + g_x * np.cos(dip) * np.cos(azm)
            yinp = tinp[2] + dy / 2 + g_x * np.cos(dip) * np.sin(azm)
            zinp = tinp[4] + dz / 2 + g_x * np.sin(dip)

            # Reduce zinp to one, if ninpz is 1 (as they are all the same then)
            if ninpz == 1:
                zinp = zinp[:, 0]
        else:  # If intpts < 3: Calculate bipole at tinp-centre for dip/azm
            # Set intpts to 1
            intpts = 1

            # Get centre points
            xinp = np.array(tinp[0] + dx / 2)
            yinp = np.array(tinp[2] + dy / 2)
            zinp = np.array(tinp[4] + dz / 2)

            # Gaussian weights in this case are ones
            g_w = np.array([1])

        # Scaling
        inp_w = np.ones(dl.size)
        if strength > 0:  # If strength > 0, we scale it by bipole-length
            inp_w *= dl
            if name == 'src':  # If source, additionally by source strength
                inp_w *= strength

        # Collect output list; rounding coord. to same precision as min_off
        rndco = int(np.round(np.log10(1 / _min_off)))
        tout = [np.round(xinp, rndco).ravel('F'),
                np.round(yinp, rndco).ravel('F'),
                np.round(zinp, rndco).ravel('F')]

    # Print spatial parameters
    if verb > 2:
        # Pole-type: src or rec
        if name == 'src':
            longname = ' Source(s) : '
        else:
            longname = ' Receiver(s) : '

        # Print dipole/bipole information
        if isdipole:
            print(longname, str(tout[0].size), 'dipole(s)')
            tname = ['x ', 'y ', 'z ']
            prntinp = tout
        else:
            print(longname, str(int(tout[0].size / intpts)), 'bipole(s)')
            tname = ['x_c', 'y_c', 'z_c']
            if intpts == 1:
                print(" > intpts : 1 (as dipole)")
                prntinp = tout
            else:
                print(" > intpts : ", intpts)
                prntinp = [np.atleast_1d(tinp[0])[0] + dx / 2,
                           np.atleast_1d(tinp[2])[0] + dy / 2,
                           np.atleast_1d(tinp[4])[0] + dz / 2]

            # Print bipole length and strength
            _prnt_min_max_val(dl, " > length [m] : ", verb)
            print(" > strength[A] : ", _strvar(strength))

        # Print coordinates
        for i in range(3):
            text = " > " + tname[i] + " [m] : "
            _prnt_min_max_val(prntinp[i], text, verb)

        # Print angles
        _prnt_min_max_val(np.rad2deg(azm), " > azimuth [°] : ", verb)
        _prnt_min_max_val(np.rad2deg(dip), " > dip [°] : ", verb)

    return tout, azm, dip, g_w, intpts, inp_w
def _get_plot_data(data, ndim=None):
    """Get plot data out of an input object

    Parameters
    ----------
    data : array-like, `phate.PHATE` or `scanpy.AnnData`
    ndim : int, optional (default: None)
        Minimum number of dimensions
    """
    if isinstance(data, PHATE):
        out = data.transform()
    else:
        out = data
        # `anndata` may not be importable; treat a NameError as "not AnnData".
        try:
            is_anndata = isinstance(data, anndata.AnnData)
        except NameError:
            # anndata not installed
            is_anndata = False
        if is_anndata:
            try:
                out = data.obsm['X_phate']
            except KeyError:
                raise RuntimeError(
                    "data.obsm['X_phate'] not found. "
                    "Please run `sc.tl.phate(adata)` before plotting.")

    if ndim is not None and out[0].shape[0] < ndim:
        if isinstance(data, PHATE):
            # Re-fit the estimator with more components.
            data.set_params(n_components=ndim)
            out = data.transform()
        else:
            raise ValueError(
                "Expected at least {}-dimensional data, got {}".format(
                    ndim, out[0].shape[0]))
    return out
def do_GET(self):
    """Handle a HTTP GET request."""
    # Example session:
    # in : GET /wsapi/decrypt?otp=ftftftccccdvvbfcfduvvcubikngtchlubtutucrld HTTP/1.0
    # out : OK counter=0004 low=f585 high=3e use=03
    if self.path.startswith(self.serve_url):
        # Everything after the service prefix is the OTP to decrypt.
        otp = self.path[len(self.serve_url):]
        result = self.decrypt_yubikey_otp(otp)

        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(result)
        self.wfile.write("\n")
    elif self.stats_url and self.path == self.stats_url:
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        # Dump the module-level stats counters, one per line.
        for key in stats:
            self.wfile.write("%s %d\n" % (key, stats[key]))
    else:
        self.log_error("Bad URL '%s' - I'm serving '%s' (responding 403)"
                       % (self.path, self.serve_url))
        self.send_response(403, 'Forbidden')
        self.end_headers()
def add(self, statement, parameters=None):
    """Adds a :class:`.Statement` and optional sequence of parameters to be
    used with the statement to the batch.

    Like with other statements, parameters must be a sequence, even if
    there is only one item.
    """
    if isinstance(statement, six.string_types):
        # Raw query string: bind parameters client-side if provided.
        query = statement
        if parameters:
            encoder = Encoder() if self._session is None else self._session.encoder
            query = bind_params(query, parameters, encoder)
        self._add_statement_and_params(False, query, ())
    elif isinstance(statement, PreparedStatement):
        query_id = statement.query_id
        bound = statement.bind(() if parameters is None else parameters)
        self._update_state(bound)
        self._add_statement_and_params(True, query_id, bound.values)
    elif isinstance(statement, BoundStatement):
        if parameters:
            raise ValueError(
                "Parameters cannot be passed with a BoundStatement "
                "to BatchStatement.add()")
        self._update_state(statement)
        self._add_statement_and_params(
            True, statement.prepared_statement.query_id, statement.values)
    else:
        # it must be a SimpleStatement
        query = statement.query_string
        if parameters:
            encoder = Encoder() if self._session is None else self._session.encoder
            query = bind_params(query, parameters, encoder)
        self._update_state(statement)
        self._add_statement_and_params(False, query, ())
    return self
def force_flush():
    """force_flush

    Ask every registered publisher instance to flush its queue, logging
    (when SPLUNK_DEBUG is set) any instance that raises.
    """
    if SPLUNK_DEBUG:
        print('{} -------------------------------'.format(rnow()))
        print('{} splunkpub: force_flush - start'.format(rnow()))

    all_ok = True
    for instance in instances:
        try:
            instance.force_flush()
        except Exception as e:
            all_ok = False
            if SPLUNK_DEBUG:
                print('{} - force_flush instance={} '
                      '- hit ex={}'.format(rnow(), instance, e))
        # end of try/ex

    if not all_ok and SPLUNK_DEBUG:
        print('{} Failed flushing queues'.format(rnow()))

    if SPLUNK_DEBUG:
        print('{} splunkpub: force_flush - done'.format(rnow()))
        print('{} -------------------------------'.format(rnow()))
def positional(self, argument_dest, arg_type=None, **kwargs):
    """Register a positional argument for the given command scope using a
    knack.arguments.CLIArgumentType

    :param argument_dest: The destination argument to add this argument type to
    :type argument_dest: str
    :param arg_type: Predefined CLIArgumentType definition to register, as
        modified by any provided kwargs.
    :type arg_type: knack.arguments.CLIArgumentType
    :param kwargs: Possible values: `validator`, `completer`, `nargs`,
        `action`, `const`, `default`, `type`, `choices`, `required`, `help`,
        `metavar`. See /docs/arguments.md.
    """
    self._check_stale()
    if not self._applicable():
        return

    if self.command_scope not in self.command_loader.command_table:
        raise ValueError(
            "command authoring error: positional argument '{}' cannot be registered to a group-level "
            "scope '{}'. It must be registered to a specific command.".format(
                argument_dest, self.command_scope))

    # Before adding the new positional arg, ensure that there are no existing
    # positional arguments registered for this command.
    command_args = self.command_loader.argument_registry.arguments[self.command_scope]
    existing_positionals = {name: arg for name, arg in command_args.items()
                            if arg.settings.get('options_list') == []}
    if existing_positionals and argument_dest not in existing_positionals:
        raise CLIError(
            "command authoring error: commands may have, at most, one positional argument. '{}' already "
            "has positional argument: {}.".format(
                self.command_scope, ' '.join(existing_positionals.keys())))

    deprecate_action = self._handle_deprecations(argument_dest, **kwargs)
    if deprecate_action:
        kwargs['action'] = deprecate_action

    # An empty options_list is what marks an argument as positional.
    kwargs['options_list'] = []
    self.command_loader.argument_registry.register_cli_argument(
        self.command_scope, argument_dest, arg_type, **kwargs)
def insert(collection_name, docs, check_keys, safe, last_error_args,
           continue_on_error, opts):
    """Get an **insert** message."""
    # Bit 0 of the flags field is "continue on error".
    flags = 1 if continue_on_error else 0
    data = struct.pack("<i", flags)
    data += bson._make_c_string(collection_name)

    encoded = [bson.BSON.encode(doc, check_keys, opts) for doc in docs]
    if not encoded:
        raise InvalidOperation("cannot do an empty bulk insert")
    max_bson_size = max(len(enc) for enc in encoded)
    data += _EMPTY.join(encoded)

    if safe:
        # Append a getLastError request so the server acknowledges the write.
        (_, insert_message) = __pack_message(2002, data)
        (request_id, error_message, _) = __last_error(collection_name,
                                                      last_error_args)
        return (request_id, insert_message + error_message, max_bson_size)
    (request_id, insert_message) = __pack_message(2002, data)
    return (request_id, insert_message, max_bson_size)
def modularity_louvain_und_sign(W, gamma=1, qtype='sta', seed=None):
    '''The optimal community structure is a subdivision of the network into
    nonoverlapping groups of nodes in a way that maximizes the number of
    within-group edges, and minimizes the number of between-group edges.
    The modularity is a statistic that quantifies the degree to which the
    network may be subdivided into such clearly delineated groups.

    The Louvain algorithm is a fast and accurate community detection
    algorithm (at the time of writing). Use this function as opposed to
    modularity_louvain_und() only if the network contains a mix of positive
    and negative weights. If the network contains all positive weights, the
    output will be equivalent to that of modularity_louvain_und().

    Parameters
    ----------
    W : NxN np.ndarray
        undirected weighted/binary connection matrix with positive and
        negative weights
    qtype : str
        modularity type. Can be 'sta' (default), 'pos', 'smp', 'gja', 'neg'.
        See Rubinov and Sporns (2011) for a description.
    gamma : float
        resolution parameter. default value=1. Values 0 <= gamma < 1 detect
        larger modules while gamma > 1 detects smaller modules.
    seed : hashable, optional
        If None (default), use the np.random's global random state to
        generate random numbers. Otherwise, use a new np.random.RandomState
        instance seeded with the given value.

    Returns
    -------
    ci : Nx1 np.ndarray
        refined community affiliation vector
    Q : float
        optimized modularity metric

    Notes
    -----
    Ci and Q may vary from run to run, due to heuristics in the algorithm.
    Consequently, it may be worth to compare multiple runs.
    '''
    rng = get_rng(seed)
    n = len(W)  # number of nodes

    # Split the signed matrix into separate positive/negative layers.
    W0 = W * (W > 0)  # positive weights matrix
    W1 = -W * (W < 0)  # negative weights matrix
    s0 = np.sum(W0)  # weight of positive links
    s1 = np.sum(W1)  # weight of negative links

    # d0/d1 are the positive/negative contributions to dQ for each qtype.
    if qtype == 'smp':
        d0 = 1 / s0
        d1 = 1 / s1  # dQ=dQ0/s0-sQ1/s1
    elif qtype == 'gja':
        d0 = 1 / (s0 + s1)
        d1 = d0  # dQ=(dQ0-dQ1)/(s0+s1)
    elif qtype == 'sta':
        d0 = 1 / s0
        d1 = 1 / (s0 + s1)  # dQ=dQ0/s0-dQ1/(s0+s1)
    elif qtype == 'pos':
        d0 = 1 / s0
        d1 = 0  # dQ=dQ0/s0
    elif qtype == 'neg':
        d0 = 0
        d1 = 1 / s1  # dQ=-dQ1/s1
    else:
        raise KeyError('modularity type unknown')

    if not s0:  # adjust for absent positive weights
        s0 = 1
        d0 = 0
    if not s1:  # adjust for absent negative weights
        s1 = 1
        d1 = 0

    h = 1  # hierarchy index
    nh = n  # number of nodes in hierarchy
    ci = [None, np.arange(n) + 1]  # hierarchical module assignments
    q = [-1, 0]  # hierarchical modularity values
    # Outer loop: repeat agglomeration while modularity keeps improving.
    while q[h] - q[h - 1] > 1e-10:
        if h > 300:
            raise BCTParamError('Modularity Infinite Loop Style A. Please '
                                'contact the developer with this error.')
        kn0 = np.sum(W0, axis=0)  # positive node degree
        kn1 = np.sum(W1, axis=0)  # negative node degree
        km0 = kn0.copy()  # positive module degree
        km1 = kn1.copy()  # negative module degree
        knm0 = W0.copy()  # positive node-to-module degree
        knm1 = W1.copy()  # negative node-to-module degree

        m = np.arange(nh) + 1  # initial module assignments
        flag = True  # flag for within hierarchy search
        it = 0
        while flag:
            it += 1
            if it > 1000:
                raise BCTParamError('Infinite Loop was detected and stopped. '
                                    'This was probably caused by passing in a directed matrix.')
            flag = False
            # loop over nodes in random order
            for u in rng.permutation(nh):
                ma = m[u] - 1
                dQ0 = ((knm0[u, :] + W0[u, u] - knm0[u, ma]) -
                       gamma * kn0[u] * (km0 + kn0[u] - km0[ma]) / s0)  # positive dQ
                dQ1 = ((knm1[u, :] + W1[u, u] - knm1[u, ma]) -
                       gamma * kn1[u] * (km1 + kn1[u] - km1[ma]) / s1)  # negative dQ

                dQ = d0 * dQ0 - d1 * dQ1  # rescaled changes in modularity
                dQ[ma] = 0  # no changes for same module

                max_dQ = np.max(dQ)  # maximal increase in modularity
                if max_dQ > 1e-10:  # if maximal increase is positive
                    flag = True
                    mb = np.argmax(dQ)
                    # change positive node-to-module degrees
                    knm0[:, mb] += W0[:, u]
                    knm0[:, ma] -= W0[:, u]
                    # change negative node-to-module degrees
                    knm1[:, mb] += W1[:, u]
                    knm1[:, ma] -= W1[:, u]
                    km0[mb] += kn0[u]  # change positive module degrees
                    km0[ma] -= kn0[u]
                    km1[mb] += kn1[u]  # change negative module degrees
                    km1[ma] -= kn1[u]
                    m[u] = mb + 1  # reassign module

        h += 1
        ci.append(np.zeros((n,)))
        _, m = np.unique(m, return_inverse=True)
        m += 1
        for u in range(nh):  # loop through initial module assignments
            ci[h][np.where(ci[h - 1] == u + 1)] = m[u]  # assign new modules

        nh = np.max(m)  # number of new nodes
        # Coarse-grain: aggregate weights between the new modules.
        wn0 = np.zeros((nh, nh))  # new positive weights matrix
        wn1 = np.zeros((nh, nh))
        for u in range(nh):
            for v in range(u, nh):
                wn0[u, v] = np.sum(W0[np.ix_(m == u + 1, m == v + 1)])
                wn1[u, v] = np.sum(W1[np.ix_(m == u + 1, m == v + 1)])
                wn0[v, u] = wn0[u, v]
                wn1[v, u] = wn1[u, v]
        W0 = wn0
        W1 = wn1

        q.append(0)
        # compute modularity
        q0 = np.trace(W0) - np.sum(np.dot(W0, W0)) / s0
        q1 = np.trace(W1) - np.sum(np.dot(W1, W1)) / s1
        q[h] = d0 * q0 - d1 * q1

    _, ci_ret = np.unique(ci[-1], return_inverse=True)
    ci_ret += 1
    return ci_ret, q[-1]
def encipher(self, string, keep_punct=False):
    """Encipher ``string`` using the Atbash cipher.

    Example::

        ciphertext = Atbash().encipher(plaintext)

    :param string: The string to encipher.
    :param keep_punct: if True, punctuation and spacing are retained.
        If False, it is all removed. Default is False.
    :returns: The enciphered string.
    """
    if not keep_punct:
        string = self.remove_punctuation(string)
    # Map each alphabetic character through the key; pass others through.
    pieces = []
    for ch in string.upper():
        if ch.isalpha():
            pieces.append(self.key[self.a2i(ch)])
        else:
            pieces.append(ch)
    return ''.join(pieces)
async def put ( self , cid ) : """Update price of current content Accepts : Query string args : - " cid " - int Request body params : - " access _ type " - str - " price " - int - " coinid " - str Returns : dict with following fields : - " confirmed " : None - " txid " - str - " description " - str - " content " - str - " read _ access " - int - " write _ access " - int - " cid " - int - " txid " - str - " seller _ pubkey " - str - " seller _ access _ string " : None or str Verified : True"""
if settings . SIGNATURE_VERIFICATION : super ( ) . verify ( ) try : body = json . loads ( self . request . body ) except : self . set_status ( 400 ) self . write ( { "error" : 400 , "reason" : "Unexpected data format. JSON required" } ) raise tornado . web . Finish # Get data from signed message public_key = body . get ( "public_key" , None ) if isinstance ( body [ "message" ] , str ) : message = json . loads ( body [ "message" ] ) elif isinstance ( body [ "message" ] , dict ) : message = body [ "message" ] price = message . get ( "price" ) access_type = message . get ( "access_type" ) coinid = message . get ( "coinid" ) # Check if required fields exists if not any ( [ price , access_type , coinid ] ) : self . set_status ( 400 ) self . write ( { "error" : 400 , "reason" : "Missed price and access type for content" } ) # Set bridges url if coinid in settings . bridges . keys ( ) : self . account . blockchain . setendpoint ( settings . bridges [ coinid ] ) else : self . set_status ( 400 ) self . write ( { "error" : 400 , "reason" : "Invalid coin ID" } ) raise tornado . web . Finish # Get public key hex or checksum format check = self . account . validator [ coinid ] ( public_key ) # Get content owner address owneraddr = await self . account . blockchain . ownerbycid ( cid = cid ) if isinstance ( owneraddr , dict ) : if "error" in owneraddr . keys ( ) : self . set_status ( 404 ) self . write ( { "error" : 404 , "reason" : "Owner not found." } ) raise tornado . web . Finish # Check if current content belongs to current user if owneraddr != check : self . set_status ( 403 ) self . write ( { "error" : 403 , "reason" : "Owner does not match." } ) raise tornado . web . Finish response = { "cid" : cid , "coinid" : coinid } # Make setprice request to the bridge if access_type == "write_price" : result = await self . account . blockchain . 
setwriteprice ( cid = cid , write_price = price ) response [ "write_access" ] = result [ "price" ] elif access_type == "read_price" : result = await self . account . blockchain . setreadprice ( cid = cid , read_price = price ) response [ "read_access" ] = result [ "price" ] # Fee fee = await billing . set_price_fee ( cid = cid , price = price , owneraddr = owneraddr ) if "error" in fee . keys ( ) : self . set_status ( fee [ "error" ] ) self . write ( fee ) raise tornado . web . Finish self . write ( response )
def __get_wbfmt_format_txt(self, data_nt):
    """Return the text-cell format chosen by ``data_nt.format_txt``.

    Values 1 and 2 map to the "very light grey" / "light grey" formats;
    any other value is used directly as a format-name lookup key.
    """
    fmt_key = getattr(data_nt, "format_txt")
    if fmt_key == 1:
        name = "very light grey"
    elif fmt_key == 2:
        name = "light grey"
    else:
        name = fmt_key
    return self.fmtname2wbfmtobj.get(name)
def copy(self):
    """Return a deep copy of the current scene.

    Returns
    ----------
    copied : trimesh.Scene
        Copy of the current scene
    """
    # Copy each geometry through its own copy() method so that each can
    # handle references to unpickle-able objects itself.
    duplicated = {}
    for name, geom in self.geometry.items():
        duplicated[name] = geom.copy()
    # Assemble a new scene around the copied geometry and graph.
    return Scene(geometry=duplicated, graph=self.graph.copy())
def get(self, *args, **kwargs):
    """Work just like the default manager's :func:`get`, but also accept
    an additional keyword argument ``path`` specifying the full path of
    the object to retrieve, e.g. ``"path/to/folder/readme.txt"``.
    """
    path_given = 'path' in kwargs
    if path_given:
        # Translate the path keyword into regular filter arguments.
        kwargs = self.get_filter_args_with_path(True, **kwargs)
    return super(FileNodeManager, self).get(*args, **kwargs)
def set(self, ctype, key, data):
    """Create or update cache content.

    :param ctype: cache type
    :param key: the key to set a value for
    :param data: cache data
    """
    # All writes go through the per-type cache under a shared lock.
    with zvmutils.acquire_lock(self._lock):
        self._get_ctype_cache(ctype)['data'][key] = data
def do_config(self, arg):
    """Usage:
        config print
        config reload
        config help
    """
    # Dispatch on whichever docopt-style flag is set; fall back to help.
    if arg['print']:
        self.print_config(arg)
        return
    if arg['reload']:
        self.reload_config(arg)
        return
    self.help_server()
def md5sum(file_path, blocksize=65536):
    """Compute the hex MD5 digest of a file, reading it in chunks.

    :param file_path: path of the file to hash.
    :param blocksize: number of bytes read per iteration (default 64 KiB).
    :returns: hexadecimal digest string.
    """
    md5 = hashlib.md5()
    with open(file_path, "rb") as f:
        # BUG FIX: the file is opened in binary mode, so read() returns
        # bytes and the iter() sentinel must be b"" (bytes), not "" (str).
        # The original str sentinel never compared equal, so the loop
        # never terminated.
        for block in iter(lambda: f.read(blocksize), b""):
            md5.update(block)
    return md5.hexdigest()
def extract_fragment(self, iri: str) -> str:
    '''Pull only the code/ID portion out of an IRI.

    The ``str()`` conversion is applied first because rdflib objects
    need to be converted.
    '''
    tail = str(iri).rsplit('/')[-1]
    # Successively strip any prefix ending in ':', '#', or '_'.
    for separator in (':', '#', '_'):
        tail = tail.split(separator, 1)[-1]
    return tail
def from_array(array):
    """Deserialize a new InputLocationMessageContent from a given dictionary.

    :return: new InputLocationMessageContent instance, or None for an
        empty/None input.
    :rtype: InputLocationMessageContent
    """
    if not array:
        return None
    assert_type_or_raise(array, dict, parameter_name="array")
    live_period = array.get('live_period')
    data = {
        'latitude': float(array.get('latitude')),
        'longitude': float(array.get('longitude')),
        # live_period is optional; keep None when absent.
        'live_period': int(live_period) if live_period is not None else None,
    }
    instance = InputLocationMessageContent(**data)
    instance._raw = array
    return instance
def add_binding(self, binding: Binding):
    """Store a binding, wiring its error reporting to this node's view info."""
    def _annotate(error):
        return error.add_view_info(self._xml_node.view_info)
    binding.add_error_info = _annotate
    self._bindings.append(binding)
def find_id(self, element_id):
    """Find a child element by its ``id`` attribute.

    Parameters
    ----------
    element_id : str
        ID of the element to find

    Returns
    -------
    FigureElement
        one of the children elements with the given ID.
    """
    matches = etree.XPath("//*[@id=$id]")(self.root, id=element_id)
    return FigureElement(matches[0])
def _get_stddevs(self, C, mag, stddev_types, num_sites):
    """Return standard deviation as defined in eq. 11 page 319."""
    # Magnitude-dependent reduction, capped at the M 7.4 value
    # (0.07 * 7.4 == 0.518).
    reduction = 0.07 * mag if mag < 7.4 else 0.518
    std = C['c16'] - reduction + np.zeros(num_sites)
    # Only the 'total' standard deviation is supported, therefore the
    # std is always the same for all requested types.
    return [std for _ in stddev_types]
def get_letter_as_image(self, pixelletter_id):
    """Get the specified letter as an image.

    :param pixelletter_id: ID of the letter
    :return: JPG letter data if the request succeeded, else None
    """
    preview_path = 'letters/previews/{}_1.jpg'.format(pixelletter_id)
    # A falsy response (failed request) is normalised to None.
    return self._make_get_request(preview_path) or None
def partition(self, ref=None, **kwargs):
    """Returns partition by ref.

    When ``ref`` is given, it is compared (as text) against each
    partition's name, id and vid; raises NotFoundError when nothing
    matches.  When only ``kwargs`` are given, a PartitionNameQuery is
    built from them instead.  With neither, falls through returning None.
    """
    from .exc import NotFoundError
    from six import text_type
    if ref:
        for p in self.partitions:
            # This is slow for large datasets, like Census years.
            if (text_type(ref) == text_type(p.name)
                    or text_type(ref) == text_type(p.id)
                    or text_type(ref) == text_type(p.vid)):
                return p
        raise NotFoundError(
            "Failed to find partition for ref '{}' in dataset '{}'".format(
                ref, self.name))
    elif kwargs:
        from ..identity import PartitionNameQuery
        pnq = PartitionNameQuery(**kwargs)
        # NOTE(review): the visible source ends with `return self._find_orm`
        # (no call) -- this returns the bound method itself rather than a
        # partition; presumably it should be `self._find_orm(pnq)`.
        # Confirm against the full file before changing.
        return self._find_orm
def _build_keys(self, slug, date=None, granularity='all'):
    """Build the redis keys used to store metrics.

    * ``slug`` -- a slug used for a metric, e.g. "user-signups"
    * ``date`` -- (optional) A ``datetime.datetime`` object used to
      generate the time period for the metric. If omitted, the current
      date and time (in UTC) will be used.
    * ``granularity`` -- Must be one of: "all" (default), "yearly",
      "monthly", "weekly", "daily", "hourly", "minutes", or "seconds".

    Returns a list of strings.
    """
    normalized = slugify(slug)  # ensure slugs have a consistent format
    when = datetime.utcnow() if date is None else date
    patterns = self._build_key_patterns(normalized, when)
    if granularity == "all":
        return list(patterns.values())
    return [patterns[granularity]]
def get_build_info_for_date(self, date, build_index=None):
    """Return the build information for a given date.

    :param date: datetime of the requested build day; when it carries a
        time component, that time is also used to pick the build.
    :param build_index: optional index into the entries; defaults to the
        last non-empty build of the day.
    :returns: tuple ``(entries, build_index)``.
    :raises errors.NotFoundError: when no build folder matches the date.
    """
    url = urljoin(self.base_url, self.monthly_build_list_regex)
    has_time = date and date.time()

    self.logger.info('Retrieving list of builds from %s' % url)
    parser = self._create_directory_parser(url)
    regex = r'%(DATE)s-(\d+-)+%(BRANCH)s%(L10N)s%(PLATFORM)s$' % {
        'DATE': date.strftime('%Y-%m-%d'),
        'BRANCH': self.branch,
        # ensure to select the correct subfolder for localized builds
        'L10N': '(-l10n)?' if self.locale_build else '',
        # BUG FIX: ('fennec') is just the string 'fennec', so the original
        # `not in ('fennec')` was a substring test; use a real one-element
        # tuple for membership.
        'PLATFORM': '' if self.application not in ('fennec',)
                    else '-' + self.platform
    }
    parser.entries = parser.filter(regex)
    parser.entries = parser.filter(self.is_build_dir)

    if has_time:
        # If a time is included in the date, use it to determine the
        # build's index.
        regex = r'.*%s.*' % date.strftime('%H-%M-%S')
        parser.entries = parser.filter(regex)

    if not parser.entries:
        date_format = '%Y-%m-%d-%H-%M-%S' if has_time else '%Y-%m-%d'
        # BUG FIX: was `self.date.strftime(...)`; the date is a parameter
        # of this method, not an attribute, so that raised AttributeError
        # instead of the intended NotFoundError.
        message = ('Folder for builds on %s has not been found'
                   % date.strftime(date_format))
        raise errors.NotFoundError(message, url)

    self.show_matching_builds(parser.entries)
    # If no index has been given, set it to the last build of the day.
    if build_index is None:
        # Find the most recent non-empty entry.
        build_index = len(parser.entries)
        for build in reversed(parser.entries):
            build_index -= 1
            if not build_index or self.is_build_dir(build):
                break

    self.logger.info('Selected build: %s' % parser.entries[build_index])
    return (parser.entries, build_index)
def open_output_file ( self , test_record ) : """Open file based on pattern ."""
# Ignore keys for the log filename to not convert larger data structures . record_dict = data . convert_to_base_types ( test_record , ignore_keys = ( 'code_info' , 'phases' , 'log_records' ) ) pattern = self . filename_pattern if isinstance ( pattern , six . string_types ) or callable ( pattern ) : output_file = self . open_file ( util . format_string ( pattern , record_dict ) ) try : yield output_file finally : output_file . close ( ) elif hasattr ( self . filename_pattern , 'write' ) : yield self . filename_pattern else : raise ValueError ( 'filename_pattern must be string, callable, or File-like object' )
def matches_address(self, address):
    """Return whether this account knows about an email address.

    :param str address: address to look up
    :rtype: bool
    """
    # Exact address or any configured alias matches directly.
    if address == self.address or address in self.aliases:
        return True
    # Fall back to the optional alias regexp, when one is configured.
    if self._alias_regexp and self._alias_regexp.match(address):
        return True
    return False
def old_values(self):
    '''Return the old values from the diff, each tagged with its key.'''
    def _with_key(entry):
        merged = entry.old_values
        merged.update({self._key: entry.past_dict[self._key]})
        return merged
    # Only entries that actually differ and have a past state count.
    return [_with_key(entry)
            for entry in self._get_recursive_difference('all')
            if entry.diffs and entry.past_dict]
def resources(self, start=1, num=10):
    """List all file resources for the organization.

    The ``start`` and ``num`` paging parameters are supported.

    :param start: the number of the first entry in the result set
        response; the index number is 1-based and the default is 1.
    :param num: the maximum number of results to be returned as a whole.
    """
    return self._get(
        url=self._url + "/resources",
        param_dict={"f": "json", "start": start, "num": num},
        securityHandler=self._securityHandler,
        proxy_url=self._proxy_url,
        proxy_port=self._proxy_port)
def remove_users_from_group(self, group_id, body, **kwargs):  # noqa: E501
    """Remove users from a group.  # noqa: E501

    An endpoint for removing users from groups. Synchronous by default;
    pass ``asynchronous=True`` to get a request thread instead::

        >>> thread = api.remove_users_from_group(group_id, body, asynchronous=True)
        >>> result = thread.get()

    :param asynchronous bool
    :param str group_id: The ID of the group whose users are removed. (required)
    :param SubjectList body: A list of users to be removed from the group. (required)
    :return: UpdatedResponse
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the asynchronous (thread) and synchronous (data) paths are
    # served by the _with_http_info variant, which returns a thread when
    # asynchronous=True and the response data otherwise.
    return self.remove_users_from_group_with_http_info(
        group_id, body, **kwargs)  # noqa: E501
def upload_tree(self):
    """Send processed channel data to the server to create the tree.

    Args: None
    Returns: the committed channel id and a link to the uploaded channel.
    """
    from datetime import datetime
    start_time = datetime.now()
    root, channel_id = self.add_channel()
    # Track upload progress against the total node count.
    self.node_count_dict = {"upload_count": 0,
                            "total_count": self.channel.count()}
    config.LOGGER.info("\tPreparing fields...")
    self.truncate_fields(self.channel)
    self.add_nodes(root, self.channel)
    # Retry nodes that failed on the first pass, then re-check; the
    # second check_failed() call logs warnings for persistent failures.
    if self.check_failed(print_warning=False):
        failed = self.failed_node_builds
        self.failed_node_builds = {}
        self.reattempt_failed(failed)
        self.check_failed()
    channel_id, channel_link = self.commit_channel(channel_id)
    end_time = datetime.now()
    config.LOGGER.info("Upload time: {time}s".format(
        time=(end_time - start_time).total_seconds()))
    return channel_id, channel_link
def _generate_corpus_table(self, labels, ngrams):
    """Return an HTML table containing data on each corpus' n-grams."""
    rows = [self._render_corpus_row(label, ngrams) for label in labels]
    return '\n'.join(rows)
def list_groups(name):
    '''Return a list of groups the named user belongs to

    Args:
        name (str): The user name for which to list groups

    Returns:
        list: A sorted list of groups to which the user belongs, or
        ``False`` when the user lookup fails.

    CLI Example:

    .. code-block:: bash

        salt '*' user.list_groups foo
    '''
    if six.PY2:
        name = _to_unicode(name)
    try:
        raw_groups = info(name)['groups']
    except KeyError:
        # No group data for this user: keep the historical False return.
        return False
    cleaned = {group.strip(' *') for group in raw_groups}
    return sorted(list(cleaned))
def get_batch(sentences, token_dict, ignore_case=False,
              unk_index=1, eos_index=2):
    """Get a batch of inputs and outputs from given sentences.

    :param sentences: A list of list of tokens.
    :param token_dict: The dict that maps a token to an integer. `<UNK>`
        and `<EOS>` should be preserved.
    :param ignore_case: Whether ignoring the case of the token.
    :param unk_index: The index for unknown token.
    :param eos_index: The index for ending of sentence.
    :return inputs, outputs: The inputs and outputs of the batch.
    """
    batch_size = len(sentences)
    max_len = max(map(len, sentences))
    inputs = [[0] * max_len for _ in range(batch_size)]
    fwd = [[0] * max_len for _ in range(batch_size)]
    bwd = [[0] * max_len for _ in range(batch_size)]
    for row, sentence in enumerate(sentences):
        # EOS follows the last token (forward) / precedes the first (backward).
        fwd[row][len(sentence) - 1] = eos_index
        bwd[row][0] = eos_index
        for col, token in enumerate(sentence):
            key = token.lower() if ignore_case else token
            index = token_dict.get(key, unk_index)
            inputs[row][col] = index
            # Forward target is the next token; backward is the previous.
            if col >= 1:
                fwd[row][col - 1] = index
            if col + 1 < len(sentence):
                bwd[row][col + 1] = index
    fwd = np.expand_dims(np.asarray(fwd), axis=-1)
    bwd = np.expand_dims(np.asarray(bwd), axis=-1)
    return np.asarray(inputs), [fwd, bwd]
def _matches_docs(self, docs, other_docs):
    """Overridable method: True when every paired doc matches."""
    return all(self._match_map(doc, other)
               for doc, other in zip(docs, other_docs))
def _check_modes(self, modes):
    """Check that the image is in one of the given *modes*, raise an
    exception otherwise."""
    # A single mode may be passed bare; normalise to a container.
    allowed = modes if isinstance(modes, (tuple, list, set)) else [modes]
    if self.mode not in allowed:
        raise ValueError(
            "Image not in suitable mode, expected: %s, got: %s"
            % (allowed, self.mode))
def load_definition_by_name(name: str) -> dict:
    '''Look up and return a labware definition by name.

    ``name`` is expected to correspond to the filename of the definition
    (without the .json extension), previously saved in the known
    shared-data location as ``${name}.json``; raises if not found.
    '''
    def_path = 'shared_data/definitions2/{}.json'.format(name.lower())
    raw = pkgutil.get_data('opentrons', def_path)  # type: ignore # NOQA
    return json.loads(raw)
def _register_elements(self, elements):
    """Take elements from the metadata class and create a base model
    for all backend models.

    Contributes each element to the metadata class, builds a Django
    field per editable element, and dynamically creates an abstract
    base model holding those common fields.
    """
    self.elements = elements

    for key, obj in elements.items():
        obj.contribute_to_class(self.metadata, key)

    # Create the common Django fields
    fields = {}
    for key, obj in elements.items():
        if obj.editable:
            field = obj.get_field()
            # Fill in bulk help text only when the field has none of its own.
            if not field.help_text:
                if key in self.bulk_help_text:
                    field.help_text = self.bulk_help_text[key]
            fields[key] = field

    # 0. Abstract base model with common fields
    base_meta = type('Meta', (), self.original_meta)

    class BaseMeta(base_meta):
        abstract = True
        app_label = 'seo'

    fields['Meta'] = BaseMeta
    # Do we need this?  (mirrors attrs['__module__'] in type creation)
    fields['__module__'] = __name__
    self.MetadataBaseModel = type(
        '%sBase' % self.name, (models.Model,), fields)
def on_tweet(self, tweet):
    """Callback to receive a tweet from
    :class:`~responsebot.responsebot_stream.ResponseBotStream` and
    forward it to every matching registered handler.

    :param tweet: An object containing a tweet's text and metadata
    :type tweet: :class:`~responsebot.models.Tweet`
    """
    logging.info(u'Received tweet: `{message}`'.format(message=tweet.text))
    user_stream = self.client.config.get('user_stream')
    for handler in self.handlers:
        # Skip our own tweets unless the handler explicitly opted in.
        if self.is_self_tweet(tweet) and not handler.catch_self_tweets:
            continue
        # Skip tweets the handler's filter rejects.
        if not handler.filter.match_tweet(tweet=tweet,
                                          user_stream=user_stream):
            continue
        handler.on_tweet(tweet)
def _reindex(self):
    """Create a case-insensitive index of the paths.

    Each index entry is a ``(lowercased_path, original_path)`` tuple.
    """
    self.index = []
    for rel_path in self.paths:
        target = os.path.normpath(os.path.join(BASE_PATH, rel_path))
        for dirpath, _subdirs, filenames in os.walk(target):
            for filename in filenames:
                full = os.path.join(dirpath, filename)
                self.index.append((full.lower(), full))
def getAllCellsInColumns(columns, cellsPerColumn):
    """Calculate all cell indices in the specified columns.

    @param columns (numpy array)
    @param cellsPerColumn (int)
    @return (numpy array) All cells within the specified columns. The
        cells are in the same order as the provided columns, so they're
        sorted if the columns are sorted.
    """
    # Each column's first cell index, as a column vector...
    first_cells = (columns * cellsPerColumn).reshape((-1, 1))
    # ...plus the within-column offsets 0..cellsPerColumn-1, broadcast
    # across columns and flattened into a single vector.
    offsets = np.arange(cellsPerColumn, dtype="uint32")
    return (first_cells + offsets).flatten()
def name(self):
    '''Return the basename of this template's file (if created from a
    file) or "string" if not.'''
    filename = self.mako_template.filename
    return os.path.basename(filename) if filename else 'string'
def daily_hold(self):
    """Position table at each trading day's settlement.

    Returns a DataFrame of cumulative holdings indexed by date
    (restricted to ``self.trade_range``), or None when there are no
    trades.
    """
    # Cumulative sum of trades gives the running position.
    data = self.trade.cumsum()
    if len(data) < 1:
        return None
    else:
        # print(data.index.levels[0])
        data = data.assign(account_cookie=self.account_cookie).assign(
            date=pd.to_datetime(data.index.levels[0]).date)
        data.date = pd.to_datetime(data.date)
        data = data.set_index(['date', 'account_cookie'])
        # Keep only the last record per (date, account) pair.
        res = data[~data.index.duplicated(keep='last')].sort_index()
        # NOTE: this also counts positions while a stock is suspended,
        # but those disappear once market_value is computed.
        le = pd.DataFrame(
            pd.Series(
                data=None,
                index=pd.to_datetime(
                    self.trade_range_max).set_names('date'),
                name='predrop'))
        ri = res.reset_index().set_index('date')
        # Left-join onto the full calendar, forward-fill holdings across
        # non-trading days, then trim back to the actual trade range.
        res_ = pd.merge(le, ri, how='left',
                        left_index=True, right_index=True)
        res_ = res_.ffill().fillna(0).drop(
            ['predrop', 'account_cookie'],
            axis=1).reset_index().set_index(['date']).sort_index()
        res_ = res_[res_.index.isin(self.trade_range)]
        return res_
def create_environment(self, **kwargs):
    """Return a new Jinja environment.

    Derived classes may override this method to pass additional
    parameters or to change the template loader type.
    """
    env = super().create_environment(**kwargs)
    # Register custom tests used by the templates.
    env.tests.update(
        type=self.test_type,
        kind=self.test_kind,
        opposite_before_self=self.test_opposite_before_self,
    )
    # Register custom filters used by the templates.
    env.filters.update(
        docstringline=self.filter_docstringline,
        pyquotesingle=self.filter_pyquotesingle,
        derivedname=self.filter_derived_name,
        refqualifiers=self.filter_refqualifiers,
        attrqualifiers=self.filter_attrqualifiers,
        supertypes=self.filter_supertypes,
        all_contents=self.filter_all_contents,
        pyfqn=self.filter_pyfqn,
        re_sub=lambda value, pattern, repl: re.sub(pattern, repl, value),
        set=self.filter_set,
    )
    from pyecore import ecore
    env.globals.update(ecore=ecore)
    return env
def to_json(self, *attributes, **options):
    """Return the selected field *attributes* for each :class:`Field`
    *nested* in the `Container` as a JSON formatted string.

    The *attributes* of each :class:`Field` for containers *nested* in
    the `Container` are viewed as well (chained method call).

    :param str attributes: selected :class:`Field` attributes.
        Fallback is the field :attr:`~Field.value`.
    :keyword tuple fieldnames: sequence of dictionary keys for the
        selected field *attributes*. Defaults to ``(*attributes)``.
    :keyword bool nested: if ``True`` all :class:`Pointer` fields in the
        `Container` view their referenced :attr:`~Pointer.data` object
        field attributes as well (chained method call).
    """
    nested = options.pop('nested', False)
    fieldnames = options.pop('fieldnames', attributes)
    fields = self.view_fields(*attributes, nested=nested,
                              fieldnames=fieldnames)
    # Honour a caller-supplied encoder class; otherwise fall back to the
    # category-aware encoder.
    if 'cls' not in options:
        options['cls'] = _CategoryJSONEncoder
    return json.dumps(fields, **options)
def get_configs(args, command_args, ansible_args=()):
    """Glob the current directory for Molecule config files, instantiate
    config objects, and return them as a list.

    :param args: A dict of options, arguments and commands from the CLI.
    :param command_args: A dict of options passed to the subcommand from
        the CLI.
    :param ansible_args: An optional tuple of arguments provided to the
        `ansible-playbook` command.
    :return: list
    """
    found = []
    for molecule_file in glob.glob(MOLECULE_GLOB):
        found.append(config.Config(
            molecule_file=util.abs_path(molecule_file),
            args=args,
            command_args=command_args,
            ansible_args=ansible_args,
        ))
    _verify_configs(found)
    return found
def train_batch ( self , batch_info : BatchInfo ) -> None : """Batch - the most atomic unit of learning . For this reinforforcer , that involves : 1 . Roll out the environmnent using current policy 2 . Use that rollout to train the policy"""
# Calculate environment rollout on the evaluation version of the model self . model . train ( ) rollout = self . env_roller . rollout ( batch_info , self . model , self . settings . number_of_steps ) # Process rollout by the ' algo ' ( e . g . perform the advantage estimation ) rollout = self . algo . process_rollout ( batch_info , rollout ) # Perform the training step # Algo will aggregate data into this list : batch_info [ 'sub_batch_data' ] = [ ] if self . settings . shuffle_transitions : rollout = rollout . to_transitions ( ) if self . settings . stochastic_experience_replay : # Always play experience at least once experience_replay_count = max ( np . random . poisson ( self . settings . experience_replay ) , 1 ) else : experience_replay_count = self . settings . experience_replay # Repeat the experience N times for i in range ( experience_replay_count ) : # We may potentially need to split rollout into multiple batches if self . settings . batch_size >= rollout . frames ( ) : batch_result = self . algo . optimizer_step ( batch_info = batch_info , device = self . device , model = self . model , rollout = rollout . to_device ( self . device ) ) batch_info [ 'sub_batch_data' ] . append ( batch_result ) else : # Rollout too big , need to split in batches for batch_rollout in rollout . shuffled_batches ( self . settings . batch_size ) : batch_result = self . algo . optimizer_step ( batch_info = batch_info , device = self . device , model = self . model , rollout = batch_rollout . to_device ( self . device ) ) batch_info [ 'sub_batch_data' ] . append ( batch_result ) batch_info [ 'frames' ] = rollout . frames ( ) batch_info [ 'episode_infos' ] = rollout . episode_information ( ) # Even with all the experience replay , we count the single rollout as a single batch batch_info . aggregate_key ( 'sub_batch_data' )
def backend(self):
    """Return the backend class registered under this backend's name."""
    from indico_livesync.plugin import LiveSyncPlugin
    registry = LiveSyncPlugin.instance.backend_classes
    return registry.get(self.backend_name)
def prepare_batch(self):
    """Validate all certificates and build the merkle tree payload.

    Propagates exceptions on failure.

    :return: byte array to put on the blockchain
    """
    # Validate every certificate before touching the merkle tree.
    for certificate in self.certificates_to_issue:
        self.certificate_handler.validate_certificate(certificate)
    self.merkle_tree.populate(self.get_certificate_generator())
    logging.info('here is the op_return_code data: %s',
                 b2h(self.merkle_tree.get_blockchain_data()))
    return self.merkle_tree.get_blockchain_data()
def deploy_snmp(snmp, host=None, admin_username=None,
                admin_password=None, module=None):
    '''Change the QuickDeploy SNMP community string, used for switches
    as well.

    CLI Example:

    .. code-block:: bash

        salt dell dracr.deploy_snmp SNMP_STRING host=<remote DRAC or CMC>
            admin_username=<DRAC user> admin_password=<DRAC PW>
        salt dell dracr.deploy_password diana secret
    '''
    command = 'deploy -v SNMPv2 {0} ro'.format(snmp)
    return __execute_cmd(command, host=host,
                         admin_username=admin_username,
                         admin_password=admin_password,
                         module=module)