signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def design_matrix(phases, degree):
    r"""Construct an :math:`N \times (2n + 1)` Fourier design matrix.

    Column 0 is the constant (intercept) term; odd columns hold
    :math:`\sin(k \cdot 2\pi \phi_i)` and even columns hold
    :math:`\cos(k \cdot 2\pi \phi_i)` for harmonics :math:`k = 1 \ldots n`.

    Parameters
    ----------
    phases : array-like, shape = [n_samples]
        Phase values.
    degree : int
        Highest harmonic :math:`n` to include.

    Returns
    -------
    numpy.ndarray, shape = [n_samples, 2*degree + 1]
    """
    n_samples = phases.size
    harmonics = numpy.arange(1, degree + 1)
    # angles[i, k-1] = k * 2*pi * phases[i]
    angles = 2 * numpy.pi * numpy.outer(phases, harmonics)
    M = numpy.empty((n_samples, 2 * degree + 1))
    M[:, 0] = 1  # intercept column
    M[:, 1::2] = numpy.sin(angles)  # odd columns: sine terms
    M[:, 2::2] = numpy.cos(angles)  # even columns: cosine terms
    return M
def mark_whole_doc_dirty(self):
    """Mark the entire document as dirty to force a full refresh. **SLOW**"""
    cursor = self._editor.textCursor()
    cursor.select(cursor.Document)
    start, end = cursor.selectionStart(), cursor.selectionEnd()
    self._editor.document().markContentsDirty(start, end)
def is_resource_modified(environ, etag=None, data=None, last_modified=None,
                         ignore_if_range=True):
    """Convenience method for conditional requests.

    :param environ: the WSGI environment of the request to be checked.
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
                 generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :param ignore_if_range: If `False`, `If-Range` header will be taken into
                            account.
    :return: `True` if the resource was modified, otherwise `False`.
    """
    # etag and data are mutually exclusive; data is only a convenience
    # to derive the etag.
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError("both data and etag given")
    # Conditional requests only apply to safe methods.
    if environ["REQUEST_METHOD"] not in ("GET", "HEAD"):
        return False

    unmodified = False
    if isinstance(last_modified, string_types):
        last_modified = parse_date(last_modified)

    # ensure that microsecond is zero because the HTTP spec does not transmit
    # that either and we might have some false positives. See issue #39
    if last_modified is not None:
        last_modified = last_modified.replace(microsecond=0)

    if_range = None
    if not ignore_if_range and "HTTP_RANGE" in environ:
        # https://tools.ietf.org/html/rfc7233#section-3.2
        # A server MUST ignore an If-Range header field received in a request
        # that does not contain a Range header field.
        if_range = parse_if_range_header(environ.get("HTTP_IF_RANGE"))

    # If-Range (when present with a date) takes precedence over
    # If-Modified-Since for the date comparison.
    if if_range is not None and if_range.date is not None:
        modified_since = if_range.date
    else:
        modified_since = parse_date(environ.get("HTTP_IF_MODIFIED_SINCE"))

    if modified_since and last_modified and last_modified <= modified_since:
        unmodified = True

    if etag:
        etag, _ = unquote_etag(etag)
        if if_range is not None and if_range.etag is not None:
            unmodified = parse_etags(if_range.etag).contains(etag)
        else:
            if_none_match = parse_etags(environ.get("HTTP_IF_NONE_MATCH"))
            if if_none_match:
                # https://tools.ietf.org/html/rfc7232#section-3.2
                # "A recipient MUST use the weak comparison function when
                # comparing entity-tags for If-None-Match"
                unmodified = if_none_match.contains_weak(etag)

            # https://tools.ietf.org/html/rfc7232#section-3.1
            # "Origin server MUST use the strong comparison function when
            # comparing entity-tags for If-Match"
            if_match = parse_etags(environ.get("HTTP_IF_MATCH"))
            if if_match:
                unmodified = not if_match.is_strong(etag)

    return not unmodified
def regroup_if_changed(group, op_list, name=None):
    """Create a new group for ``op_list`` if it has changed.

    Args:
        group: The current group. It is returned if ``op_list`` is unchanged.
        op_list: The list of operations to check.
        name: The name to use if a new group is created.
    Returns:
        Either ``group`` or a new group (or if ``op_list`` is empty, a no_op).
    """
    has_deltas = isinstance(op_list, sequence_with_deltas.SequenceWithDeltas)
    unchanged = (group is not None
                 and len(group.control_inputs) == len(op_list)
                 and not (has_deltas and op_list.has_changed()))
    if unchanged:
        return group
    if has_deltas:
        op_list.mark()
    if op_list:
        return tf.group(*op_list, name=name)
    return tf.no_op(name=name)
def make_argparser():
    """Build the argparse parser for the mypolr CLI.

    :return: The parser which :class:`MypolrCli` expects parsed arguments from.
    :rtype: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(
        prog='mypolr',
        description="Interacts with the Polr Project's API.\n\n"
                    "User Guide and documentation: https://mypolr.readthedocs.io",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog="NOTE: if configurations are saved, they are stored as plain text on disk, "
               "and can be read by anyone with access to the file.")

    # top-level arguments
    parser.add_argument("-v", "--version", action="store_true",
                        help="Print version and exit.")
    parser.add_argument("url", nargs='?', default=None,
                        help="The url to process.")

    # API server configuration
    server_group = parser.add_argument_group(
        'API server arguments',
        'Use these for configure the API. Can be stored locally with --save.')
    server_group.add_argument("-s", "--server", default=None,
                              help="Server hosting the API.")
    server_group.add_argument("-k", "--key", default=None,
                              help="API_KEY to authenticate against server.")
    server_group.add_argument("--api-root", default=DEFAULT_API_ROOT,
                              help="API endpoint root.")

    # which API action to perform
    action_group = parser.add_argument_group(
        'Action options', 'Configure the API action to use.')
    action_group.add_argument("-c", "--custom", default=None,
                              help="Custom short url ending.")
    action_group.add_argument("--secret", action="store_true",
                              help="Set option if using secret url.")
    action_group.add_argument("-l", "--lookup", action="store_true",
                              help="Perform lookup action instead of shorten action.")

    # local credential management
    credentials_group = parser.add_argument_group(
        'Manage credentials',
        'Use these to save, delete or update SERVER, KEY and/or '
        'API_ROOT locally in ~/.mypolr/config.ini.')
    credentials_group.add_argument("--save", action="store_true",
                                   help="Save configuration (including credentials) in plaintext(!).")
    credentials_group.add_argument("--clear", action="store_true",
                                   help="Clear configuration.")
    return parser
def report_data(self, entity_data):
    """Report entity data (metrics & snapshot) to the host agent.

    Returns the ``requests`` response object on a completed POST, or
    ``None`` when the host agent could not be reached.
    """
    response = None
    try:
        response = self.client.post(self.__data_url(),
                                    data=self.to_json(entity_data),
                                    headers={"Content-Type": "application/json"},
                                    timeout=0.8)
        # BUG FIX: was ``response.status_code is 200`` — ``is`` compares
        # object identity and only happens to work via CPython small-int
        # caching; use ``==`` for a value comparison.
        if response.status_code == 200:
            self.last_seen = datetime.now()
    except (requests.ConnectTimeout, requests.ConnectionError):
        logger.debug("report_data: host agent connection error")
    # BUG FIX: was ``return response`` inside ``finally``, which silently
    # swallowed any exception not caught above; return normally instead.
    return response
def start_class(self, class_):
    """Start all services of a given class.

    If this manager doesn't already have a service of that class, it
    constructs one, registers it, and starts it.

    :return: list of the matching (now started) services.
    """
    # BUG FIX: under Python 3 ``filter`` returns a lazy iterator, which is
    # always truthy — so the construct-if-missing branch never ran.
    matches = [svc for svc in self if isinstance(svc, class_)]
    if not matches:
        svc = class_()
        self.register(svc)
        matches = [svc]
    # BUG FIX: ``map(self.start, matches)`` is lazy under Python 3, so the
    # services were never actually started; use an explicit loop.
    for svc in matches:
        self.start(svc)
    return matches
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts Safari history items.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    # Only plist format version 1 is supported.
    format_version = match.get('WebHistoryFileVersion', None)
    if format_version != 1:
        parser_mediator.ProduceExtractionWarning(
            'unsupported Safari history version: {0!s}'.format(format_version))
        return

    if 'WebHistoryDates' not in match:
        return

    for history_entry in match.get('WebHistoryDates', {}):
        last_visited_date = history_entry.get('lastVisitedDate', None)
        if last_visited_date is None:
            parser_mediator.ProduceExtractionWarning('missing last visited date')
            continue

        try:
            # Last visited date is a string containing a floating point value.
            timestamp = float(last_visited_date)
        except (TypeError, ValueError):
            parser_mediator.ProduceExtractionWarning(
                'unable to convert last visited date {0:s}'.format(
                    last_visited_date))
            continue

        display_title = history_entry.get('displayTitle', None)

        event_data = SafariHistoryEventData()
        # Only record the display title when it differs from the (default)
        # title on a freshly constructed event data object.
        if display_title != event_data.title:
            event_data.display_title = display_title
        event_data.title = history_entry.get('title', None)
        # NOTE: the URL is stored under the empty-string key in Safari's
        # WebHistoryDates entries — the '' key is intentional.
        event_data.url = history_entry.get('', None)
        event_data.visit_count = history_entry.get('visitCount', None)
        event_data.was_http_non_get = history_entry.get(
            'lastVisitWasHTTPNonGet', None)

        # Convert the floating point value to an integer.
        # TODO: add support for the fractional part of the floating point value.
        timestamp = int(timestamp)

        date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
def parse_arg_types(text=None, is_return_included=False):
    """Parse ``:param name: type`` entries out of a reST docstring.

    :param text: str of the text to parse, by default uses calling function doc
    :param is_return_included: bool if True return will be returned as well
        (the ``:return:`` entry is rewritten as ``:param return:``)
    :return: dict of args and variable types
    """
    # Default to the docstring of the caller (two frames up).
    text = text or function_doc(2)
    if is_return_included:
        text = text.replace(':return:', ':param return:')
    ret = {}

    def evl(text_):
        # SECURITY NOTE(review): evaluates type names from docstring text via
        # eval(); only safe because docstrings are author-controlled — do not
        # feed untrusted text through this function.
        try:
            return eval(text_)
        except Exception as e:
            # Unresolvable type names fall back to the raw text.
            return text_

    if ':param' in text:
        for param in text.split(':param ')[1:]:
            name, desc = param.split(':', 1)
            name = name.strip()
            if desc.strip().startswith('list of '):
                # "list of X" becomes a (list, X) tuple; STR is presumably a
                # module-level str alias — TODO confirm.
                ret[name] = (list, evl(desc.split()[2].replace('str', STR)))
            elif desc.strip().startswith('str timestamp'):
                ret[name] = datetime
            else:
                ret[name] = evl(desc.split(None, 1)[0].replace('str', STR))
    return ret
def set_user_name(uid, name, **kwargs):
    '''
    Set user name

    :param uid: user number [1:16]
    :param name: username (limit of 16 bytes)
    :param kwargs:
        - api_host=127.0.0.1
        - api_user=admin
        - api_pass=example
        - api_port=623
        - api_kg=None

    CLI Examples:

    .. code-block:: bash

        salt-call ipmi.set_user_name uid=2 name='steverweber'
    '''
    with _IpmiCommand(**kwargs) as ipmi_session:
        return ipmi_session.set_user_name(uid, name)
def _x_reduced(self, x, y, n_sersic, r_eff, center_x, center_y):
    """Transform coordinates to the Sersic normalized radius
    ``(r / r_eff) ** (1 / n_sersic)``.

    :param x: x-coordinate(s)
    :param y: y-coordinate(s)
    :param n_sersic: Sersic index
    :param r_eff: effective radius
    :param center_x: profile center x
    :param center_y: profile center y
    :return: normalized radius (scalar or array, matching the input)
    """
    dx = x - center_x
    dy = y - center_y
    radius = np.sqrt(dx ** 2 + dy ** 2)
    if isinstance(radius, (int, float)):
        # scalar input: clip at the smoothing scale self._s
        radius = max(self._s, radius)
    else:
        # array input: clip in place
        radius[radius < self._s] = self._s
    return (radius / r_eff) ** (1. / n_sersic)
def set_scale(self, scale):
    """Set the device full-scale register.

    Device must be in standby before calling this function.

    @param scale: scale factor
    @return: No return value
    """
    register = self.MMA8452Q_Register['XYZ_DATA_CFG']
    # Read the current configuration so only the scale bits change.
    self.board.i2c_read_request(self.address, register, 1,
                                Constants.I2C_READ | Constants.I2C_END_TX_MASK,
                                self.data_val, Constants.CB_TYPE_DIRECT)
    reply = self.wait_for_read_result()
    config_value = reply[self.data_start]
    config_value &= 0xFC            # mask out scale bits
    config_value |= (scale >> 2)    # 2g/4g/8g -> register code 0/1/2
    self.board.i2c_write_request(self.address, [register, config_value])
def get_and_alter(self, function):
    """Alter the currently stored reference by applying a function to it and
    return the *old* value.

    :param function: (Function), a stateful serializable object with a
        matching ``org.hazelcast.core.IFunction`` implementation registered
        on the server side.
    :return: (object), the value before the function was applied.
    """
    check_not_none(function, "function can't be None")
    function_data = self._to_data(function)
    return self._encode_invoke(atomic_reference_get_and_alter_codec,
                               function=function_data)
def get_relation_cols(self):
    """Return the column names of active FilterRelation filters."""
    return [flt.column_name
            for flt, value in zip(self.filters, self.values)
            if isinstance(flt, FilterRelation) and value]
async def ModelSet(self, config):
    '''Send a Client.ModelSet RPC request.

    config : typing.Mapping[str, typing.Any]
    Returns -> None
    '''
    # assemble the RPC message
    request = dict(type='Client',
                   request='ModelSet',
                   version=2,
                   params={'config': config})
    return await self.rpc(request)
def _set_random_data(self):
    """Populate page data (pageid, title) from a 'random' API response."""
    payload = self._load_response('random')
    entry = payload['query']['random'][0]
    self.data.update({'pageid': entry.get('id'),
                      'title': entry.get('title')})
def queue(self, queue, message, params=None, uids=None):
    """Queue a job in Rhumba.

    :param queue: queue name
    :param message: job message
    :param params: optional dict of job parameters
    :param uids: optional list of worker uids; when given the job is pushed
        to each worker's direct queue instead of the shared queue.
    :return: the generated job id (hex string)
    """
    # BUG FIX: the defaults were mutable ({} and []), shared across calls;
    # use None sentinels instead.
    if params is None:
        params = {}
    job = {
        # BUG FIX: uuid1().get_hex() is Python 2 only; .hex works on both.
        'id': uuid.uuid1().hex,
        'version': 1,
        'message': message,
        'params': params,
    }
    payload = json.dumps(job)
    if uids:
        for uid in uids:
            self._get_client().lpush('rhumba.dq.%s.%s' % (uid, queue), payload)
    else:
        self._get_client().lpush('rhumba.q.%s' % queue, payload)
    return job['id']
def string_to_file(path, input):
    """Write the string ``input`` to ``path`` (UTF-8), creating parent
    directories as needed."""
    parent_dir = os.path.dirname(path)
    mkdir_p(parent_dir)
    with codecs.open(path, "w+", "UTF-8") as handle:
        handle.write(input)
def field_mask(original, modified):
    """Create a field mask by comparing two messages.

    Args:
        original (~google.protobuf.message.Message): the original message;
            ``None`` is treated as an empty message.
        modified (~google.protobuf.message.Message): the modified message;
            ``None`` is treated as an empty message.

    Returns:
        google.protobuf.field_mask_pb2.FieldMask: the field names whose
        values differ between the two messages (empty when equivalent).

    Raises:
        ValueError: if ``original`` and ``modified`` are not the same type.
    """
    if original is None and modified is None:
        return field_mask_pb2.FieldMask()

    # Treat a missing side as an empty message of the other side's type.
    if original is None:
        original = copy.deepcopy(modified)
        original.Clear()
    elif modified is None:
        modified = copy.deepcopy(original)
        modified.Clear()

    if type(original) != type(modified):
        raise ValueError(
            "expected that both original and modified should be of the "
            'same type, received "{!r}" and "{!r}".'.format(
                type(original), type(modified)))

    return field_mask_pb2.FieldMask(paths=_field_mask_helper(original, modified))
def get(args):
    """Get an Aegea configuration parameter by name"""
    from . import config
    # Walk dotted key path, e.g. "a.b.c" -> config.a.b.c
    node = config
    for part in args.key.split("."):
        node = getattr(node, part)
    print(json.dumps(node))
def stop(self):
    """Stop the interface.

    :rtype: None
    """
    self.debug("()")
    try:
        self.unjoin()
        time.sleep(2)  # give the unjoin a moment to propagate
    # BUG FIX: was a bare ``except:``, which also swallows SystemExit and
    # KeyboardInterrupt; narrow to Exception.
    except Exception:
        self.exception("Failed to leave audience")
    super(SensorClient, self).stop()
def register_workflow(connection, domain, workflow):
    """Register a workflow type.

    Returns ``False`` when the workflow type already exists, ``True`` when
    it was newly registered.
    """
    registration_args = get_workflow_registration_parameter(workflow)
    try:
        connection.register_workflow_type(domain=domain, **registration_args)
    except ClientError as err:
        # Already-registered is expected and not an error for the caller.
        if err.response['Error']['Code'] == 'TypeAlreadyExistsFault':
            return False
        raise
    return True
def execute_single(self, request):
    """Build, send and handle the response to a single AMF request,
    returning its response."""
    if self.logger:
        self.logger.debug('Executing single request: %s', request)
    self.removeRequest(request)
    encoded = remoting.encode(self.getAMFRequest([request]),
                              strict=self.strict)
    http_request = urllib2.Request(self._root_url, encoded.getvalue(),
                                   self._get_execute_headers())
    if self.proxy_args:
        http_request.set_proxy(*self.proxy_args)
    envelope = self._getResponse(http_request)
    return envelope[request.id]
def setSegmentsMap(self, segmentNameCount, lineMap):
    """Set the segments map.

    :param segmentNameCount: dict of segment name -> total number of
        segments with that name in the message
    :param lineMap: list mapping a line index in the HL7 message to the
        segment name at that line (e.g. index 3 -> 'PV1')
    """
    self.segmentNameCount = segmentNameCount
    self.lineMap = lineMap
    # For segments occurring exactly once, alias both ways so that
    # e.g. 'PID' and 'PID[1]' resolve to each other in lookups.
    for seg_name, count in segmentNameCount.items():
        if count == 1:
            indexed = seg_name + "[1]"
            self.aliasSegmentName[seg_name] = indexed
            self.aliasSegmentName[indexed] = seg_name
def _elements_to_dict(data, position, obj_end, opts, result=None):
    """Decode a BSON document into result."""
    if result is None:
        result = opts.document_class()
    # obj_end points at the document's trailing NUL byte; elements stop
    # one byte before it.
    end = obj_end - 1
    while position < end:
        key, value, position = _element_to_dict(data, position, obj_end, opts)
        result[key] = value
    # After consuming all elements, position must land exactly on obj_end;
    # anything else means a corrupt length prefix or element.
    if position != obj_end:
        raise InvalidBSON('bad object or element length')
    return result
def auth_user_ldap(uname, pwd):
    """Attempt to bind using the uname/pwd combo passed in.

    Returns True if the bind succeeds, False otherwise.
    """
    if not uname or not pwd:
        logging.error("Username or password not supplied")
        return False
    ld = ldap.initialize(LDAP_URL)
    if LDAP_VERSION_3:
        ld.set_option(ldap.VERSION3, 1)
        ld.start_tls_s()
    # NOTE(review): uname is interpolated into the LDAP filter unescaped —
    # consider ldap.filter.escape_filter_chars to prevent filter injection.
    udn = ld.search_s(LDAP_SEARCH_BASE, ldap.SCOPE_ONELEVEL,
                      '(%s=%s)' % (LDAP_UNAME_ATTR, uname), [LDAP_BIND_ATTR])
    if udn:
        try:
            bindres = ld.simple_bind_s(udn[0][0], pwd)
        # BUG FIX: the Python 2 form ``except A, B:`` caught only
        # INVALID_CREDENTIALS and *assigned* the exception to
        # ldap.UNWILLING_TO_PERFORM; a tuple catches both types.
        except (ldap.INVALID_CREDENTIALS, ldap.UNWILLING_TO_PERFORM):
            logging.error("Invalid or incomplete credentials for %s", uname)
            return False
        except Exception as out:
            logging.error("Auth attempt for %s had an unexpected error: %s",
                          uname, out)
            return False
        else:
            return True
    else:
        logging.error("No user by that name")
        return False
def write_gzipped_text(basefilename: str, text: str) -> None:
    """Write ``text`` to ``basefilename + '.gz'``, gzip-compressed.

    The "inner" (stored) filename is ``basefilename`` and the mtime is
    pinned to 0 so the output is byte-reproducible — Lintian rejects
    timestamped gzip files:

    - https://lintian.debian.org/tags/package-contains-timestamped-gzip.html
    - http://stackoverflow.com/questions/25728472/python-gzip-omit-the-original-filename-and-timestamp
    """
    zipped_name = basefilename + '.gz'
    with open(zipped_name, 'wb') as raw_file:
        with gzip.GzipFile(basefilename, 'wb', 9, raw_file, 0) as gz_stream:
            with io.TextIOWrapper(gz_stream) as text_stream:
                text_stream.write(text)
def to_dict(self):
    """Serialize this Term into a plain dictionary."""
    return dict(definition=self.definition,
                id=self.term_id,
                image=self.image.to_dict(),
                rank=self.rank,
                term=self.term)
def __locate(self):
    """Find the schema locally.

    Returns the located schema for ``self.ns``, or ``None`` when the
    namespace is this schema's own target namespace.
    """
    local_tns = self.schema.tns[1]
    if self.ns[1] == local_tns:
        return None
    return self.schema.locate(self.ns)
def agent_entities(self):
    """Return the agent's entities as a list of Entity objects."""
    endpoint = self._entity_uri()
    response = self._get(endpoint)
    # On error the API returns a dict of shape {"status": {...error...}}
    # instead of the expected list.
    if isinstance(response, dict):
        raise Exception(response["status"])
    return [Entity(entity_json=item)
            for item in response
            if isinstance(item, dict)]
def define(self, names, **kwargs):
    """Define variables within the namespace.

    Similar to :meth:`.Problem.define`, except ``names`` must be an
    iterable. Accepts the same keyword arguments as
    :meth:`.Problem.define`.
    """
    merged_kwargs = dict(self._define_kwargs)
    merged_kwargs.update(kwargs)
    qualified = ((self, name) for name in names)
    self._problem.define(*qualified, **merged_kwargs)
def _hash(number, alphabet):
    """Hash ``number`` using ``alphabet`` as the digit set (base-N encode)."""
    base = len(alphabet)
    digits = []
    while True:
        digits.append(alphabet[number % base])
        number //= base
        if not number:
            break
    # digits were collected least-significant first
    return ''.join(reversed(digits))
def add_line(self, line, source, *lineno):
    """Append one line (with its source and optional line numbers) to the
    accumulated result."""
    entry = (line, source) + lineno
    self.result.append(*entry)
def merge_segments(filename, scan, cleanup=True, sizelimit=0):
    """Merge cands/noise pkl files from multiple segments into single
    cands/noise files.

    Expects segment cands pkls to contain (1) a state dict and (2) a cands
    dict. Writes a state dict plus a duple of numpy arrays; a single pkl is
    written per scan using root name fileroot.

    If ``cleanup``, segment files are removed after merging.
    If ``sizelimit``, the output is trimmed to be less than this many MB.

    NOTE(review): this code is Python 2 only — text-mode pickle files,
    ``dict.keys()[0]`` indexing and ``iterkeys()`` all break on Python 3.
    """
    workdir = os.path.dirname(filename)
    fileroot = os.path.basename(filename)
    # per-segment files follow 'cands_<root>_sc<scan>seg<N>.pkl'
    candslist = glob.glob(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + 'seg*.pkl'))
    noiselist = glob.glob(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + 'seg*.pkl'))
    candssegs = sorted([candsfile.rstrip('.pkl').split('seg')[1] for candsfile in candslist])
    noisesegs = sorted([noisefile.rstrip('.pkl').split('seg')[1] for noisefile in noiselist])

    # test for good list with segments
    if not candslist and not noiselist:
        logger.warn('candslist and noiselist are empty.')
        return

    # aggregate cands over segments
    if not os.path.exists(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl')):
        logger.info('Aggregating cands over segments %s for fileroot %s, scan %d' % (str(candssegs), fileroot, scan))
        logger.debug('%s' % candslist)
        cands = {}
        for candsfile in candslist:
            with open(candsfile, 'r') as pkl:
                # each segment file holds a state dict then a cands dict
                state = pickle.load(pkl)
                result = pickle.load(pkl)
            for kk in result.keys():
                cands[kk] = result[kk]
            # remove this key, as it has no meaning after merging segments
            segment = state.pop('segment')

        # optionally limit size
        if sizelimit and len(cands):
            logger.debug('Checking size of cands dictionary...')
            # NOTE(review): if neither 'snr2' nor 'snr1' is in features,
            # snrcol is unbound below — presumably one is always present.
            if 'snr2' in state['features']:
                snrcol = state['features'].index('snr2')
            elif 'snr1' in state['features']:
                snrcol = state['features'].index('snr1')
            # estimate per-candidate size in MB from one entry
            candsize = sys.getsizeof(cands[cands.keys()[0]]) / 1e6
            maxlen = int(sizelimit / candsize)
            if len(cands) > maxlen:  # need to reduce length to newlen
                logger.info('cands dictionary of length %.1f would exceed sizelimit of %d MB. Trimming to strongest %d candidates' % (len(cands), sizelimit, maxlen))
                snrs = [abs(cands[k][snrcol]) for k in cands.iterkeys()]  # take top snrs
                snrsort = sorted(snrs, reverse=True)
                snrmax = snrsort[maxlen]  # get min snr for given length limit
                cands = {k: v for k, v in cands.items() if abs(v[snrcol]) > snrmax}  # new cands dict

        # write cands to single file
        with open(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl'), 'w') as pkl:
            pickle.dump(state, pkl, protocol=2)
            pickle.dump((np.array(cands.keys()), np.array(cands.values())), pkl, protocol=2)

        if cleanup:
            # only delete segment files once the merged file exists
            if os.path.exists(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl')):
                for candsfile in candslist:
                    os.remove(candsfile)
    else:
        logger.warn('Merged candsfile already exists for scan %d. Not merged.' % scan)

    # aggregate noise over segments
    if not os.path.exists(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl')):
        logger.info('Aggregating noise over segments %s for fileroot %s, scan %d' % (str(noisesegs), fileroot, scan))
        logger.debug('%s' % noiselist)
        noise = []
        for noisefile in noiselist:
            with open(noisefile, 'r') as pkl:
                result = pickle.load(pkl)  # gets all noises for segment as list
            noise += result

        # write noise to single file
        if len(noise):
            with open(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl'), 'w') as pkl:
                pickle.dump(noise, pkl, protocol=2)
        if cleanup:
            # only delete segment files once the merged file exists
            if os.path.exists(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl')):
                for noisefile in noiselist:
                    os.remove(noisefile)
    else:
        logger.warn('Merged noisefile already exists for scan %d. Not merged.' % scan)
def get_orders(self, **params):
    """List orders.

    https://developers.coinbase.com/api/v2#list-orders
    """
    raw_response = self._get('v2', 'orders', params=params)
    return self._make_api_object(raw_response, Order)
def parseTree(self, root, state: ParseState) -> List[Dict]:
    """Parse the XML AST tree recursively into a JSON AST that other
    scripts can ingest to generate Python code.

    Args:
        root: The current root of the tree.
        state: The current traversal state (a ParseState).

    Returns:
        A list of JSON AST node dicts for this subtree.
    """
    if root.tag in self.AST_TAG_HANDLERS:
        return self.AST_TAG_HANDLERS[root.tag](root, state)
    if root.tag in self.libRtns:
        return self.process_libRtn(root, state)
    # Unknown tag: flatten the children's ASTs into one list.
    collected = []
    for child in root:
        collected += self.parseTree(child, state)
    return collected
def add_port(zone, port, permanent=True):
    '''
    Allow specific ports in a zone.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' firewalld.add_port internal 443/tcp
    '''
    cmd = '--zone={0} --add-port={1}'.format(zone, port)
    if permanent:
        cmd = cmd + ' --permanent'
    return __firewall_cmd(cmd)
def _wr_txt_nts(self, fout_txt, desc2nts, objgowr, verbose):
    """Write grouped and sorted GO IDs to a text file."""
    banner = '# ----------------------------------------------------------------\n'
    with open(fout_txt, 'w') as prt:
        self._prt_ver_n_key(prt, verbose)
        prt.write('\n\n')
        prt.write(banner)
        prt.write('# - Sections and GO IDs\n')
        prt.write(banner)
        prtfmt = self._get_prtfmt(objgowr, verbose)
        summary_dct = objgowr.prt_txt_desc2nts(prt, desc2nts, prtfmt)
        if summary_dct:
            print(" {N:>5} GO IDs WROTE: {FOUT} ({S} sections)".format(
                N=desc2nts['num_items'],
                FOUT=fout_txt,
                S=desc2nts['num_sections']))
        else:
            print(" WROTE: {TXT}".format(TXT=fout_txt))
def get_amqp_settings():
    """Return all AMQP settings as a dict in the following format::

        "submodule_name": {
            "vhost": VIRTUALHOST,
            "exchange": EXCHANGE,
            "queues": {QUEUE_NAME: ROUTING_KEY, ...},
            "in_key": INPUT_KEY,
            "out_key": OUTPUT_KEY,
        }
    """
    amqp_settings = {}
    module_vars = globals()
    # Every module contributes a RABBITMQ_<NAME>_VIRTUALHOST plus the
    # companion *_EXCHANGE / *_QUEUE / *_KEY variables.
    for key in [k for k in module_vars if k.endswith("VIRTUALHOST")]:
        prefix = "RABBITMQ_" + key.split("_")[1]
        queues = {
            module_vars[prefix + "_INPUT_QUEUE"]: module_vars[prefix + "_INPUT_KEY"],
            module_vars[prefix + "_OUTPUT_QUEUE"]: module_vars[prefix + "_OUTPUT_KEY"],
        }
        amqp_settings[prefix.split("_")[-1].lower()] = {
            "vhost": module_vars[prefix + "_VIRTUALHOST"],
            "exchange": module_vars[prefix + "_EXCHANGE"],
            "queues": queues,
            "in_key": module_vars[prefix + "_INPUT_KEY"],
            "out_key": module_vars[prefix + "_OUTPUT_KEY"],
        }
    return amqp_settings
def _create_shade_data(self, position_data=None, room_id=None):
    """Build the shade payload dict to be sent to the hub."""
    shade = {ATTR_ID: self.id}
    if position_data:
        shade[ATTR_POSITION_DATA] = position_data
    if room_id:
        shade[ATTR_ROOM_ID] = room_id
    return {ATTR_SHADE: shade}
def state_in(self, value):
    """Parse ranges.

    Generator-based parsing step: yields candidate values and receives the
    consumer's reply via ``yield``.
    """
    # Expand the input through the group traverser.
    value = [val for val in Traverser(value, self.groups)]
    if not value or not value[0]:
        # No concrete expansion: offer the first literal not already
        # produced and return whatever the consumer sends back.
        # NOTE(review): iterating a set difference — which literal is
        # "first" is not deterministic; confirm callers don't depend on it.
        for val in self.literals - set(value):
            return (yield val)
    yield value[0]
def get_activity_laps(self, activity_id):
    """Get the laps from an activity.

    http://strava.github.io/api/v3/activities/#laps

    :param activity_id: The activity for which to fetch laps.
    :type activity_id: int
    :return: An iterator of :class:`stravalib.model.ActivityLaps` objects.
    :rtype: :class:`BatchedResultsIterator`
    """
    fetcher = functools.partial(self.protocol.get,
                                '/activities/{id}/laps',
                                id=activity_id)
    return BatchedResultsIterator(entity=model.ActivityLap,
                                  bind_client=self,
                                  result_fetcher=fetcher)
def parse(self, raise_parsing_errors=True):
    """Process the file content.

    Usage::

        >>> plist_file_parser = PlistFileParser("standard.plist")
        >>> plist_file_parser.parse()
        True

    :param raise_parsing_errors: Raise parsing errors.
    :type raise_parsing_errors: bool
    :return: Method success.
    :rtype: bool
    """
    LOGGER.debug("> Reading elements from: '{0}'.".format(self.path))
    element_tree_parser = ElementTree.iterparse(self.path)
    self.__parsing_errors = []
    for action, element in element_tree_parser:
        # Each known plist tag has an unserializer that converts the raw
        # XML element into a Python value stored on element.text.
        unmarshal = self.__unserializers.get(element.tag)
        if unmarshal:
            data = unmarshal(element)
            element.clear()
            element.text = data
        elif element.tag != "plist":
            # unknown tag: record but keep parsing
            self.__parsing_errors.append(
                foundations.exceptions.FileStructureParsingError(
                    "Unknown element: {0}".format(element.tag)))
    if self.__parsing_errors:
        if raise_parsing_errors:
            raise foundations.exceptions.FileStructureParsingError(
                "{0} | '{1}' structure is invalid, parsing exceptions occured!".format(
                    self.__class__.__name__, self.path))
    else:
        # root element's text now holds the fully unserialized structure
        self.__elements = foundations.common.get_first_item(
            element_tree_parser.root).text
    return True
def write_summary(summary: dict, cache_dir: str):
    """Write the `summary` JSON to `cache_dir`.

    The 'accessed' timestamp in *summary* is refreshed to the current time
    (mutating the caller's dict) before the file is written.
    """
    summary['accessed'] = time()
    serialized = json.dumps(summary, indent=4, sort_keys=True)
    with open(join(cache_dir, 'summary.json'), 'w') as summary_file:
        summary_file.write(serialized)
def snapshots(self, space_id, environment_id, resource_id,
              resource_kind='entries'):
    """Provides access to snapshot management methods.

    API reference:
    https://www.contentful.com/developers/docs/references/content-management-api/#/reference/snapshots

    :param space_id: the space holding the resource.
    :param environment_id: the environment holding the resource.
    :param resource_id: the entry or content type to inspect.
    :param resource_kind: 'entries' (default) or 'content_types'.
    :return: :class:`SnapshotsProxy <contentful_management.snapshots_proxy.SnapshotsProxy>` object.
    :rtype: contentful.snapshots_proxy.SnapshotsProxy
    """
    return SnapshotsProxy(
        self,
        space_id,
        environment_id,
        resource_id,
        resource_kind,
    )
def sanity_check_net(self, net):
    """Check that net is a valid LogicNet.

    Validates the net's structure, its wires' block membership, the
    argument/destination counts and bitwidths for the net's op, and the
    op_param contents. Raises PyrtlInternalError on the first violation.
    """
    from .wire import Input, Output, Const
    from .memory import _MemReadBase
    # general sanity checks that apply to all operations
    if not isinstance(net, LogicNet):
        raise PyrtlInternalError('error, net must be of type LogicNet')
    if not isinstance(net.args, tuple):
        raise PyrtlInternalError('error, LogicNet args must be tuple')
    if not isinstance(net.dests, tuple):
        raise PyrtlInternalError('error, LogicNet dests must be tuple')
    # every wire touched by the net must belong to this block
    for w in net.args + net.dests:
        self.sanity_check_wirevector(w)
        if w._block is not self:
            raise PyrtlInternalError('error, net references different block')
        if w not in self.wirevector_set:
            raise PyrtlInternalError('error, net with unknown source "%s"' % w.name)
    # checks that input and output wirevectors are not misused
    for w in net.dests:
        if isinstance(w, (Input, Const)):
            raise PyrtlInternalError('error, Inputs, Consts cannot be destinations to a net')
    for w in net.args:
        if isinstance(w, Output):
            raise PyrtlInternalError('error, Outputs cannot be arguments for a net')
    if net.op not in self.legal_ops:
        raise PyrtlInternalError('error, net op "%s" not from acceptable set %s' %
                                 (net.op, self.legal_ops))
    # operation specific checks on arguments
    # (single-char op codes: w=wire, ~=not, r=register, s=select, m=mem read,
    #  x=mux, c=concat, @=mem write — inferred from the checks below)
    if net.op in 'w~rsm' and len(net.args) != 1:
        raise PyrtlInternalError('error, op only allowed 1 argument')
    if net.op in '&|^n+-*<>=' and len(net.args) != 2:
        raise PyrtlInternalError('error, op only allowed 2 arguments')
    if net.op == 'x':
        if len(net.args) != 3:
            raise PyrtlInternalError('error, op only allowed 3 arguments')
        if net.args[1].bitwidth != net.args[2].bitwidth:
            raise PyrtlInternalError('error, args have mismatched bitwidths')
        if net.args[0].bitwidth != 1:
            raise PyrtlInternalError('error, mux select must be a single bit')
    if net.op == '@' and len(net.args) != 3:
        raise PyrtlInternalError('error, op only allowed 3 arguments')
    if net.op in '&|^n+-*<>=' and net.args[0].bitwidth != net.args[1].bitwidth:
        raise PyrtlInternalError('error, args have mismatched bitwidths')
    if net.op in 'm@' and net.args[0].bitwidth != net.op_param[1].addrwidth:
        raise PyrtlInternalError('error, mem addrwidth mismatch')
    if net.op == '@' and net.args[1].bitwidth != net.op_param[1].bitwidth:
        raise PyrtlInternalError('error, mem bitwidth mismatch')
    if net.op == '@' and net.args[2].bitwidth != 1:
        raise PyrtlInternalError('error, mem write enable must be 1 bit')
    # operation specific checks on op_params
    if net.op in 'w~&|^n+-*<>=xcr' and net.op_param is not None:
        raise PyrtlInternalError('error, op_param should be None')
    if net.op == 's':
        # select op_param is a tuple of bit indices into args[0]
        if not isinstance(net.op_param, tuple):
            raise PyrtlInternalError('error, select op requires tuple op_param')
        for p in net.op_param:
            if not isinstance(p, int):
                raise PyrtlInternalError('error, select op_param requires ints')
            if p < 0 or p >= net.args[0].bitwidth:
                raise PyrtlInternalError('error, op_param out of bounds')
    if net.op in 'm@':
        # memory ops carry (memid, memory object) as op_param
        if not isinstance(net.op_param, tuple):
            raise PyrtlInternalError('error, mem op requires tuple op_param')
        if len(net.op_param) != 2:
            raise PyrtlInternalError('error, mem op requires 2 op_params in tuple')
        if not isinstance(net.op_param[0], int):
            raise PyrtlInternalError('error, mem op requires first operand as int')
        if not isinstance(net.op_param[1], _MemReadBase):
            raise PyrtlInternalError('error, mem op requires second operand of a memory type')
    # check destination validity
    if net.op in 'w~&|^nr' and net.dests[0].bitwidth > net.args[0].bitwidth:
        raise PyrtlInternalError('error, upper bits of destination unassigned')
    if net.op in '<>=' and net.dests[0].bitwidth != 1:
        raise PyrtlInternalError('error, destination should be of bitwidth=1')
    if net.op in '+-' and net.dests[0].bitwidth > net.args[0].bitwidth + 1:
        raise PyrtlInternalError('error, upper bits of destination unassigned')
    if net.op == '*' and net.dests[0].bitwidth > 2 * net.args[0].bitwidth:
        raise PyrtlInternalError('error, upper bits of destination unassigned')
    if net.op == 'x' and net.dests[0].bitwidth > net.args[1].bitwidth:
        raise PyrtlInternalError('error, upper bits of mux output undefined')
    if net.op == 'c' and net.dests[0].bitwidth > sum(x.bitwidth for x in net.args):
        raise PyrtlInternalError('error, upper bits of concat output undefined')
    if net.op == 's' and net.dests[0].bitwidth > len(net.op_param):
        raise PyrtlInternalError('error, upper bits of select output undefined')
    if net.op == 'm' and net.dests[0].bitwidth != net.op_param[1].bitwidth:
        raise PyrtlInternalError('error, mem read dest bitwidth mismatch')
    if net.op == '@' and net.dests != ():
        raise PyrtlInternalError('error, mem write dest should be empty tuple')
def atoms_string_from_file(filename):
    """Reads atomic shells from file such as feff.inp or ATOMS file.

    The lines are arranged as follows:

        x y z ipot Atom Symbol Distance Number

    with distance being the shell radius and ipot an integer identifying
    the potential used.

    Args:
        filename: File name containing atomic coord data.

    Returns:
        Atoms string.
    """
    with zopen(filename, "rt") as fobject:
        lines = fobject.readlines()
    in_atoms_section = False
    collected = []
    for line in lines:
        if not in_atoms_section and line.find("ATOMS") >= 0:
            # The "ATOMS" header line itself is included in the output.
            in_atoms_section = True
        if in_atoms_section and "END" not in line:
            collected.append(line.replace("\r", ""))
    return ''.join(collected)
def _wrap_client_error(e):
    """Wrap botocore ClientError exception into ServerlessRepoClientError.

    :param e: botocore exception
    :type e: ClientError
    :return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
    """
    error = e.response['Error']
    error_code = error['Code']
    message = error['Message']
    if error_code == 'BadRequestException':
        if "Failed to copy S3 object. Access denied:" in message:
            # Pull the offending bucket/key out of the service message.
            match = re.search('bucket=(.+?), key=(.+?)$', message)
            if match:
                return S3PermissionsRequired(bucket=match.group(1),
                                             key=match.group(2))
        if "Invalid S3 URI" in message:
            return InvalidS3UriError(message=message)
    # Fall back to the generic client error for anything unrecognized.
    return ServerlessRepoClientError(message=message)
def listar_por_nome(self, nome):
    """Look up a piece of equipment by its name.

    :param nome: Equipment name.

    :return: Dictionary shaped as::

        {'equipamento': {'id': <id_equipamento>,
                         'nome': <nome_equipamento>,
                         'id_tipo_equipamento': <id_tipo_equipamento>,
                         'nome_tipo_equipamento': <nome_tipo_equipamento>,
                         'id_modelo': <id_modelo>,
                         'nome_modelo': <nome_modelo>,
                         'id_marca': <id_marca>,
                         'nome_marca': <nome_marca>}}

    :raise EquipamentoNaoExisteError: No equipment registered with that name.
    :raise InvalidParameterError: The equipment name is null or empty.
    :raise DataBaseError: networkapi failed to access the database.
    :raise XMLError: networkapi failed to generate the response XML.
    """
    if nome is None or nome == '':
        raise InvalidParameterError(
            u'O nome do equipamento não foi informado.')
    url = 'equipamento/nome/' + urllib.quote(nome) + '/'
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
def ParseRecord(self, parser_mediator, key, structure):
    """Parses a record and produces a Bash history event.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        key (str): name of the parsed structure.
        structure (pyparsing.ParseResults): elements parsed from the file.

    Raises:
        ParseError: when the structure type is unknown.
    """
    if key != 'log_entry':
        raise errors.ParseError(
            'Unable to parse record, unknown structure: {0:s}'.format(key))
    # Bash history timestamps are POSIX epoch seconds.
    timestamp = dfdatetime_posix_time.PosixTime(timestamp=structure.timestamp)
    history_event = time_events.DateTimeValuesEvent(
        timestamp, definitions.TIME_DESCRIPTION_MODIFICATION)
    history_event_data = BashHistoryEventData()
    history_event_data.command = structure.command
    parser_mediator.ProduceEventWithEventData(history_event, history_event_data)
def create_embeded_pkcs7_signature(data, cert, key):
    """Creates an embeded ("nodetached") pkcs7 signature.

    This is equivalent to the output of::

        openssl smime -sign -signer cert -inkey key -outform DER -nodetach < data

    :type data: bytes
    :type cert: str
    :type key: str
    """  # noqa: E501
    assert isinstance(data, bytes)
    assert isinstance(cert, str)
    try:
        loaded_key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
        loaded_cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
    except crypto.Error as e:
        raise exceptions.CorruptCertificate from e
    # Feed the payload through an in-memory BIO and sign it with the
    # NOSIGS flag so the data is embedded rather than detached.
    input_bio = crypto._new_mem_buf(data)
    pkcs7 = crypto._lib.PKCS7_sign(
        loaded_cert._x509, loaded_key._pkey, crypto._ffi.NULL,
        input_bio, PKCS7_NOSIGS)
    output_bio = crypto._new_mem_buf()
    crypto._lib.i2d_PKCS7_bio(output_bio, pkcs7)
    return crypto._bio_to_string(output_bio)
def evaluate_model_single_recording(model_file, recording):
    """Evaluate a model for a single recording.

    Parameters
    ----------
    model_file : string
        Model file (.tar)
    recording :
        The handwritten recording.
    """
    preprocessing_queue, feature_list, model, output_semantics = \
        load_model(model_file)
    return evaluate_model_single_recording_preloaded(
        preprocessing_queue, feature_list, model, output_semantics, recording)
def parse_imports(self, original_first_thunk, first_thunk, forwarder_chain):
    """Parse the imported symbols.

    It will fill a list, which will be available as the dictionary
    attribute "imports". Its keys will be the DLL names and the values
    all the symbols imported from that object.

    NOTE(review): this is Python 2 code (``xrange``, ``except X, e``).
    """
    imported_symbols = []
    # The following has been commented as a PE does not
    # need to have the import data necessarily witin
    # a section, it can keep it in gaps between sections
    # or overlapping other data.
    # imports_section = self.get_section_by_rva(first_thunk)
    # if not imports_section:
    #     raise PEFormatError, 'Invalid/corrupt imports.'
    # Import Lookup Table. Contains ordinals or pointers to strings.
    ilt = self.get_import_table(original_first_thunk)
    # Import Address Table. May have identical content to ILT if
    # PE file is not bounded, Will contain the address of the
    # imported symbols once the binary is loaded or if it is already
    # bound.
    iat = self.get_import_table(first_thunk)
    # OC Patch:
    # Would crash if IAT or ILT had None type
    if (not iat or len(iat) == 0) and (not ilt or len(ilt) == 0):
        raise PEFormatError(
            'Invalid Import Table information. ' +
            'Both ILT and IAT appear to be broken.')
    # Prefer the ILT; fall back to the IAT when the ILT is absent.
    table = None
    if ilt:
        table = ilt
    elif iat:
        table = iat
    else:
        return None
    # The ordinal flag's position depends on PE32 vs PE32+ (64-bit).
    if self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE:
        ordinal_flag = IMAGE_ORDINAL_FLAG
    elif self.PE_TYPE == OPTIONAL_HEADER_MAGIC_PE_PLUS:
        ordinal_flag = IMAGE_ORDINAL_FLAG64
    for idx in xrange(len(table)):
        imp_ord = None
        imp_hint = None
        imp_name = None
        name_offset = None
        hint_name_table_rva = None
        if table[idx].AddressOfData:
            # If imported by ordinal, we will append the ordinal number
            if table[idx].AddressOfData & ordinal_flag:
                import_by_ordinal = True
                imp_ord = table[idx].AddressOfData & 0xffff
                imp_name = None
                name_offset = None
            else:
                import_by_ordinal = False
                try:
                    # Mask off the high bit to get the hint/name table RVA.
                    hint_name_table_rva = table[idx].AddressOfData & 0x7fffffff
                    data = self.get_data(hint_name_table_rva, 2)
                    # Get the Hint
                    imp_hint = self.get_word_from_data(data, 0)
                    imp_name = self.get_string_at_rva(
                        table[idx].AddressOfData + 2)
                    if not is_valid_function_name(imp_name):
                        imp_name = '*invalid*'
                    name_offset = self.get_offset_from_rva(
                        table[idx].AddressOfData + 2)
                except PEFormatError, e:
                    pass
        # by nriva: we want the ThunkRVA and ThunkOffset
        thunk_offset = table[idx].get_file_offset()
        thunk_rva = self.get_rva_from_offset(thunk_offset)
        imp_address = first_thunk + self.OPTIONAL_HEADER.ImageBase + idx * 4
        struct_iat = None
        try:
            # A differing IAT entry means the import was bound at link time.
            if iat and ilt and ilt[idx].AddressOfData != iat[idx].AddressOfData:
                imp_bound = iat[idx].AddressOfData
                struct_iat = iat[idx]
            else:
                imp_bound = None
        except IndexError:
            imp_bound = None
        # The file with hashes:
        # MD5: bfe97192e8107d52dd7b4010d12b2924
        # SHA256: 3d22f8b001423cb460811ab4f4789f277b35838d45c62ec0454c877e7c82c7f5
        # has an invalid table built in a way that it's parseable but contains invalid
        # entries that lead pefile to take extremely long amounts of time to
        # parse. It also leads to extreme memory consumption.
        # To prevent similar cases, if invalid entries are found in the middle of a
        # table the parsing will be aborted
        if imp_ord == None and imp_name == None:
            raise PEFormatError(
                'Invalid entries in the Import Table. Aborting parsing.')
        if imp_name != '' and (imp_ord or imp_name):
            imported_symbols.append(
                ImportData(
                    pe=self,
                    struct_table=table[idx],
                    struct_iat=struct_iat,  # for bound imports if any
                    import_by_ordinal=import_by_ordinal,
                    ordinal=imp_ord,
                    ordinal_offset=table[idx].get_file_offset(),
                    hint=imp_hint,
                    name=imp_name,
                    name_offset=name_offset,
                    bound=imp_bound,
                    address=imp_address,
                    hint_name_table_rva=hint_name_table_rva,
                    thunk_offset=thunk_offset,
                    thunk_rva=thunk_rva))
    return imported_symbols
def query(self, parents=None):
    """Compose the query and generate SPARQL."""
    # TODO: benchmark single-query strategy
    outer = Select([])
    outer = self.project(outer, parent=True)
    outer = self.filter(outer, parents=parents)
    if self.parent is None:
        # Root node: paginate via a distinct inner subquery on our variable.
        inner = Select([self.var])
        inner = self.filter(inner, parents=parents)
        inner = inner.offset(self.node.offset)
        inner = inner.limit(self.node.limit)
        inner = inner.distinct()
        # TODO: sorting.
        inner = inner.order_by(desc(self.var))
        outer = outer.where(inner)
    # if hasattr(self.context, 'identifier'):
    #     outer._where = graph(self.context.identifier, outer._where)
    log.debug("Compiled query: %r", outer.compile())
    return outer
def do_mailfy(self, query, **kwargs):
    """Verifying a mailfy query in this platform.

    This might be redefined in any class inheriting from Platform. The
    only condition is that any of this should return an equivalent array.

    Args:
        query: The element to be searched.

    Return:
        A list of i3visio.profile entities to be appended (empty when the
        email is not found on this platform).
    """
    # NOTE(review): kwargs is forwarded as a single positional dict —
    # presumably check_mailfy accepts it that way; confirm its signature.
    if not self.check_mailfy(query, kwargs):
        return []
    expanded_entities = general.expandEntitiesFromEmail(query)
    platform_attribute = {
        "type": "i3visio.platform",
        "value": self.platformName,
        "attributes": [],
    }
    profile = {
        "type": "i3visio.profile",
        "value": self.platformName + " - " + query,
        "attributes": expanded_entities + [platform_attribute],
    }
    return [profile]
def _ln_rnn(x, gamma, beta):
    r"""Applies layer normalization.

    Normalizes the last dimension of the tensor `x`.

    Args:
        x: A `Tensor`.
        gamma: A constant `Tensor`. Scale parameter. Default is 1.
        beta: A constant `Tensor`. Offset parameter. Default is 0.

    Returns:
        A `Tensor` with the same shape as `x`.
    """
    # Statistics are computed over the final (feature) axis only.
    feature_axis = len(x.get_shape()) - 1
    mean, variance = tf.nn.moments(x, axes=[feature_axis], keep_dims=True)
    # Standardize, then apply the learned scale and offset.
    normalized = (x - mean) / tf.sqrt(variance + tf.sg_eps)
    return gamma * normalized + beta
def run(self, args):
    """Erases the device connected to the J-Link.

    Args:
        self (EraseCommand): the ``EraseCommand`` instance
        args (Namespace): the arguments passed on the command-line

    Returns:
        ``None``
    """
    byte_count = self.create_jlink(args).erase()
    print('Bytes Erased: %d' % byte_count)
def format_pkg_list(packages, versions_as_list, attr):
    '''
    Formats packages according to parameters for list_pkgs.

    With ``attr`` set, each package maps to a list of dicts restricted to
    the requested attributes (falsy attribute values are dropped);
    otherwise each package maps to formatted version strings. The input
    mapping is never mutated.
    '''
    ret = copy.deepcopy(packages)
    if attr:
        requested_attr = {'epoch', 'version', 'release', 'arch',
                          'install_date', 'install_date_time_t'}
        if attr != 'all':
            # 'version' is always included alongside the requested keys.
            requested_attr &= set(attr + ['version'])
        for name in ret:
            filtered_versions = []
            for version_attrs in ret[name]:
                filtered_versions.append({
                    key: version_attrs[key]
                    for key in requested_attr
                    if version_attrs[key]
                })
            ret[name] = filtered_versions
        return ret

    for name in ret:
        ret[name] = [format_version(d['epoch'], d['version'], d['release'])
                     for d in ret[name]]
    if not versions_as_list:
        stringify(ret)
    return ret
def write(self, addr, data):
    '''Write to dummy memory.

    Parameters
    ----------
    addr : int
        The register address.
    data : list, tuple
        Data (byte array) to be written.

    Returns
    -------
    nothing
    '''
    logger.debug("Dummy SiTransferLayer.write addr: %s data: %s" % (hex(addr), data))
    # Store one byte per consecutive address, coercing each value
    # through an unsigned-byte array (raises on out-of-range values).
    for offset, value in enumerate(data):
        self.mem[addr + offset] = array.array('B', [value])[0]
def make_path_unique(path, counts, new):
    """Given a path, a list of existing paths and counts for each of the
    existing paths.

    Mutates *counts* (mapping of path tuples to occurrence counts) and
    returns a path tuple that does not prefix-collide with any counted
    path, disambiguating by appending Roman-numeral suffixes.
    """
    added = False
    # Keep adjusting while the candidate path is a prefix of any counted path.
    while any(path == c[:i] for c in counts for i in range(1, len(c) + 1)):
        count = counts[path]
        counts[path] += 1
        if (not new and len(path) > 1) or added:
            # Back off one component and retry at the parent level.
            path = path[:-1]
        else:
            # First collision: append a Roman-numeral disambiguator.
            added = True
            path = path + (int_to_roman(count),)
    if len(path) == 1:
        # Single-component paths always carry an explicit numeral suffix.
        path = path + (int_to_roman(counts.get(path, 1)),)
    if path not in counts:
        counts[path] = 1
    return path
def render_unicode(self, *args, **data):
    """Render the output of this template as a unicode object."""
    return runtime._render(
        self,
        self.callable_,
        args,
        data,
        as_unicode=True,
    )
def geometry_within_radius(geometry, center, radius):
    """To valid whether point or linestring or polygon is inside a radius
    around a center.

    Keyword arguments:
    geometry -- point/linstring/polygon geojson object
    center   -- point geojson object
    radius   -- radius

    if (geometry inside radius) return true else false
    """
    geo_type = geometry['type']
    if geo_type == 'Point':
        return point_distance(geometry, center) <= radius
    if geo_type in ('LineString', 'Polygon'):
        # it's enough to check the exterior ring of the Polygon
        if geo_type == 'Polygon':
            coordinates = geometry['coordinates'][0]
        else:
            coordinates = geometry['coordinates']
        probe = {}
        for coordinate in coordinates:
            probe['coordinates'] = coordinate
            if point_distance(probe, center) > radius:
                return False
        return True
def vecs_to_datmesh(x, y):
    """Converts input arguments x and y to a 2d meshgrid, suitable for
    calling Means, Covariances and Realizations.

    Returns an array of shape (len(y), len(x), 2) whose last axis holds
    the (x, y) coordinate pair at each grid point.
    """
    grid_x, grid_y = meshgrid(x, y)
    out = zeros(grid_x.shape + (2,), dtype=float)
    out[:, :, 0] = grid_x
    out[:, :, 1] = grid_y
    return out
def dict_copy(func):
    "copy dict args, to avoid modifying caller's copy"
    def proxy(*args, **kwargs):
        # Shallow-copy every dict argument (positional and keyword) so the
        # wrapped function cannot mutate the caller's originals.
        copied_args = [dict(a) if isinstance(a, dict) else a for a in args]
        copied_kwargs = {k: dict(v) if isinstance(v, dict) else v
                         for k, v in kwargs.items()}
        return func(*copied_args, **copied_kwargs)
    return proxy
def decorator(f):
    """Creates a parametric decorator from a function.

    The resulting decorator will optionally take keyword arguments: called
    with a single positional argument it decorates directly; called with
    only keyword arguments it returns a decorator bound to them.
    """
    @functools.wraps(f)
    def decorated(*args, **kwargs):
        # Direct use: @decorated — the sole positional arg is the target.
        if len(args) == 1:
            return f(*args, **kwargs)
        if args:
            raise TypeError("This decorator only accepts extra keyword arguments.")
        # Parameterized use: @decorated(key=...) — defer the application.
        return lambda g: f(g, **kwargs)
    return decorated
def write_perseus(f, df):
    """Export a dataframe to Perseus; recreating the format.

    Writes the column labels, a Perseus "#!{Type}" annotation row, and
    then the data, with no index or extra header.

    :param f: file path or buffer accepted by ``DataFrame.to_csv``
    :param df: the dataframe to export (a copy is taken; not modified)
    :return: None
    """
    ### Generate the Perseus like type index
    # Known column labels mapped to Perseus type codes:
    # C = categorical, N = numerical, T = text.
    FIELD_TYPE_MAP = {
        'Amino acid': 'C',
        'Charge': 'C',
        'Reverse': 'C',
        'Potential contaminant': 'C',
        'Multiplicity': 'C',
        'Localization prob': 'N',
        'PEP': 'N',
        'Score': 'N',
        'Delta score': 'N',
        'Score for localization': 'N',
        'Mass error [ppm]': 'N',
        'Intensity': 'N',
        'Position': 'N',
        'Proteins': 'T',
        'Positions within proteins': 'T',
        'Leading proteins': 'T',
        'Protein names': 'T',
        'Gene names': 'T',
        'Sequence window': 'T',
        'Unique identifier': 'T',
    }

    def map_field_type(n, c):
        # Unknown columns default to "E" (expression data). This was a
        # bare try/except that swallowed *every* exception; an explicit
        # lookup with a default is equivalent and cannot hide other bugs.
        t = FIELD_TYPE_MAP.get(c, "E")
        # In the first element, add type indicator
        if n == 0:
            t = "#!{Type}%s" % t
        return t

    df = df.copy()
    df.columns = pd.MultiIndex.from_tuples(
        [(k, map_field_type(n, k)) for n, k in enumerate(df.columns)],
        names=["Label", "Type"])
    # Transposing twice around reset_index turns the two column-index
    # levels into the first two output rows (labels, then types).
    df = df.transpose().reset_index().transpose()
    df.to_csv(f, index=False, header=False)
def _pb_from_query(query):
    """Convert a Query instance to the corresponding protobuf.

    :type query: :class:`Query`
    :param query: The source query.

    :rtype: :class:`.query_pb2.Query`
    :returns: A protobuf that can be sent to the protobuf API. N.b. that
              it does not contain "in-flight" fields for ongoing query
              executions (cursors, offset, limit).
    """
    pb = query_pb2.Query()
    for projection_name in query.projection:
        pb.projection.add().property.name = projection_name
    if query.kind:
        pb.kind.add().name = query.kind
    # All filters (including the ancestor filter) are AND-ed together.
    composite_filter = pb.filter.composite_filter
    composite_filter.op = query_pb2.CompositeFilter.AND
    if query.ancestor:
        ancestor_pb = query.ancestor.to_protobuf()
        # Filter on __key__ HAS_ANCESTOR == ancestor.
        ancestor_filter = composite_filter.filters.add().property_filter
        ancestor_filter.property.name = "__key__"
        ancestor_filter.op = query_pb2.PropertyFilter.HAS_ANCESTOR
        ancestor_filter.value.key_value.CopyFrom(ancestor_pb)
    for property_name, operator, value in query.filters:
        pb_op_enum = query.OPERATORS.get(operator)
        # Add the specific filter
        property_filter = composite_filter.filters.add().property_filter
        property_filter.property.name = property_name
        property_filter.op = pb_op_enum
        # Set the value to filter on based on the type.
        if property_name == "__key__":
            key_pb = value.to_protobuf()
            property_filter.value.key_value.CopyFrom(key_pb)
        else:
            helpers._set_protobuf_value(property_filter.value, value)
    # An empty composite filter is invalid; drop the field entirely.
    if not composite_filter.filters:
        pb.ClearField("filter")
    # A leading "-" on an order property means descending.
    for prop in query.order:
        property_order = pb.order.add()
        if prop.startswith("-"):
            property_order.property.name = prop[1:]
            property_order.direction = property_order.DESCENDING
        else:
            property_order.property.name = prop
            property_order.direction = property_order.ASCENDING
    for distinct_on_name in query.distinct_on:
        pb.distinct_on.add().name = distinct_on_name
    return pb
def step_context(self):
    """Access the step_context.

    :returns: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList
    :rtype: twilio.rest.studio.v1.flow.engagement.step.step_context.StepContextList
    """
    # Lazily build the proxy on first access and cache it afterwards.
    if self._step_context is None:
        solution = self._solution
        self._step_context = StepContextList(
            self._version,
            flow_sid=solution['flow_sid'],
            engagement_sid=solution['engagement_sid'],
            step_sid=solution['sid'],
        )
    return self._step_context
def set_creds(self, newCreds):
    """Manually replace the stored credentials.

    Stores *newCreds*, notifies listeners via ``on_creds_changed``, and
    returns ``self`` so calls can be chained.
    """
    self.creds = newCreds
    self.on_creds_changed(self.creds)
    return self
def commit_pushdb(self, coordinates, postscript=None):
    """Commit changes to the pushdb with a message containing the
    provided coordinates."""
    message = ('pants build committing publish data for push of '
               '{coordinates}{postscript}').format(
                   coordinates=coordinates, postscript=postscript or '')
    self.scm.commit(message, verify=self.get_options().verify_commit)
def make_node(cls, id_, params, lineno):
    """This will return an AST node for a function/procedure call.

    Returns None when the symbol lookup or semantic checks fail (an
    error has already been reported via errmsg in that case).
    """
    assert isinstance(params, SymbolARGLIST)
    entry = gl.SYMBOL_TABLE.access_func(id_, lineno)
    if entry is None:  # A syntax / semantic error
        return None
    if entry.callable is False:  # Is it NOT callable?
        if entry.type_ != Type.string:
            # Only string entries may be "called" (substring access);
            # anything else non-callable is an error.
            errmsg.syntax_error_not_array_nor_func(lineno, id_)
            return None
    gl.SYMBOL_TABLE.check_class(id_, CLASS.function, lineno)
    entry.accessed = True
    if entry.declared and not entry.forwarded:
        # Fully declared: the arguments can be checked right now.
        check_call_arguments(lineno, id_, params)
    else:  # All functions goes to global scope by default
        if not isinstance(entry, SymbolFUNCTION):
            entry = SymbolVAR.to_function(entry, lineno)
        gl.SYMBOL_TABLE.move_to_global_scope(id_)
        # Defer argument checking until the function body is known.
        gl.FUNCTION_CALLS.append((id_, params, lineno,))
    return cls(entry, params, lineno)
def Max(left: vertex_constructor_param_types,
        right: vertex_constructor_param_types,
        label: Optional[str] = None) -> Vertex:
    """Finds the maximum between two vertices.

    :param left: one of the vertices to find the maximum of
    :param right: one of the vertices to find the maximum of
    :param label: optional label for the resulting vertex
    """
    return Double(
        context.jvm_view().MaxVertex,
        label,
        cast_to_double_vertex(left),
        cast_to_double_vertex(right),
    )
def identifySQLError(self, sql, args, e):
    """Identify an appropriate SQL error object for the given message for
    the supported versions of sqlite.

    @return: an SQLError
    """
    message = e.args[0]
    table_exists = (message.startswith("table")
                    and message.endswith("already exists"))
    if table_exists:
        return errors.TableAlreadyExists(sql, args, e)
    # Anything unrecognized becomes a generic SQLError.
    return errors.SQLError(sql, args, e)
def evaluateplanarPotentials(Pot, R, phi=None, t=0., dR=0, dphi=0):
    """
    NAME:

       evaluateplanarPotentials

    PURPOSE:

       evaluate a (list of) planarPotential instance(s)

    INPUT:

       Pot - (list of) planarPotential instance(s)

       R - Cylindrical radius (can be Quantity)

       phi= azimuth (optional; can be Quantity)

       t= time (optional; can be Quantity)

       dR=, dphi= if set to non-zero integers, return the dR,dphi't
       derivative instead

    OUTPUT:

       Phi(R(,phi,t))

    HISTORY:

       2010-07-13 - Written - Bovy (NYU)
    """
    # Thin public wrapper; all evaluation happens in the private helper.
    return _evaluateplanarPotentials(Pot, R, phi=phi, t=t, dR=dR, dphi=dphi)
def zscale(image, nsamples=1000, contrast=0.25):
    """Implement IRAF zscale algorithm.

    nsamples=1000 and contrast=0.25 are the IRAF display task defaults.
    image is a 2-d numpy array.

    returns (z1, z2)
    """
    # Fit the display range on a representative pixel sample.
    pixel_sample = zsc_sample(image, nsamples)
    return zscale_samples(pixel_sample, contrast=contrast)
def timeseries(self):
    """Feed-in time series of generator.

    It returns the actual time series used in power flow analysis. If
    :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise,
    :meth:`timeseries` looks for generation and curtailment time series
    of the according type of technology (and weather cell) in
    :class:`~.grid.network.TimeSeries`.

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        DataFrame containing active power in kW in column 'p' and
        reactive power in kVA in column 'q'.
    """
    if self._timeseries is None:
        # get time series for active power depending on if they are
        # differentiated by weather cell ID or not
        if isinstance(self.grid.network.timeseries.generation_fluctuating.columns,
                      pd.MultiIndex):
            if self.weather_cell_id:
                try:
                    timeseries = self.grid.network.timeseries.generation_fluctuating[
                        self.type, self.weather_cell_id].to_frame('p')
                except KeyError:
                    logger.exception("No time series for type {} and "
                                     "weather cell ID {} given.".format(
                                         self.type, self.weather_cell_id))
                    raise
            else:
                # MultiIndex columns require a weather cell id to select.
                logger.exception("No weather cell ID provided for "
                                 "fluctuating generator {}.".format(repr(self)))
                raise KeyError
        else:
            try:
                timeseries = self.grid.network.timeseries.generation_fluctuating[
                    self.type].to_frame('p')
            except KeyError:
                logger.exception("No time series for type {} "
                                 "given.".format(self.type))
                raise
        # Scale the normalized profile by the generator's rating.
        timeseries = timeseries * self.nominal_capacity
        # subtract curtailment
        if self.curtailment is not None:
            timeseries = timeseries.join(
                self.curtailment.to_frame('curtailment'), how='left')
            timeseries.p = timeseries.p - timeseries.curtailment.fillna(0)
        if self.timeseries_reactive is not None:
            timeseries['q'] = self.timeseries_reactive
        else:
            # Derive reactive power from the fixed power factor.
            timeseries['q'] = timeseries['p'] * self.q_sign * tan(
                acos(self.power_factor))
        return timeseries
    else:
        # Pre-set series: restrict to the analysis time index.
        return self._timeseries.loc[
            self.grid.network.timeseries.timeindex, :]
def alpha_cased(text, lower=False):
    """Filter text to just letters and homogenize case.

    :param str text: what to filter and homogenize.
    :param bool lower: whether to convert to lowercase; default uppercase.
    :return str: input filtered to just letters, with homogenized case.
    """
    # Keep alphabetic characters plus the generic-protocol placeholder.
    kept = [c for c in text if c.isalpha() or c == GENERIC_PROTOCOL_KEY]
    filtered = "".join(kept)
    return filtered.lower() if lower else filtered.upper()
def chunk_count(swatch):
    """return the number of byte-chunks in a swatch object.

    this recursively walks the swatch list, returning 1 for a single
    color & returns 2 for each folder plus 1 for each color it contains
    """
    if type(swatch) is dict:
        # a single color entry is one chunk
        if 'data' in swatch:
            return 1
        # a folder: open + close chunks plus one per contained color
        if 'swatches' in swatch:
            return 2 + len(swatch['swatches'])
    else:
        total = 0
        for item in swatch:
            total += chunk_count(item)
        return total
def rel_path_to(self, dest):
    """Builds a relative path leading from this one to the given `dest`.

    Note that these paths might be both relative, in which case they'll
    be assumed to start from the same directory.

    :param dest: target path; any value accepted by this path class.
    :return: a new instance of this class holding the relative path.
    """
    dest = self.__class__(dest)
    # Compare case-normalized components of self against dest's components.
    orig_list = self.norm_case()._components()
    dest_list = dest._components()
    # i tracks the last compared index; -1 covers the empty-components case.
    i = -1
    for i, (orig_part, dest_part) in enumerate(zip(orig_list, dest_list)):
        if orig_part != self._normcase(dest_part):
            # First mismatch: climb out of the remaining original
            # components, then descend into dest's remainder from here.
            up = ['..'] * (len(orig_list) - i)
            return self.__class__(*(up + dest_list[i:]))
    if len(orig_list) <= len(dest_list):
        if len(dest_list) > i + 1:
            # self is a proper prefix of dest: relative path is dest's tail.
            return self.__class__(*dest_list[i + 1:])
        else:
            # Paths are identical: the relative path is empty.
            return self.__class__('')
    else:
        # dest is a proper prefix of self: climb up the extra components.
        up = ['..'] * (len(orig_list) - i - 1)
        return self.__class__(*up)
def get_context_data(self, **kwargs):
    """Hook for adding arguments to the context.

    Builds a context containing the current object under ``'obj'``, a
    confirmation message under ``'conf_msg'`` when a ``queryset`` keyword
    is supplied, and every remaining keyword argument verbatim.
    """
    context = dict(obj=self.object)
    if 'queryset' in kwargs:
        message = self.get_confirmation_message(kwargs['queryset'])
        context['conf_msg'] = message
    context.update(kwargs)
    return context
def neighbor(self, **kwargs):
    """Experimental neighbor method.

    Args:
        ip_addr (str): IP Address of BGP neighbor (IPv4 or IPv6).
        remote_as (str): Remote ASN of BGP neighbor.
        rbridge_id (str): The rbridge ID of the device on which BGP will be
            configured in a VCS fabric. Defaults to '1'.
        afis (list): A list of AFIs to configure. Do not include IPv4 or
            IPv6 unicast as these are inferred from the `ip_addr` parameter.
        delete (bool): Deletes the neighbor if `delete` is ``True``.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `ip_addr` is not specified.
        ValueError: if `remote_as` is missing while configuring (not
            needed for `get`).

    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.203', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.bgp.local_asn(local_as='65535',
        ...                                rbridge_id='225')
        ...     output = dev.bgp.neighbor(ip_addr='10.10.10.10',
        ...                               remote_as='65535', rbridge_id='225')
        ...     output = dev.bgp.neighbor(ip_addr='10.10.10.10',
        ...                               delete=True, rbridge_id='225',
        ...                               remote_as='65535')
    """
    # Parse/normalize the address; .version below selects IPv4 vs IPv6
    # handling. `unicode` indicates this module targets Python 2.
    ip_addr = ip_interface(unicode(kwargs.pop('ip_addr')))
    rbridge_id = kwargs.pop('rbridge_id', '1')
    delete = kwargs.pop('delete', False)
    callback = kwargs.pop('callback', self._callback)
    remote_as = kwargs.pop('remote_as', None)
    get_config = kwargs.pop('get', False)
    if not get_config and remote_as is None:
        raise ValueError('When configuring a neighbor, you must specify '
                         'its remote-as.')
    neighbor_args = dict(router_bgp_neighbor_address=str(ip_addr.ip),
                         remote_as=remote_as,
                         rbridge_id=rbridge_id)
    if ip_addr.version == 6:
        # IPv6 neighbors use a dedicated address element.
        neighbor_args['router_bgp_neighbor_ipv6_address'] = str(ip_addr.ip)
    # _unicast_xml returns the XML builder plus the XPath of the address
    # node, so the delete branch can locate it again.
    neighbor, ip_addr_path = self._unicast_xml(ip_addr.version)
    config = neighbor(**neighbor_args)
    if ip_addr.version == 6 and not delete:
        # IPv6 additionally needs address-family activation.
        config = self._build_ipv6(ip_addr, config, rbridge_id)
    if delete and config.find(ip_addr_path) is not None:
        # Mark both the AF-specific node and the top-level neighbor
        # address node for deletion.
        if ip_addr.version == 4:
            config.find(ip_addr_path).set('operation', 'delete')
            config.find('.//*router-bgp-neighbor-address').set('operation',
                                                               'delete')
        elif ip_addr.version == 6:
            config.find(ip_addr_path).set('operation', 'delete')
            config.find('.//*router-bgp-neighbor-ipv6-address').set(
                'operation', 'delete')
    if get_config:
        return callback(config, handler='get_config')
    return callback(config)
def flush(self):
    """Remove the whole attachments storage entry from the annotation."""
    annotation = self.get_annotation()
    # Only drop the key when it holds an actual value; a key explicitly
    # set to None is left in place, matching the previous behavior.
    if annotation.get(ATTACHMENTS_STORAGE) is not None:
        annotation.pop(ATTACHMENTS_STORAGE)
def match(Class, path, pattern, flags=re.I, sortkey=None, ext=None):
    """For a given path and regexp pattern, return the files that match."""
    candidates = []
    for fn in rglob(path, f"*{ext or ''}"):
        base = os.path.basename(fn)
        # Keep files whose basename matches, skipping temp files ("~...").
        if re.search(pattern, base, flags=flags) is not None and base[0] != '~':
            candidates.append(Class(fn=fn))
    return sorted(candidates, key=sortkey)
def interface_list(env, securitygroup_id, sortby):
    """List interfaces associated with security groups.

    :param env: CLI environment providing the SoftLayer client and output.
    :param securitygroup_id: id of the security group to inspect.
    :param sortby: column name to sort the output table by.
    """
    mgr = SoftLayer.NetworkManager(env.client)
    table = formatting.Table(COLUMNS)
    table.sortby = sortby

    # Object mask limiting the API response to the binding/guest fields
    # rendered below (whitespace in the mask is not significant).
    mask = (
        '''networkComponentBindings[
            networkComponentId,
            networkComponent[
                id,
                port,
                guest[
                    id,
                    hostname,
                    primaryBackendIpAddress,
                    primaryIpAddress
                ]
            ]
        ]'''
    )
    secgroup = mgr.get_securitygroup(securitygroup_id, mask=mask)
    for binding in secgroup.get('networkComponentBindings', []):
        interface_id = binding['networkComponentId']
        try:
            interface = binding['networkComponent']
            vsi = interface['guest']
            vsi_id = vsi['id']
            hostname = vsi['hostname']
            # port 0 is the private (backend) interface, anything else public.
            priv_pub = 'PRIVATE' if interface['port'] == 0 else 'PUBLIC'
            ip_address = (vsi['primaryBackendIpAddress']
                          if interface['port'] == 0
                          else vsi['primaryIpAddress'])
        except KeyError:
            # Missing keys indicate the caller lacks permission to view
            # the guest details; render placeholders instead of failing.
            vsi_id = "N/A"
            hostname = "Not enough permission to view"
            priv_pub = "N/A"
            ip_address = "N/A"
        table.add_row([interface_id, vsi_id, hostname, priv_pub, ip_address])
    env.fout(table)
def minimum_sys(cls, inherit_path):
    """Return the minimum sys necessary to run this interpreter, a la python -S.

    :returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
              bare python installation.
    """
    # Collect site-packages directories, then fold in any "extras" paths.
    site_libs = set(cls.site_libs())
    for lib in site_libs:
        TRACER.log('Found site-library: %s' % lib)
    for extra in cls._extras_paths():
        TRACER.log('Found site extra: %s' % extra)
        site_libs.add(extra)
    site_libs = {os.path.normpath(p) for p in site_libs}
    sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs,
                                                             inherit_path)
    sys_modules = cls.minimum_sys_modules(site_libs)
    return sys_path, sys_path_importer_cache, sys_modules
def _to_edges ( self , tokens ) : """This is an iterator that returns the nodes of our graph : " This is a test " - > " None This " " This is " " is a " " a test " " test None " Each is annotated with a boolean that tracks whether whitespace was found between the two tokens ."""
# prepend self . order Nones chain = self . _end_context + tokens + self . _end_context has_space = False context = [ ] for i in xrange ( len ( chain ) ) : context . append ( chain [ i ] ) if len ( context ) == self . order : if chain [ i ] == self . SPACE_TOKEN_ID : context . pop ( ) has_space = True continue yield tuple ( context ) , has_space context . pop ( 0 ) has_space = False
def envelope(**kwargs):
    """Create OAI-PMH envelope for response.

    :param kwargs: verb arguments echoed back on the <request> element;
        ``from_``/``until`` datetimes are converted to datestamps and a
        ``resumptionToken`` dict is reduced to its ``token`` string.
    :returns: ``(ElementTree, root Element)`` pair for further population.
    """
    e_oaipmh = Element(etree.QName(NS_OAIPMH, 'OAI-PMH'), nsmap=NSMAP)
    e_oaipmh.set(etree.QName(NS_XSI, 'schemaLocation'),
                 '{0} {1}'.format(NS_OAIPMH, NS_OAIPMH_XSD))
    e_tree = ElementTree(element=e_oaipmh)

    # Optionally attach an xml-stylesheet processing instruction ahead of
    # the root element (addprevious places it before <OAI-PMH>).
    if current_app.config['OAISERVER_XSL_URL']:
        e_oaipmh.addprevious(etree.ProcessingInstruction(
            'xml-stylesheet', 'type="text/xsl" href="{0}"'.format(
                current_app.config['OAISERVER_XSL_URL'])))

    e_responseDate = SubElement(
        e_oaipmh, etree.QName(NS_OAIPMH, 'responseDate'))
    # date should be first possible moment
    e_responseDate.text = datetime_to_datestamp(datetime.utcnow())

    e_request = SubElement(e_oaipmh, etree.QName(NS_OAIPMH, 'request'))
    for key, value in kwargs.items():
        if key == 'from_' or key == 'until':
            value = datetime_to_datestamp(value)
        elif key == 'resumptionToken':
            value = value['token']
        # NOTE(review): the key is used verbatim as the attribute name, so
        # 'from_' would be emitted with the trailing underscore — confirm
        # callers pre-map it to 'from' if strict OAI-PMH output is required.
        e_request.set(key, value)
    e_request.text = url_for('invenio_oaiserver.response', _external=True)

    return e_tree, e_oaipmh
def _message_to_payload(cls, message):
    '''Returns a Python object or a ProtocolError.'''
    error_text = None
    try:
        text = message.decode()
    except UnicodeDecodeError:
        error_text = 'messages must be encoded in UTF-8'
    else:
        try:
            return json.loads(text)
        except json.JSONDecodeError:
            error_text = 'invalid JSON'
    # Only reached when decoding or parsing failed above.
    raise cls._error(cls.PARSE_ERROR, error_text, True, None)
def form_invalid(self, form):
    '''Builds the JSON for the errors.

    Collects non-field, hidden-field and visible-field errors from `form`
    into a dict keyed by the view's configured keys, then renders it as a
    JSON response via self._render_json.
    '''
    # Per-field errors are nested under self.errors_key.
    response = {self.errors_key: {}}
    response[self.non_field_errors_key] = form.non_field_errors()
    # Hidden-field errors are merged in at the top level by the helper.
    response.update(self.get_hidden_fields_errors(form))
    for field in form.visible_fields():
        if field.errors:
            response[self.errors_key][field.html_name] = self._get_field_error_dict(field)
    if self.include_success:
        # NOTE: attribute name 'sucess_key' (sic) is defined elsewhere on
        # the class; kept as-is to match the external interface.
        response[self.sucess_key] = False
    return self._render_json(response)
def main():
    """Filter GLAS/ICESat-1 altimetry shots for a site and write CSV products.

    Reads a GLAH14 HDF5 granule, applies spatial and quality filters,
    optionally compares shots against a reference DEM and an LULC product,
    and writes progressively filtered CSV files plus OGR VRT sidecars.
    """
    parser = getparser()
    args = parser.parse_args()
    fn = args.fn
    sitename = args.sitename
    # User-specified output extent
    # Note: not checked, untested
    if args.extent is not None:
        extent = (args.extent).split()
    else:
        extent = (geolib.site_dict[sitename]).extent
    if args.refdem_fn is not None:
        refdem_fn = args.refdem_fn
    else:
        refdem_fn = (geolib.site_dict[sitename]).refdem_fn
    # Max elevation difference between shot and sampled DEM
    max_z_DEM_diff = 200
    # Max elevation std for sampled DEM values in padded window around shot
    max_DEMhiresArElv_std = 50.0
    f = h5py.File(fn)
    t = f.get('Data_40HZ/Time/d_UTCTime_40')[:]
    # pyt0 = datetime(1, 1, 1, 0, 0)
    # utct0 = datetime(1970, 1, 1, 0, 0)
    # t0 = datetime(2000, 1, 1, 12, 0, 0)
    # offset_s = (t0 - utct0).total_seconds()
    # Seconds between the Unix epoch and the GLAS epoch (2000-01-01 12:00)
    offset_s = 946728000.0
    t += offset_s
    dt = timelib.np_utc2dt(t)
    dt_o = timelib.dt2o(dt)
    # dts = timelib.np_print_dt(dt)
    # dt_decyear = timelib.np_dt2decyear(dt)
    # YYYYMMDD integers (`long`: this script targets Python 2)
    dt_int = np.array([ts.strftime('%Y%m%d') for ts in dt], dtype=long)
    # 1.7976931348623157e+308 (max double) is the GLAS no-data sentinel
    lat = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lat')[:],
                             1.7976931348623157e+308)
    lon = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lon')[:],
                             1.7976931348623157e+308)
    lon = geolib.lon360to180(lon)
    z = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_elev')[:],
                           1.7976931348623157e+308)
    print('Input: %i' % z.count())

    # Now spatial filter - should do this up front
    x = lon
    y = lat
    xmin, xmax, ymin, ymax = extent
    # This is True if point is within extent
    valid_idx = ((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax))

    # Prepare output array
    # out = np.ma.vstack([dt_decyear, dt_o, dt_int, lat, lon, z]).T
    out = np.ma.vstack([dt_o, dt_int, lat, lon, z]).T
    # Create a mask to ensure all values are valid for each point
    mask = ~(np.any(np.ma.getmaskarray(out), axis=1))
    mask *= valid_idx
    out = out[mask]
    valid_idx = ~(np.any(np.ma.getmaskarray(out), axis=1))

    # Lon and lat column indices in `out`
    xcol = 3
    ycol = 2
    zcol = 4

    if out.shape[0] == 0:
        sys.exit("No points within specified extent\n")
    else:
        print("Spatial filter: %i" % out.shape[0])

    # out_fmt = ['%0.8f', '%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f']
    # out_hdr = ['dt_decyear, dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']
    out_fmt = ['%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f']
    out_hdr = ['dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']

    """ICESat-1 filters"""
    # Saturation Correction Flag
    # These are 0 to 5, not_saturated inconsequential applicable not_computed not_applicable
    sat_corr_flg = f.get('Data_40HZ/Quality/sat_corr_flg')[mask]
    # valid_idx *= (sat_corr_flg < 2)
    # Correction to elevation for saturated waveforms
    # Notes suggest this might not be desirable over land
    satElevCorr = np.ma.masked_equal(
        f.get('Data_40HZ/Elevation_Corrections/d_satElevCorr')[mask],
        1.7976931348623157e+308)
    # z[sat_corr_flg < 3] += satElevCorr.filled(0.0)[sat_corr_flg < 3]
    out[:, zcol] += satElevCorr.filled(0.0)
    # Correction to elevation based on post flight analysis for biases
    # determined for each campaign
    ElevBiasCorr = np.ma.masked_equal(
        f.get('Data_40HZ/Elevation_Corrections/d_ElevBiasCorr')[mask],
        1.7976931348623157e+308)
    out[:, zcol] += ElevBiasCorr.filled(0.0)
    # Surface elevation (T/P ellipsoid) minus surface elevation (WGS84
    # ellipsoid). Approximately 0.7 m, so WGS is lower; need to subtract
    # from d_elev
    deltaEllip = np.ma.masked_equal(
        f.get('Data_40HZ/Geophysical/d_deltaEllip')[mask],
        1.7976931348623157e+308)
    out[:, zcol] -= deltaEllip
    # Drop shots whose corrected elevation ended up masked
    valid_idx *= ~(np.ma.getmaskarray(out[:, zcol]))
    print("z corrections: %i" % valid_idx.nonzero()[0].size)

    if False:
        # Reflectivity, not corrected for atmospheric effects
        reflctUC = np.ma.masked_equal(
            f.get('Data_40HZ/Reflectivity/d_reflctUC')[mask],
            1.7976931348623157e+308)
        # This was minimum used for ice sheets
        min_reflctUC = 0.025
        valid_idx *= (reflctUC > min_reflctUC).data
        print("reflctUC: %i" % valid_idx.nonzero()[0].size)

    if False:
        # The standard deviation of the difference between the functional
        # fit and the received echo using alternate parameters. It is
        # directly taken from GLA05 parameter d_wfFitSDev_1
        LandVar = np.ma.masked_equal(
            f.get('Data_40HZ/Elevation_Surfaces/d_LandVar')[mask],
            1.7976931348623157e+308)
        # This was max used for ice sheets
        max_LandVar = 0.04
        valid_idx *= (LandVar < max_LandVar).data
        print("LandVar: %i" % valid_idx.nonzero()[0].size)

    if True:
        # Flag indicating whether the elevations on this record should be
        # used. 0 = valid, 1 = not valid
        elev_use_flg = f.get('Data_40HZ/Quality/elev_use_flg')[mask].astype('Bool')
        valid_idx *= ~elev_use_flg
        print("elev_use_flg: %i" % valid_idx.nonzero()[0].size)

    if False:
        # Cloud contamination; indicates if Gain > flag value, indicating
        # probable cloud contamination.
        elv_cloud_flg = f.get('Data_40HZ/Elevation_Flags/elv_cloud_flg')[mask].astype('Bool')
        valid_idx *= ~elv_cloud_flg
        print("elv_cloud_flg: %i" % valid_idx.nonzero()[0].size)

    if False:
        # Full resolution 1064 Quality Flag; 0-12 indicate Cloud detected
        FRir_qa_flg = f.get('Data_40HZ/Atmosphere/FRir_qa_flg')[mask]
        valid_idx *= (FRir_qa_flg == 15).data
        print("FRir_qa_flg: %i" % valid_idx.nonzero()[0].size)

    if False:
        # This is elevation extracted from SRTM30
        DEM_elv = np.ma.masked_equal(
            f.get('Data_40HZ/Geophysical/d_DEM_elv')[mask],
            1.7976931348623157e+308)
        z_DEM_diff = np.abs(out[:, zcol] - DEM_elv)
        valid_idx *= (z_DEM_diff < max_z_DEM_diff).data
        print("z_DEM_diff: %i" % valid_idx.nonzero()[0].size)
        # d_DEMhiresArElv is a 9 element array of high resolution DEM
        # values. The array index corresponds to the position of the DEM
        # value relative to the spot. (5) is the footprint center.
        DEMhiresArElv = np.ma.masked_equal(
            f.get('Data_40HZ/Geophysical/d_DEMhiresArElv')[mask],
            1.7976931348623157e+308)
        DEMhiresArElv_std = np.ma.std(DEMhiresArElv, axis=1)
        valid_idx *= (DEMhiresArElv_std < max_DEMhiresArElv_std).data
        print("max_DEMhiresArElv_std: %i" % valid_idx.nonzero()[0].size)

    # Compute slope

    # Apply cumulative filter to output
    out = out[valid_idx]

    out_fn = os.path.splitext(fn)[0] + '_%s.csv' % sitename
    print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
    out_fmt_str = ', '.join(out_fmt)
    out_hdr_str = ', '.join(out_hdr)
    np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
    iolib.writevrt(out_fn, x='lon', y='lat')

    # Extract our own DEM values - should be better than default GLAS
    # reference DEM stats
    if True:
        print("Loading reference DEM: %s" % refdem_fn)
        dem_ds = gdal.Open(refdem_fn)
        print("Converting coords for DEM")
        dem_mX, dem_mY = geolib.ds_cT(dem_ds, out[:, xcol], out[:, ycol],
                                      geolib.wgs_srs)
        print("Sampling")
        dem_samp = geolib.sample(dem_ds, dem_mX, dem_mY, pad='glas')
        abs_dem_z_diff = np.abs(out[:, zcol] - dem_samp[:, 0])

        # NOTE(review): valid_idx at this point still has the pre-filter
        # length while abs_dem_z_diff matches the filtered `out`; confirm
        # this reuse is intentional — it only works when the preceding
        # filters removed no shots, otherwise the in-place multiply will
        # raise a shape mismatch.
        valid_idx *= ~(np.ma.getmaskarray(abs_dem_z_diff))
        print("Valid DEM extract: %i" % valid_idx.nonzero()[0].size)
        valid_idx *= (abs_dem_z_diff < max_z_DEM_diff).data
        print("Valid abs DEM diff: %i" % valid_idx.nonzero()[0].size)
        valid_idx *= (dem_samp[:, 1] < max_DEMhiresArElv_std).data
        print("Valid DEM mad: %i" % valid_idx.nonzero()[0].size)

        if valid_idx.nonzero()[0].size == 0:
            sys.exit("No valid points remain")

        out = np.ma.hstack([out, dem_samp])
        out_fmt.extend(['%0.2f', '%0.2f'])
        out_hdr.extend(['z_refdem_med_WGS84', 'z_refdem_nmad'])

        # Apply cumulative filter to output
        out = out[valid_idx]

        out_fn = os.path.splitext(out_fn)[0] + '_refdemfilt.csv'
        print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
        out_fmt_str = ', '.join(out_fmt)
        out_hdr_str = ', '.join(out_hdr)
        np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',',
                   header=out_hdr_str)
        iolib.writevrt(out_fn, x='lon', y='lat')

    # This will sample land-use/land-cover or percent bareground products
    # Can be used to isolate points over exposed rock
    # if args.rockfilter:
    if True:
        # This should automatically identify appropriate LULC source based
        # on refdem extent
        lulc_source = dem_mask.get_lulc_source(dem_ds)
        # Looks like NED extends beyond NLCD, force use NLCD for conus
        # if sitename == 'conus':
        #     lulc_source = 'nlcd'
        lulc_ds = dem_mask.get_lulc_ds_full(dem_ds, lulc_source)
        print("Converting coords for LULC")
        lulc_mX, lulc_mY = geolib.ds_cT(lulc_ds, out[:, xcol], out[:, ycol],
                                        geolib.wgs_srs)
        print("Sampling LULC: %s" % lulc_source)
        # Note: want to make sure we're not interpolating integer values
        # for NLCD. Should be safe with pad=0; even with pad > 0, should
        # take median, not mean
        lulc_samp = geolib.sample(lulc_ds, lulc_mX, lulc_mY, pad=0)
        l = lulc_samp[:, 0].data
        if lulc_source == 'nlcd':
            # This passes rock (31) and ice (12) pixels
            valid_idx = np.logical_or((l == 31), (l == 12))
        elif lulc_source == 'bareground':
            # This preserves pixels with bareground percentage > 85%
            minperc = 85
            valid_idx = (l >= minperc)
        else:
            print("Unknown LULC source")
        print("LULC: %i" % valid_idx.nonzero()[0].size)
        if l.ndim == 1:
            l = l[:, np.newaxis]
        out = np.ma.hstack([out, l])
        out_fmt.append('%i')
        out_hdr.append('lulc')

        # Apply cumulative filter to output
        out = out[valid_idx]

        out_fn = os.path.splitext(out_fn)[0] + '_lulcfilt.csv'
        print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
        out_fmt_str = ', '.join(out_fmt)
        out_hdr_str = ', '.join(out_hdr)
        np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',',
                   header=out_hdr_str)
        iolib.writevrt(out_fn, x='lon', y='lat')
def _log_band_edge_information(bs, edge_data):
    """Log data about the valence band maximum or conduction band minimum.

    Args:
        bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
            The band structure.
        edge_data (dict): The :obj:`dict` from ``bs.get_vbm()`` or
            ``bs.get_cbm()``.
    """
    if bs.is_spin_polarized:
        # Report 1-based band indices per spin channel, e.g. "1, 2(Up)".
        spins = edge_data['band_index'].keys()
        b_indices = [', '.join([str(i + 1)
                                for i in edge_data['band_index'][spin]])
                     + '({})'.format(spin.name.capitalize())
                     for spin in spins]
        b_indices = ', '.join(b_indices)
    else:
        b_indices = ', '.join([str(i + 1)
                               for i in edge_data['band_index'][Spin.up]])

    kpoint = edge_data['kpoint']
    # kpt_str is a module-level format template — presumably formats the
    # fractional coordinates; TODO confirm its fields.
    kpoint_str = kpt_str.format(k=kpoint.frac_coords)
    k_indices = ', '.join(map(str, edge_data['kpoint_index']))

    if kpoint.label:
        k_loc = kpoint.label
    else:
        # Unlabeled k-point: describe it by the branch it falls on.
        branch = bs.get_branch(edge_data['kpoint_index'][0])[0]
        k_loc = 'between {}'.format(branch['name'])

    logging.info('  Energy: {:.3f} eV'.format(edge_data['energy']))
    logging.info('  k-point: {}'.format(kpoint_str))
    logging.info('  k-point location: {}'.format(k_loc))
    logging.info('  k-point indices: {}'.format(k_indices))
    logging.info('  Band indices: {}'.format(b_indices))
def lock_instance(cls, instance_or_pk, read=False):
    """Return a locked model instance in ``db.session``.

    :param instance_or_pk: an instance of this model class, or a primary
        key; a composite primary key may be passed as a tuple.
    :param read: if `True`, a reading lock is obtained instead of a
        writing lock.
    :return: the locked instance, or ``None`` when no row matches.
    """
    mapper = inspect(cls)
    # Pair each primary-key column attribute with its given key value.
    pk_attrs = [mapper.get_property_by_column(col).class_attribute
                for col in mapper.primary_key]
    pk_values = cls.get_pk_values(instance_or_pk)
    criteria = [attr == value for attr, value in zip(pk_attrs, pk_values)]
    query = cls.query.filter(and_(*criteria)).with_for_update(read=read)
    return query.one_or_none()
def validation_curve(model, X, y, param_name, param_range, ax=None,
                     logx=False, groups=None, cv=None, scoring=None,
                     n_jobs=1, pre_dispatch="all", **kwargs):
    """Quick method: draw a validation curve for one hyperparameter.

    Plots train and cross-validated test scores over ``param_range`` — a
    visual single-parameter grid search for balancing bias and variance.
    Wraps the ``ValidationCurve`` visualizer for fast, one-call analysis.

    Parameters
    ----------
    model : a scikit-learn estimator
        An object implementing ``fit`` and ``predict`` with an associated
        scoring metric; cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X; None for unsupervised learning.
    param_name : string
        Name of the parameter that will be varied.
    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.
    ax : matplotlib.Axes object, optional
        The axes object to plot the figure on.
    logx : boolean, optional
        If True, plots the x-axis with a logarithmic scale.
    groups : array-like, shape (n_samples,)
        Optional group labels used when splitting into train/test sets.
    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy (None, integer fold count,
        CV splitter object, or iterable of train/test splits).
    scoring : string, callable or None, optional
        Metric name or scorer callable ``scorer(estimator, X, y)``.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default
        "all"); can be an expression like '2*n_jobs'.
    kwargs : dict
        Keyword arguments passed to the ``poof()`` call, e.g. a path to
        save the figure to.

    Returns
    -------
    ax : matplotlib.Axes
        The axes object that the validation curves were drawn on.
    """
    # Build, fit, and finalize the visualizer, then hand back its axes.
    visualizer = ValidationCurve(
        model, param_name, param_range, ax=ax, logx=logx, groups=groups,
        cv=cv, scoring=scoring, n_jobs=n_jobs, pre_dispatch=pre_dispatch,
    )
    visualizer.fit(X, y)
    visualizer.poof(**kwargs)
    return visualizer.ax
def decode_aes256_base64_auto(data, encryption_key):
    """Guess the AES cipher mode (ECB or CBC) from the base64-encoded data.

    CBC-encrypted payloads are marked with a leading ``!`` byte; anything
    else is treated as ECB. Empty input decodes to empty bytes.

    :param bytes data: base64-encoded ciphertext.
    :param encryption_key: key forwarded to the mode-specific decoder.
    :return bytes: the decrypted plaintext.
    :raises TypeError: if ``data`` is not a bytes object.
    """
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if not isinstance(data, bytes):
        raise TypeError('data must be bytes, got %s' % type(data).__name__)
    if not data:
        return b''
    # b'!'[0] compares the first byte's integer value (works on py3).
    if data[0] == b'!'[0]:
        return decode_aes256_cbc_base64(data, encryption_key)
    return decode_aes256_ecb_base64(data, encryption_key)
def callback(self, request, **kwargs):
    """Called from the Service when the user accepts to activate it;
    returns the URL to go back to after the external service call.

    Exchanges the OAuth ``code`` query parameter for a Reddit token and
    stores it on the user's 'ServiceReddit' UserService row.

    :param request: contains the current session
    :param kwargs: keyword args
    :type request: dict
    :type kwargs: dict
    :rtype: string
    """
    code = request.GET.get('code', '')
    # Rebuild the exact redirect URI that was registered with Reddit.
    redirect_uri = '%s://%s%s' % (request.scheme, request.get_host(),
                                  reverse("reddit_callback"))
    reddit = RedditApi(client_id=self.consumer_key,
                       client_secret=self.consumer_secret,
                       redirect_uri=redirect_uri,
                       user_agent=self.user_agent)
    token = reddit.auth.authorize(code)
    # Persist the token on this user's Reddit service entry.
    UserService.objects.filter(user=request.user,
                               name='ServiceReddit').update(token=token)
    return 'reddit/callback.html'
def output(self, stream, disabletransferencoding=None):
    """Set output stream and send response immediately.

    Sets a Content-Length header when the stream's length is knowable,
    optionally disables transfer encoding, then starts the response.
    Raises if headers have already been sent.
    """
    if self._sendHeaders:
        raise HttpProtocolException('Cannot modify response, headers already sent')
    self.outputstream = stream
    try:
        content_length = len(stream)
    except Exception:
        # Length not knowable (e.g. a generator); rely on transfer encoding.
        content_length = None
    if content_length is not None:
        self.header(b'Content-Length', str(content_length).encode('ascii'))
    if disabletransferencoding is not None:
        self.disabledeflate = disabletransferencoding
    self._startResponse()
def sync_hooks(user_id, repositories):
    """Sync repository hooks for a user.

    :param user_id: id of the user whose GitHub hooks to synchronize.
    :param repositories: iterable of repository ids to process.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .api import GitHubAPI

    try:
        # Sync hooks
        gh = GitHubAPI(user_id=user_id)
        for repo_id in repositories:
            try:
                with db.session.begin_nested():
                    gh.sync_repo_hook(repo_id)
                # We commit per repository, because while the task is running
                # the user might enable/disable a hook.
                db.session.commit()
            except RepositoryAccessError as e:
                # Non-fatal: log and continue with the next repository.
                current_app.logger.warning(e.message, exc_info=True)
            except NoResultFound:
                pass  # Repository not in DB yet
    except Exception as exc:
        # Celery task: retry the whole sync on any unexpected failure.
        sync_hooks.retry(exc=exc)
def checkIfHashIsCracked(hash=None, api_key=None):
    '''
    Method that checks if the given hash is stored in the md5crack.com website.

    :param hash: hash to verify.
    :param api_key: api_key to be used in md5crack.com. If not provided,
        the API key will be searched in the config_api_keys.py file.

    :return: Python structure for the Json received, e.g.:
        {
            "phrase": "4d186321c1a7f0f354b297e8914ab240",
            "code": 6,
            "parsed": "hola",
            "response": "The MD5 hash was cracked."
        }
        An empty dict is returned when no API key can be found.
    '''
    # This is for i3visio
    if api_key is None:
        # api_key = raw_input("Insert the API KEY here:\t")
        allKeys = config_api_keys.returnListOfAPIKeys()
        try:
            api_key = allKeys["md5crack_com"]["api_key"]
        except KeyError:
            # API key not configured: nothing we can do.
            return {}

    apiURL = "http://api.md5crack.com/crack/" + api_key + "/" + hash

    # Accessing the md5crack.com API (urllib2: this module targets Python 2)
    data = urllib2.urlopen(apiURL).read()
    # Normalize a null 'parsed' field so callers always receive a string.
    if "\"parsed\":null" in data:
        data = data.replace("\"parsed\":null", "\"parsed\":\"\"")

    # Reading the text data onto python structures
    jsonData = json.loads(data)
    return jsonData
def _generate_doc(ret):
    '''Create an object that will be saved into the database based on options.

    :param ret: job return dict; must contain a ``jid`` key.
    :return: a copy of ``ret`` with ``_id`` set to the jid and a
        ``timestamp`` field added; the input dict is left untouched.
    '''
    document = dict(ret)
    # Use the JID as the document's primary key.
    document["_id"] = ret["jid"]
    # Stamp the document with the current wall-clock time.
    document["timestamp"] = time.time()
    return document
def write_result(self, result):
    """Send back the result of this call.

    Only one of this and `write_exc_info` may be called.

    :param result: Return value of the call
    """
    assert not self.finished, "Already sent a response"
    spec_list = self.result.thrift_spec
    if not spec_list:
        # Void method with no declared fields: just mark the response done.
        self.finished = True
        return
    success_spec = spec_list[0]
    if result is not None:
        assert success_spec, "Tried to return a result for a void method."
        # success_spec[2] is the attribute name of the success field.
        setattr(self.result, success_spec[2], result)
    self.finished = True