signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def get_settings(self):
    """GetSettings.

    [Preview API]

    :rtype: :class:`<NotificationAdminSettings> <azure.devops.v5_0.notification.models.NotificationAdminSettings>`
    """
    # Issue the GET against the notification-admin-settings location and
    # deserialize the wire payload into the model type.
    raw_response = self._send(http_method='GET',
                              location_id='cbe076d8-2803-45ff-8d8d-44653686ea2a',
                              version='5.0-preview.1')
    return self._deserialize('NotificationAdminSettings', raw_response)
def validate_supported_quil(program: Program) -> None:
    """Ensure that a program is supported Quil which can run on any QPU,
    otherwise raise a ValueError.

    We support a global RESET before any gates, and MEASUREs on each qubit
    after any gates on that qubit. PRAGMAs and DECLAREs are always allowed,
    and a final HALT instruction is allowed.

    :param program: The Quil program to validate.
    :raises ValueError: if the program contains unsupported instructions or
        an unsupported instruction ordering.
    """
    gates_seen = False
    measured_qubits: Set[int] = set()
    for i, instr in enumerate(program.instructions):
        # Idiom fix: single isinstance with a tuple instead of an `or` chain.
        if isinstance(instr, (Pragma, Declare)):
            continue
        elif isinstance(instr, Halt):
            # HALT is only valid as the very last instruction.
            if i != len(program.instructions) - 1:
                # Fix: removed pointless f-string prefix (no placeholders).
                raise ValueError("Cannot have instructions after HALT")
        elif isinstance(instr, Gate):
            gates_seen = True
            if any(q.index in measured_qubits for q in instr.qubits):
                raise ValueError("Cannot apply gates to qubits that were already measured.")
        elif isinstance(instr, Reset):
            if gates_seen:
                raise ValueError("RESET can only be applied before any gate applications.")
        elif isinstance(instr, ResetQubit):
            raise ValueError("Only global RESETs are currently supported.")
        elif isinstance(instr, Measurement):
            if instr.qubit.index in measured_qubits:
                raise ValueError("Multiple measurements per qubit is not supported.")
            measured_qubits.add(instr.qubit.index)
        else:
            raise ValueError(f"Unhandled instruction type in supported Quil validation: {instr}")
def display(self, image):
    """Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the ST7567 LCD display"""
    # The caller must hand us an image already in the panel's mode/size.
    assert (image.mode == self.mode)
    assert (image.size == self.size)
    image = self.preprocess(image)
    set_page_address = 0xB0  # ST7567 "set page address" base command byte
    image_data = image.getdata()
    pixels_per_page = self.width * 8  # each page covers 8 pixel rows
    buf = bytearray(self.width)
    for y in range(0, int(self._pages * pixels_per_page), pixels_per_page):
        # Select the current page, then reset the column address (0x04, 0x10).
        self.command(set_page_address, 0x04, 0x10)
        set_page_address += 1
        # Start index in the flat pixel buffer for each of this page's 8 rows.
        offsets = [y + self.width * i for i in range(8)]
        for x in range(self.width):
            # Pack 8 vertically-stacked pixels into one byte for the panel.
            # `and` (not `&`) is deliberate: it yields the mask bit whenever
            # the pixel value is truthy (e.g. 255), and 0 otherwise.
            buf[x] = (image_data[x + offsets[0]] and 0x01) | (image_data[x + offsets[1]] and 0x02) | (image_data[x + offsets[2]] and 0x04) | (image_data[x + offsets[3]] and 0x08) | (image_data[x + offsets[4]] and 0x10) | (image_data[x + offsets[5]] and 0x20) | (image_data[x + offsets[6]] and 0x40) | (image_data[x + offsets[7]] and 0x80)
        self.data(list(buf))
def _getThread ( self , given_thread_id = None , given_thread_type = None ) : """Checks if thread ID is given , checks if default is set and returns correct values : raises ValueError : If thread ID is not given and there is no default : return : Thread ID and thread type : rtype : tuple"""
if given_thread_id is None : if self . _default_thread_id is not None : return self . _default_thread_id , self . _default_thread_type else : raise ValueError ( "Thread ID is not set" ) else : return given_thread_id , given_thread_type
def GetBlockByHeight(self, height):
    """Get a block by its height.

    Args:
        height (int): the height of the block to retrieve.

    Returns:
        neo.Core.Block: block instance.
    """
    # Resolve the height to a block hash first; an unknown height yields
    # no hash and therefore no block.
    block_hash = self.GetBlockHash(height)
    if block_hash is None:
        return None
    return self.GetBlockByHash(block_hash)
def uploadByParts(self, registerID, filePath, commit=True):
    """loads the data by small parts. If commit is set to true, then parts
    will be merged together. If commit is false, the function will return
    the registerID so a manual commit can occur. If the user's file is
    over 10mbs, the uploadByParts should be used.

    Inputs:
       registerID - ID of the registered item
       filePath - path of the file to upload
       commit - default True, lets the function know if server will piece
          the file back together on the server side.
    Output:
       dictionary or string
    """
    # NOTE(review): the ``commit`` flag is never consulted below — commit()
    # is always called at the end.  Confirm against the documented contract.
    url = self._url + "/%s/uploadPart" % registerID
    params = {"f": "json"}
    with open(filePath, 'rb') as f:
        # Memory-map the source file so each chunk can be read sequentially.
        mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        size = 1000000  # chunk size in bytes (~1 MB per uploaded part)
        steps = int(os.fstat(f.fileno()).st_size / size)
        if os.fstat(f.fileno()).st_size % size > 0:
            # Account for the final, partially-filled chunk.
            steps += 1
        for i in range(steps):
            files = {}
            # Chunks are staged as temp files; assumes a TEMP environment
            # variable exists (Windows-style) — TODO confirm portability.
            tempFile = os.path.join(os.environ['TEMP'], "split.part%s" % i)
            if os.path.isfile(tempFile):
                os.remove(tempFile)
            with open(tempFile, 'wb') as writer:
                writer.write(mm.read(size))
                writer.flush()
                writer.close()
                del writer
            files['file'] = tempFile
            # Server-side part numbers are 1-based.
            params['partNum'] = i + 1
            res = self._post(url=url, param_dict=params, files=files, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
            os.remove(tempFile)
            del files
        del mm
    return self.commit(registerID)
def create_widget(self):
    """Create the underlying widget."""
    d = self.declaration
    # An explicit style wins; otherwise pick the flat or regular
    # platform button style.
    if d.style:
        style = d.style
    elif d.flat:
        style = '@attr/borderlessButtonStyle'
    else:
        style = '@attr/buttonStyle'
    self.widget = Button(self.get_context(), None, style)
def delete_group(self, group):
    """Group was deleted."""
    try:
        local_group = self._get_group(group.name)
        delete(local_group, database=self._database)
    except ObjectDoesNotExist:
        # If the group was never mirrored locally there is nothing to do.
        pass
def context_range(self, context):
    """Return the 1-offset, right-open range of lines spanned by a
    particular context name.

    Parameters
    ----------
    context : str

    Raises
    ------
    ValueError, if context is not present in the file.
    """
    # Allow callers to omit the file's prefix.
    if not context.startswith(self.prefix):
        context = self.prefix + '.' + context

    first = last = None
    line_no = 0
    for line_context in self.lines:
        line_no += 1
        # Context is hierarchical -- a context spans itself and any suffix.
        if not line_context.startswith(context):
            continue
        if first is None:
            first = line_no
        last = line_no

    if first is None:
        raise ValueError("Context %s does not exist in file %s"
                         % (context, self.filename))
    return first, last + 1
def patience_sort(xs):
    '''Patience sort an iterable, xs.

    This function generates a series of pairs (x, pile), where "pile"
    is the 0-based index of the pile "x" should be placed on top of.
    Elements of "xs" must be less-than comparable.
    '''
    tops = []
    for item in xs:
        dest = bisect.bisect_left(tops, item)
        if dest == len(tops):
            # Larger than every current pile top: start a new pile.
            tops.append(item)
        else:
            # Place on the leftmost pile whose top is >= item.
            tops[dest] = item
        yield item, dest
def notify_event_nowait(self, conn_string, name, event):
    """Notify an event.

    This will move the notification to the background event loop and
    return immediately.  It is useful for situations where you cannot
    await notify_event but keep in mind that it prevents back-pressure
    when you are notifying too fast so should be used sparingly.

    Note that calling this method will push the notification to a
    background task so it can be difficult to reason about when it will
    precisely occur.  For that reason, :meth:`notify_event` should be
    preferred when possible since that method guarantees that all
    callbacks will be called synchronously before it finishes.

    Args:
        conn_string (str): The connection string for the device that the
            event is associated with.
        name (str): The name of the event. Must be in SUPPORTED_EVENTS.
        event (object): The event object.  The type of this object will
            depend on what is being notified.
    """
    if self._loop.stopping:
        # Too late to schedule background work once shutdown has begun.
        self._logger.debug("Ignoring notification %s from %s because loop is shutting down",
                           name, conn_string)
        return

    self._loop.log_coroutine(self._notify_event_internal, conn_string, name, event)
def direct_callback ( self , event ) : """This function is called for every OS keyboard event and decides if the event should be blocked or not , and passes a copy of the event to other , non - blocking , listeners . There are two ways to block events : remapped keys , which translate events by suppressing and re - emitting ; and blocked hotkeys , which suppress specific hotkeys ."""
# Pass through all fake key events , don ' t even report to other handlers . if self . is_replaying : return True if not all ( hook ( event ) for hook in self . blocking_hooks ) : return False event_type = event . event_type scan_code = event . scan_code # Update tables of currently pressed keys and modifiers . with _pressed_events_lock : if event_type == KEY_DOWN : if is_modifier ( scan_code ) : self . active_modifiers . add ( scan_code ) _pressed_events [ scan_code ] = event hotkey = tuple ( sorted ( _pressed_events ) ) if event_type == KEY_UP : self . active_modifiers . discard ( scan_code ) if scan_code in _pressed_events : del _pressed_events [ scan_code ] # Mappings based on individual keys instead of hotkeys . for key_hook in self . blocking_keys [ scan_code ] : if not key_hook ( event ) : return False # Default accept . accept = True if self . blocking_hotkeys : if self . filtered_modifiers [ scan_code ] : origin = 'modifier' modifiers_to_update = set ( [ scan_code ] ) else : modifiers_to_update = self . active_modifiers if is_modifier ( scan_code ) : modifiers_to_update = modifiers_to_update | { scan_code } callback_results = [ callback ( event ) for callback in self . blocking_hotkeys [ hotkey ] ] if callback_results : accept = all ( callback_results ) origin = 'hotkey' else : origin = 'other' for key in sorted ( modifiers_to_update ) : transition_tuple = ( self . modifier_states . get ( key , 'free' ) , event_type , origin ) should_press , new_accept , new_state = self . transition_table [ transition_tuple ] if should_press : press ( key ) if new_accept is not None : accept = new_accept self . modifier_states [ key ] = new_state if accept : if event_type == KEY_DOWN : _logically_pressed_keys [ scan_code ] = event elif event_type == KEY_UP and scan_code in _logically_pressed_keys : del _logically_pressed_keys [ scan_code ] # Queue for handlers that won ' t block the event . self . queue . put ( event ) return accept
def _turn(self, speed, degrees, brake=True, block=True):
    """Rotate in place 'degrees'. Both wheels must turn at the same speed
    for us to rotate in place.

    :param speed: wheel speed to use for the turn
    :param degrees: positive rotates clockwise, negative counter-clockwise
    :param brake: hold the motors when the turn finishes
    :param block: wait for the turn to complete before returning
    """
    # The distance each wheel needs to travel.
    distance_mm = (abs(degrees) / 360) * self.circumference_mm

    # The number of wheel rotations needed to cover distance_mm.
    # Fix: this was redundantly recomputed inside the counter-clockwise
    # branch; compute it exactly once.
    rotations = distance_mm / self.wheel.circumference_mm

    log.debug("%s: turn() degrees %s, distance_mm %s, rotations %s, degrees %s" %
              (self, degrees, distance_mm, rotations, degrees))

    if degrees > 0:
        # Clockwise: left wheel forward, right wheel backward.
        MoveTank.on_for_rotations(self, speed, speed * -1, rotations, brake, block)
    else:
        # Counter-clockwise: left wheel backward, right wheel forward.
        MoveTank.on_for_rotations(self, speed * -1, speed, rotations, brake, block)
def to_file_path(self, path_prefix):
    """Write the embedding matrix and the vocab to <path_prefix>.npy and
    <path_prefix>.vocab.

    :param (str) path_prefix: path prefix of the saved files
    """
    # Open both destination files, then delegate the actual serialization.
    files_cm = self._path_prefix_to_files(path_prefix, 'w')
    with files_cm as (array_file, vocab_file):
        self.to_files(array_file, vocab_file)
def rebuild(self, **kwargs):
    '''Repopulate the node-tracking data structures.

    Shouldn't really ever be needed.
    '''
    # Clear every tracking structure, then re-register the whole tree
    # starting from the root node.
    self.nodes, self.node_types = [], []
    self.id_dict, self.type_dict = {}, {}
    self.add_node(self.root)
def ip4_address(self):
    """Returns the IPv4 address of the network interface.

    If multiple interfaces are provided, the address of the first found
    is returned.
    """
    # Lazily resolve and cache the address; a lookup only makes sense
    # when a network is attached and nothing is cached yet.
    needs_lookup = self._ip4_address is None and self.network is not None
    if needs_lookup:
        self._ip4_address = self._get_ip_address(libvirt.VIR_IP_ADDR_TYPE_IPV4)
    return self._ip4_address
def cursor(self):
    """Returns a cursor for the currently assembled query, creating it if
    it doesn't already exist.
    """
    cached = self._active_cursor
    if cached:
        return cached
    # Lazily build and memoize the cursor on first access.
    self._active_cursor = self.model.find(self.query,
                                          self.projection or None,
                                          **self.options)
    return self._active_cursor
def get_ticket_for_sns_token(self):
    """This is a shortcut for getting the sns_token, as a post data of
    request body.
    """
    # Log the request being shortcut before assembling the payload.
    self.logger.info("%s\t%s" % (self.request_method, self.request_url))
    payload = {
        "openid": self.get_openid(),
        "persistent_code": self.get_persistent_code(),
    }
    return payload
def parse(self, data, extent, desc_tag):
    # type: (bytes, int, UDFTag) -> None
    '''Parse the passed in data into a UDF Anchor Volume Structure.

    Parameters:
     data - The data to parse.
     extent - The extent that this descriptor currently lives at.
     desc_tag - A UDFTag object that represents the Descriptor Tag.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Anchor Volume Structure already initialized')

    unpacked = struct.unpack_from(self.FMT, data, 0)
    # The first field is the on-disk tag, which we ignore in favour of
    # the already-parsed desc_tag argument.
    (_tag_unused,
     self.main_vd_length, self.main_vd_extent,
     self.reserve_vd_length, self.reserve_vd_extent) = unpacked

    self.desc_tag = desc_tag
    self.orig_extent_loc = extent
    self._initialized = True
def plot_comp(df_var, fig=None, ax=None, **kwargs):
    """Plot a simulated-vs-observed comparison with a 1:1 reference line.

    Parameters
    ----------
    df_var : pd.DataFrame
        DataFrame containing 'Obs' and 'Sim' columns to plot, with
        datetime as index.
    fig : matplotlib figure, optional
    ax : matplotlib axes, optional

    Returns
    -------
    (fig, ax)
        Figure and axes showing the 1:1 line plot.
    """
    # Reuse whatever figure/axes the caller supplied.
    if fig is None and ax is None:
        fig, ax = plt.subplots()
    elif fig is None:
        fig = ax.get_figure()
    elif ax is None:
        ax = fig.gca()

    # Fit statistics are computed only on rows where both series exist.
    df_var_fit = df_var.dropna(how='any')
    val_x = df_var_fit['Obs']
    val_y = df_var_fit['Sim']
    slope, intercept, r_value, p_value, std_err = stats.linregress(val_x, val_y)
    mae = (val_y - val_x).abs().mean()

    # BUG FIX: linregress returns the correlation coefficient r, but the
    # label advertises R^2 — square it before formatting.
    label = ("y={0:.2f}x{1}{2:.2f}".format(slope, '+' if intercept > 0 else '', intercept)
             + '\n' + '$R^2$={0:.4f}'.format(r_value ** 2)
             + '\n' + 'MAE={0:.2f}'.format(mae)
             + '\n' + 'n={}'.format(df_var.shape[0]))
    sns.regplot(x='Obs', y='Sim', data=df_var, ax=ax, fit_reg=True,
                line_kws={'label': label}, **kwargs)
    ax.legend()

    # Equalize the plotting range on both axes so the reference line is 45°.
    x0, x1 = ax.get_xlim()
    y0, y1 = ax.get_ylim()
    lim_low, lim_high = np.min([x0, y0]), np.max([x1, y1])
    ax.set_xlim(lim_low, lim_high)
    ax.set_ylim(lim_low, lim_high)
    # Set 1:1 aspect ratio.
    ax.set_aspect('equal')
    # Add the 1:1 line behind the data.
    ax.plot([lim_low, lim_high], [lim_low, lim_high],
            color='red', linewidth=1, zorder=0)
    return fig, ax
def invoke(self, ctx):
    """Given a context, this invokes the attached callback (if it exists)
    in the right way.
    """
    if self.callback is None:
        return None
    # Drive the coroutine-based invocation to completion synchronously.
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(self.async_invoke(ctx))
def create_platform(platform):
    '''
    .. versionadded:: 2019.2.0

    Create a new device platform

    platform
        String of device platform, e.g., ``junos``

    CLI Example:

    .. code-block:: bash

        salt myminion netbox.create_platform junos
    '''
    # Creating a platform that already exists is reported as a failure.
    existing = get_('dcim', 'platforms', slug=slugify(platform))
    if existing:
        return False

    payload = {'name': platform, 'slug': slugify(platform)}
    created = _add('dcim', 'platforms', payload)
    return {'dcim': {'platforms': payload}} if created else False
def parse_cmd ( self , tree , inp_cmd = None ) : """Extract command and options from string . The tree argument should contain a specifically formatted dict which describes the available commands , options , arguments and callbacks to methods for completion of arguments . TODO : document dict format The inp _ cmd argument should contain a list of strings containing the complete command to parse , such as sys . argv ( without the first element which specified the command itself ) ."""
# reset state from previous execution self . exe = None self . arg = None self . exe_options = { } self . children = tree [ 'children' ] self . key = tree [ 'children' ] option_parsing = False self . _scoop_rest_arguments = False if inp_cmd is not None : self . inp_cmd = inp_cmd # iterate the list of inputted commands i = 0 while i < len ( self . inp_cmd ) : p = self . inp_cmd [ i ] self . key = { } # Find which of the valid commands matches the current element of inp _ cmd if self . children is not None : self . key_complete = False match = False for param , content in self . children . items ( ) : # match string to command if param . find ( p ) == 0 : self . key [ param ] = content match = True # If we have an exact match , make sure that # is the only element in self . key if p == param and len ( self . inp_cmd ) > i + 1 : self . key_complete = True self . key = { param : content } break # if we are in scoop - rest - mode , place elements not matching # anything in argument - array if not match : if self . _scoop_rest_arguments : self . arg . append ( p ) else : raise InvalidCommand ( "Invalid argument: " + p ) else : raise InvalidCommand ( 'ran out of parameters; command too long' ) # Note that there are two reasons self . key can contain entries : # 1 ) The current string ( p ) contained something and matched a param # 2 ) The current string ( p ) is empty and matches all children # If p is empty we don ' t really have a match but still need to # have data in self . key to show all possible completions at this # level . Therefore , we skip the command matching stuff when # len ( p ) = = 0 if len ( p ) != 0 and len ( self . key ) == 1 : key , val = list ( self . key . items ( ) ) [ 0 ] i , option_parsing = self . _examine_key ( key , val , p , i , option_parsing ) i += 1
def program_files(self, executable):
    """Determine the file paths to be adopted.

    :param executable: path to the tool's executable (kept for interface
        compatibility; not consulted here).
    :returns: the list of required paths for the detected major version.
    :raises ValueError: for versions below 6, which previously crashed
        with an UnboundLocalError.
    """
    # Call _get_version() once instead of once per branch.
    version = self._get_version()
    if version == 6:
        paths = self.REQUIRED_PATHS_6
    elif version > 6:
        paths = self.REQUIRED_PATHS_7_1
    else:
        # BUG FIX: versions < 6 previously fell through with `paths`
        # unbound; fail with a clear message instead.
        raise ValueError('Unsupported tool version: {}'.format(version))
    return paths
def _set_role(self, v, load=False):
    """Setter method for role, mapped from YANG variable /role (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_role is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_role()
    directly.
    """
    # Auto-generated (pyangbind-style) setter: coerce the input into the
    # YANG container type, or raise a descriptive error if incompatible.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=role.role, is_container='container', presence=False, yang_name="role", rest_name="role", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role configuration', u'cli-incomplete-no': None, u'sort-priority': u'9'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """role must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=role.role, is_container='container', presence=False, yang_name="role", rest_name="role", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role configuration', u'cli-incomplete-no': None, u'sort-priority': u'9'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""",
        })
    self.__role = t
    # Trigger any registered change hook, if the class provides one.
    if hasattr(self, '_set'):
        self._set()
def email(value, allow_empty=False, **kwargs):
    """Validate that ``value`` is a valid email address.

    .. note::

      Email address validation is... complicated. The methodology that we
      have adopted here is *generally* compliant with
      `RFC 5322 <https://tools.ietf.org/html/rfc5322>`_ and uses a
      combination of string parsing and regular expressions.

      String parsing in particular is used to validate certain *highly
      unusual* but still valid email patterns, including the use of escaped
      text and comments within an email address' local address (the user
      name part).

      This approach ensures more complete coverage for unusual edge cases,
      while still letting us use regular expressions that perform quickly.

    :param value: The value to validate.
    :type value: :class:`str <python:str>` / :obj:`None <python:None>`

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: ``value`` / :obj:`None <python:None>`
    :rtype: :class:`str <python:str>` / :obj:`None <python:None>`

    :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is
      ``False``
    :raises CannotCoerceError: if ``value`` is not a
      :class:`str <python:str>` or :obj:`None <python:None>`
    :raises InvalidEmailError: if ``value`` is not a valid email address or
      empty with ``allow_empty`` set to ``True``
    """
    # pylint: disable=too-many-branches,too-many-statements,R0914
    if not value and not allow_empty:
        raise errors.EmptyValueError('value (%s) was empty' % value)
    elif not value:
        return None

    if not isinstance(value, basestring):
        raise errors.CannotCoerceError('value must be a valid string, '
                                       'was %s' % type(value))

    if '@' not in value:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)

    # Strip a single RFC 5322 comment "(...)" out of the address before
    # further validation; unbalanced parentheses are rejected.
    if '(' in value and ')' in value:
        open_parentheses = value.find('(')
        close_parentheses = value.find(')') + 1

        if close_parentheses < open_parentheses:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

        commented_value = value[open_parentheses:close_parentheses]
        value = value.replace(commented_value, '')
    elif '(' in value:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)
    elif ')' in value:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)

    # Angle brackets are only acceptable inside a quoted local part.
    if '<' in value or '>' in value:
        lt_position = value.find('<')
        gt_position = value.find('>')
        first_quote_position = -1
        second_quote_position = -1

        if lt_position >= 0:
            first_quote_position = value.find('"', 0, lt_position)
        if gt_position >= 0:
            second_quote_position = value.find('"', gt_position)

        if first_quote_position < 0 or second_quote_position < 0:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

    # Multiple '@' signs are only valid when every extra one sits inside a
    # quoted string in the local part.
    at_count = value.count('@')
    if at_count > 1:
        last_at_position = 0
        last_quote_position = 0
        for x in range(0, at_count):                      # pylint: disable=W0612
            at_position = value.find('@', last_at_position + 1)
            if at_position >= 0:
                first_quote_position = value.find('"',
                                                  last_quote_position,
                                                  at_position)
                second_quote_position = value.find('"', first_quote_position)
                if first_quote_position < 0 or second_quote_position < 0:
                    raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)
                last_at_position = at_position
                last_quote_position = second_quote_position

    split_values = value.split('@')
    if len(split_values) < 2:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)

    # Everything before the last '@' is the local part; after it, the domain.
    local_value = ''.join(split_values[:-1])
    domain_value = split_values[-1]
    is_domain = False
    is_ip = False

    try:
        # A bracketed domain ("[...]") is a literal address.
        if domain_value.startswith('[') and domain_value.endswith(']'):
            domain_value = domain_value[1:-1]
        domain(domain_value)
        is_domain = True
    except ValueError:
        is_domain = False

    if not is_domain:
        try:
            ip_address(domain_value, force_run=True)      # pylint: disable=E1123
            is_ip = True
        except ValueError:
            is_ip = False

    if not is_domain and is_ip:
        # IP-literal domain: validate the local part alone by recursing
        # against a known-good domain.
        try:
            email(local_value + '@test.com', force_run=True)  # pylint: disable=E1123
        except ValueError:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)
        return value

    if not is_domain:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)
    else:
        # Regular domain: the fast regex path, plus sanity checks on any
        # text surrounding the regex match.
        is_valid = EMAIL_REGEX.search(value)

        if not is_valid:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

        matched_string = is_valid.group(0)
        position = value.find(matched_string)
        if position > 0:
            prefix = value[:position]
            if prefix[0] in string_.punctuation:
                raise errors.InvalidEmailError('value (%s) is not a valid email '
                                               'address' % value)
            if '..' in prefix:
                raise errors.InvalidEmailError('value (%s) is not a valid email '
                                               'address' % value)

        end_of_match = position + len(matched_string)
        suffix = value[end_of_match:]
        if suffix:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

    return value
def validate_query(query):
    """Simple helper function to indicate whether a search query is a
    valid FTS5 query.

    Note: this simply looks at the characters being used, and is not
    guaranteed to catch all problematic queries.
    """
    for token in _quote_re.findall(query):
        # Quoted phrases may contain anything.
        if token.startswith('"') and token.endswith('"'):
            continue
        # Bare tokens must not contain any blacklisted ASCII characters.
        if set(token) & _invalid_ascii:
            return False
    return True
def Import(context, request):
    """Roche Cobas Taqman Model 48 analysis results import.

    Parses the uploaded results file and feeds it to the importer,
    returning a JSON string with the errors/log/warning messages.
    (Docstring fix: this handler previously claimed to be the Beckman
    Coulter Access 2 import.)
    """
    infile = request.form['rochecobas_taqman_model48_file']
    fileformat = request.form['rochecobas_taqman_model48_format']
    artoapply = request.form['rochecobas_taqman_model48_artoapply']
    override = request.form['rochecobas_taqman_model48_override']
    instrument = request.form.get('instrument', None)
    errors = []
    logs = []
    warns = []

    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    # BUG FIX: these were two independent `if` statements, so an 'rsf'
    # upload also fell into the `else` and was reported as unrecognized.
    if fileformat == 'rsf':
        parser = RocheCobasTaqmanParser(infile)
    elif fileformat == 'csv':
        parser = RocheCobasTaqmanParser(infile, "csv")
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))

    if parser:
        # Map the form selections onto importer settings.
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']

        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]

        importer = RocheCobasTaqman48Importer(parser=parser,
                                              context=context,
                                              allowed_ar_states=status,
                                              allowed_analysis_states=None,
                                              override=over,
                                              instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except Exception:
            # Best-effort import: report the traceback to the caller
            # instead of letting it propagate (narrowed from a bare except).
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}
    return json.dumps(results)
def path_tails(self, rr_id: str) -> str:
    """Return path to tails file for input revocation registry identifier.

    :param rr_id: revocation registry identifier of interest
    :return: path to tails file for input revocation registry identifier
    """
    # Delegate path resolution to the Tails helper, rooted at our
    # configured tails directory.
    tails_dir = self._dir_tails
    return Tails.linked(tails_dir, rr_id)
def Docker():
    """Get Docker setup information"""
    docker_info = {'server': {}, 'env': '', 'type': '', 'os': ''}

    # Query the docker daemon version; tolerate a missing/unreachable daemon.
    try:
        d_client = docker.from_env()
        docker_info['server'] = d_client.version()
    except Exception as e:  # pragma: no cover
        logger.error("Can't get docker info " + str(e))

    # Record the host operating system.
    docker_info['os'] = System()

    # Determine whether we're native, behind docker-machine, or remote.
    if 'DOCKER_MACHINE_NAME' in environ:
        docker_info['env'] = environ['DOCKER_MACHINE_NAME']
        docker_info['type'] = 'docker-machine'
    elif 'DOCKER_HOST' in environ:
        docker_info['env'] = environ['DOCKER_HOST']
        docker_info['type'] = 'remote'
    else:
        docker_info['type'] = 'native'
    return docker_info
async def call_cmd_async(cmdlist, stdin=None, env=None):
    """Given a command, call that command asynchronously and return the
    output.

    This function only handles `OSError` when creating the subprocess; any
    other exceptions raised either during subprocess creation or while
    exchanging data with the subprocess are the caller's responsibility to
    handle.  If such an `OSError` is caught, then returncode will be set
    to 1, and the error value will be set to the str() method of the
    exception.

    :type cmdlist: list of str
    :param stdin: string to pipe to the process
    :type stdin: str
    :return: Tuple of stdout, stderr, returncode
    :rtype: tuple[str, str, int]
    """
    # Encode the argv with the terminal's detected encoding.
    termenc = urwid.util.detected_encoding
    encoded_cmd = [part.encode(termenc) for part in cmdlist]

    environment = os.environ.copy()
    if env is not None:
        environment.update(env)
    logging.debug('ENV = %s', environment)
    logging.debug('CMD = %s', encoded_cmd)

    try:
        proc = await asyncio.create_subprocess_exec(
            *encoded_cmd,
            env=environment,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            stdin=asyncio.subprocess.PIPE if stdin else None)
    except OSError as e:
        return '', str(e), 1

    out, err = await proc.communicate(stdin.encode(termenc) if stdin else None)
    return out.decode(termenc), err.decode(termenc), proc.returncode
def get_vexrc(options, environ):
    """Get a representation of the contents of the config file.

    :returns:
        a Vexrc instance.
    """
    # Complain if the user specified a nonexistent file with --config,
    # but don't complain just because ~/.vexrc doesn't exist.
    if options.config and not os.path.exists(options.config):
        raise exceptions.InvalidVexrc("nonexistent config: {0!r}".format(options.config))

    filename = options.config if options.config else os.path.expanduser('~/.vexrc')
    return config.Vexrc.from_file(filename, environ)
def validate_properties(props, required):
    """Ensures the key set contains the base supported properties for a
    Parser.

    :param props: a set of property names to validate against those supported
    """
    props = set(props)
    required = set(required or _supported_props)

    # Every required property must appear in props.
    if not required.issubset(props):
        missing = required - props
        raise ValidationError('Missing property names: {props}',
                              props=','.join(missing),
                              missing=missing)
def add_term(self, t):
    """Add a term to this section and set its ownership.

    Should only be used on root-level terms; any other parent raises
    :class:`GenerateError`.
    """
    if t not in self.terms:
        if t.parent_term_lc == 'root':
            self.terms.append(t)
            # Register with the document without re-adding a section; this
            # section already holds the term.
            self.doc.add_term(t, add_section=False)
            t.set_ownership()
        else:
            raise GenerateError(
                "Can only add or move root-level terms. Term '{}' parent is '{}' "
                .format(t, t.parent_term_lc))
    # Sanity check: the term must now belong to a section or be the root.
    assert t.section or t.join_lc == 'root.root', t
def verify_draft_url(url):
    """Return ``True`` if the given URL has a valid draft-mode HMAC in its
    querystring.

    The token is read from the ``preview`` parameter (legacy ``edit`` is
    still honoured) in ``salt:hmac`` form and checked against
    ``get_draft_hmac(salt, path)``.  The comparison uses
    ``hmac.compare_digest`` so the check is constant-time; a token without
    a colon simply fails verification instead of raising.

    :param url: URL string to verify
    :return: bool
    """
    import hmac as _hmac  # local import: constant-time digest comparison
    parsed = urlparse.urlparse(url)
    # QueryDict requires a bytestring as its first argument
    query = QueryDict(force_bytes(parsed.query))
    # TODO Support legacy 'edit' param name for now
    preview_hmac = query.get('preview') or query.get('edit')
    if preview_hmac:
        salt, _, mac = preview_hmac.partition(':')
        # compare_digest avoids leaking the expected HMAC via timing.
        return _hmac.compare_digest(mac, get_draft_hmac(salt, parsed.path))
    return False
def _getusers(self, ids=None, names=None, match=None):
    """Return a list of users that match the given criteria.

    :kwarg ids: list of user ids to return data on
    :kwarg names: list of user names to return data on
    :kwarg match: list of patterns; returns users whose real name or
        login name match the pattern
    :raises BugzillaError: when none of the three criteria is supplied
    :raises xmlrpclib.Fault: codes 51/304/505 as documented by the
        Bugzilla ``User.get`` API (available in Bugzilla 3.4+)
    """
    params = {}
    for key, value in (('ids', ids), ('names', names), ('match', match)):
        if value:
            params[key] = self._listify(value)
    if not params:
        raise BugzillaError('_get() needs one of ids, '
                            ' names, or match kwarg.')
    return self._proxy.User.get(params)
def attention_bias_local(length, max_backward, max_forward):
    """Create a bias tensor to be added to attention logits.

    A position may attend to positions at most max_distance from it,
    forward and backwards.  This does not actually save any computation.

    Args:
      length: int
      max_backward: int, maximum distance backward to attend. Negative
        values indicate unlimited.
      max_forward: int, maximum distance forward to attend. Negative
        values indicate unlimited.

    Returns:
      a `Tensor` with shape [1, 1, length, length].
    """
    # Band matrix: 1 inside the allowed attention window, 0 outside.
    band = common_layers.ones_matrix_band_part(
        length, length, max_backward, max_forward,
        out_shape=[1, 1, length, length])
    # Positions outside the band get a large negative bias (-1e9) so they
    # are effectively masked out after the softmax.
    return -1e9 * (1.0 - band)
def get_proficiency_search_session(self, proxy):
    """Return the ``OsidSession`` for the proficiency search service.

    :param proxy: a proxy
    :type proxy: ``osid.proxy.Proxy``
    :return: a ``ProficiencySearchSession``
    :rtype: ``osid.learning.ProficiencySearchSession``
    :raise NullArgument: ``proxy`` is ``null``
    :raise OperationFailed: unable to complete request
    :raise Unimplemented: ``supports_proficiency_search()`` is ``false``

    *compliance: optional -- This method must be implemented if
    ``supports_proficiency_search()`` is ``true``.*
    """
    if not self.supports_proficiency_search():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        return sessions.ProficiencySearchSession(proxy=proxy,
                                                 runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
def format(self, formatter, link_resolver, output):
    """Format this page's markdown contents and symbols.

    Populates ``formatted_contents``, ``output_attrs`` and
    ``detailed_description``; when *output* is truthy the rendered page is
    also cached through the formatter.
    """
    # Derive a human-readable title from the file name when none was set.
    if not self.title and self.name:
        title = os.path.splitext(self.name)[0]
        self.title = os.path.basename(title).replace('-', ' ')
    self.formatted_contents = u''
    self.build_path = os.path.join(formatter.get_output_folder(self),
                                   self.link.ref)
    if self.ast:
        out, diags = cmark.ast_to_html(self.ast, link_resolver)
        for diag in diags:
            warn(diag.code, message=diag.message,
                 filename=self.source_file or self.name)
        self.formatted_contents += out
    if not self.formatted_contents:
        # No markdown body: fall back to the page-level comment.
        self.__format_page_comment(formatter, link_resolver)
    self.output_attrs = defaultdict(lambda: defaultdict(dict))
    formatter.prepare_page_attributes(self)
    self.__format_symbols(formatter, link_resolver)
    self.detailed_description = formatter.format_page(self)[0]
    if output:
        formatter.cache_page(self)
def _save_json_file(self, file, val, pretty=False, compact=True, sort=True,
                    encoder=None):
    """Save data to a json file.

    :param file: Writable file or path to file
    :type file: FileIO | str | unicode
    :param val: Value or struct to save
    :type val: None | int | float | str | list | dict
    :param pretty: Format data to be readable (default: False)
    :type pretty: bool
    :param compact: Format data to be compact (default: True)
    :type compact: bool
    :param sort: Sort keys (default: True)
    :type sort: bool
    :param encoder: Use custom json encoder
    :type encoder: T <= flotils.loadable.DateTimeEncoder
    :rtype: None
    :raises IOError: Failed to save
    """
    try:
        save_json_file(file, val, pretty, compact, sort, encoder)
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # are no longer converted into IOError.
        self.exception("Failed to save to {}".format(file))
        raise IOError("Saving file failed")
def hostname(self, value):
    """The hostname where the log message was created.

    Should be the first part of the hostname, or an IP address — NOT a
    fully qualified domain name.  ``None`` falls back to
    ``socket.gethostname()``.
    """
    self._hostname = socket.gethostname() if value is None else value
def items(stream, **kwargs):
    """Yield decoded JSON objects from *stream* as they become available.

    Currently waits in a loop for the next item.  Keyword arguments are
    forwarded to ``json.loads`` (for example ``object_pairs_hook``).
    """
    for raw in yield_json(stream):
        yield json.loads(raw, **kwargs)
def summarize(self, geom, stat=None):
    """Return a new RasterQuerySet with subsetted/summarized ndarrays.

    Arguments:
    geom -- geometry for masking or spatial subsetting
    Keyword args:
    stat -- any numpy summary stat method as str (min/max/mean/etc)
    """
    if not hasattr(geom, 'num_coords'):
        raise TypeError('Need OGR or GEOS geometry, %s found' % type(geom))
    clone = self._clone()
    for obj in clone:
        arr = obj.array(geom)
        if arr is not None:
            if stat:
                # Collapse the spatial dimensions with the named stat.
                arr = agg_dims(arr, stat)
            try:
                # Drop length-one axes left over from masking/aggregation.
                arr = arr.squeeze()
            except ValueError:
                pass
            obj.image = arr
    return clone
def select_config_sections(configfile_sections, desired_section_patterns):
    """Select a subset of the sections in a configuration file by name or
    by :mod:`fnmatch` wildcard pattern.

    Each matching section is yielded exactly once, in the order it appears
    in *configfile_sections* — previously a section matching several
    patterns was yielded once per matching pattern.

    :param configfile_sections: List of config section names (as strings).
    :param desired_section_patterns: Section names or wildcard patterns.
    :return: Selected section names or empty list (as generator).
    """
    for section_name in configfile_sections:
        if any(fnmatch(section_name, pattern)
               for pattern in desired_section_patterns):
            yield section_name
def import_end_event_to_graph(diagram_graph, process_id, process_attributes, element):
    """Adds to graph the new element that represents BPMN end event.

    End event inherits sequence of eventDefinitionRef from Event type.
    Separate methods for each event type are required since each of them
    has different variants (Message, Error, Signal etc.).

    :param diagram_graph: NetworkX graph representing a BPMN process diagram,
    :param process_id: string object, representing an ID of process element,
    :param process_attributes: dictionary that holds attribute values of
        'process' element, which is parent of imported flow node,
    :param element: object representing a BPMN XML 'endEvent' element.
    """
    # Event-definition variants an end event may carry.
    end_event_definitions = {'messageEventDefinition', 'signalEventDefinition',
                             'escalationEventDefinition', 'errorEventDefinition',
                             'compensateEventDefinition', 'terminateEventDefinition'}
    BpmnDiagramGraphImport.import_flow_node_to_graph(diagram_graph, process_id,
                                                     process_attributes, element)
    BpmnDiagramGraphImport.import_event_definition_elements(diagram_graph, element,
                                                            end_event_definitions)
def peek(self, deserialized_tx):
    """Peek into the first tx and set batch attrs on self, or raise.

    :raises BatchAlreadyProcessed: the batch is already in history.
    :raises InvalidBatchSequence: the previous batch referenced by this
        one is missing from history.
    """
    self.batch_id = deserialized_tx.object.batch_id
    self.prev_batch_id = deserialized_tx.object.prev_batch_id
    self.producer = deserialized_tx.object.producer
    if self.batch_history.exists(batch_id=self.batch_id):
        raise BatchAlreadyProcessed(
            f"Batch {self.batch_id} has already been processed")
    # A batch referencing itself starts a sequence; otherwise its
    # predecessor must already be in history.
    if self.prev_batch_id != self.batch_id:
        if not self.batch_history.exists(batch_id=self.prev_batch_id):
            raise InvalidBatchSequence(
                f"Invalid import sequence. History does not exist for prev_batch_id. "
                f"Got file='{self.filename}', prev_batch_id="
                f"{self.prev_batch_id}, batch_id={self.batch_id}.")
def write_xml_document(self, document):
    """Serialize *document* to the output stream and flush it.

    :param document: An ``ElementTree`` object.
    """
    serialized = ET.tostring(document)
    self._out.write(serialized)
    self._out.flush()
def available_method(method_name):
    '''Return the method for the earliest package in ``pkg_preferences``,
    if the package is available (based on :meth:`pkg_available`)'''
    # Work on a copy so the module-level preference list is not mutated.
    pkg_prefs_copy = list(pkg_prefs)
    # A per-method preferred package, when configured, is tried first.
    if method_name in method_prefs:
        pkg_prefs_copy = [method_prefs[method_name]] + pkg_prefs_copy
    for pkg in pkg_prefs_copy:
        if pkg in pkgs:
            if method_name in dir(pkgs[pkg]):
                return getattr(pkgs[pkg], method_name)
    # No loaded package implements the method; notify and fall through
    # (implicitly returns None).
    nl.notify('Error: Could not find implementation of %s on this computer' % (method_name),
              level=nl.level.error)
def deleteOTPK(self, otpk_pub):
    """Delete a one-time pre key, either publicly visible or hidden.

    :param otpk_pub: The public key of the one-time pre key to delete,
        encoded as a bytes-like object.
    """
    self.__checkSPKTimestamp()
    # Rebuild the lists instead of calling list.remove() while iterating
    # over them — the original pattern skips the element that follows each
    # removal, leaving duplicates behind.
    self.__otpks = [otpk for otpk in self.__otpks if otpk.pub != otpk_pub]
    self.__hidden_otpks = [otpk for otpk in self.__hidden_otpks
                           if otpk.pub != otpk_pub]
    self.__refillOTPKs()
def create_submission(student_item_dict, answer, submitted_at=None,
                      attempt_number=None):
    """Create a submission for assessment.

    Generic means by which to submit an answer for assessment.

    Args:
        student_item_dict (dict): Identifies which course, student and
            location this submission belongs to.
        answer (JSON-serializable): The answer given by the student.
        submitted_at (datetime): When this submission was submitted;
            defaults to the current date.
        attempt_number (int): Explicit attempt number.  When omitted, the
            most recent submission's attempt_number plus one is used.

    Returns:
        dict: The created submission, with keys ``student_item``,
        ``attempt_number``, ``submitted_at``, ``created_at`` and
        ``answer``.  ``submitted_at`` is configurable, whereas
        ``created_at`` is when the record is first created.

    Raises:
        SubmissionRequestError: Validation errors for the student item or
            submission (missing required values, answer too long,
            negative attempt_number, invalid submitted_at).
        SubmissionInternalError: Submission access caused an internal
            error.
    """
    student_item_model = _get_or_create_student_item(student_item_dict)
    if attempt_number is None:
        try:
            # Only the most recent submission is needed to derive the
            # next attempt number.
            submissions = Submission.objects.filter(
                student_item=student_item_model)[:1]
        except DatabaseError:
            error_message = u"An error occurred while filtering submissions for student item: {}".format(student_item_dict)
            logger.exception(error_message)
            raise SubmissionInternalError(error_message)
        attempt_number = submissions[0].attempt_number + 1 if submissions else 1
    model_kwargs = {
        "student_item": student_item_model.pk,
        "answer": answer,
        "attempt_number": attempt_number,
    }
    if submitted_at:
        model_kwargs["submitted_at"] = submitted_at
    try:
        submission_serializer = SubmissionSerializer(data=model_kwargs)
        if not submission_serializer.is_valid():
            raise SubmissionRequestError(field_errors=submission_serializer.errors)
        submission_serializer.save()
        sub_data = submission_serializer.data
        _log_submission(sub_data, student_item_dict)
        return sub_data
    except DatabaseError:
        error_message = u"An error occurred while creating submission {} for student item: {}".format(model_kwargs, student_item_dict)
        logger.exception(error_message)
        raise SubmissionInternalError(error_message)
def join(self, iterable):
    """Return a string which is the concatenation of the strings in the
    iterable, separated by this string.

    :param iterable: Join items in this iterable.
    """
    # Re-wrap the plain-str result in the same class so color tags are
    # preserved across the join.
    return self.__class__(super(ColorStr, self).join(iterable), keep_tags=True)
def warning(self):
    """Check the Stimulus for any warning conditions.

    :returns: str -- warning message, if any, 0 otherwise
    """
    signals, docs, overs = self.expandedStim()
    # Any positive overage means a component exceeds the maximum voltage.
    if np.any(np.array(overs) > 0):
        msg = 'Stimuli in this test are over the maximum allowable \
voltage output. They will be rescaled with a maximum \
undesired attenuation of {:.2f}dB.'.format(np.amax(overs))
        return msg
    return 0
def run_from_argv(self, prog, subcommand, global_options, argv):
    """Set up any environment changes requested, then run this command."""
    self.prog_name = prog
    parser = self.create_parser(prog, subcommand)
    options, args = parser.parse_args(argv)
    # Remember the parsed state on the instance before executing.
    self.global_options = global_options
    self.options = options
    self.args = args
    self.execute(args, options, global_options)
def Handle_Events(self, events):
    """Handle events from poll().

    :param events: A list of (socket, event_type) tuples from zmq.poll()
    :type events: list
    :returns: None
    """
    for sock, event_type in events:
        if event_type == zmq.POLLIN:
            # Request/reply: receive, dispatch, send the reply back.
            reply = self.Handle_Receive(sock.recv())
            sock.send(reply)
        elif event_type in (zmq.POLLOUT, zmq.POLLERR):
            pass  # FIXME -- handle these correctly
        else:
            pass
def search_in_workspace(self, workspace, params=None, **options):
    """Build complex queries to find and fetch exactly the data you need
    from Asana.  See the long-form search-API documentation for all query
    parameters and limitations.

    Parameters
    ----------
    workspace : {Id} The workspace or organization in which to search for tasks.
    [params] : {Object} Parameters for the request
    """
    # ``params`` used to default to a shared mutable ``{}``; ``None`` now
    # stands in for "no parameters" to avoid cross-call aliasing.
    if params is None:
        params = {}
    path = "/workspaces/%s/tasks/search" % (workspace)
    return self.client.get_collection(path, params, **options)
def on_channel_open(self, channel):
    """Input channel creation callback.

    Queue declaration done here.

    Args:
        channel: input channel
    """
    # Durable topic exchange for incoming messages.
    self.in_channel.exchange_declare(exchange='input_exc', type='topic',
                                     durable=True)
    channel.queue_declare(callback=self.on_input_queue_declare,
                          queue=self.INPUT_QUEUE_NAME)
def get(self, *args, **kwargs):
    """Return the single item from the filtered queryset.

    Only lookup by primary key (``pk=...``) is supported.

    :raises self.model.DoesNotExist: when the cache holds no match.
    """
    assert not args
    assert list(kwargs.keys()) == ['pk']
    pk = kwargs['pk']
    model_name = self.model.__name__
    # Spec understood by the cache backend: (model name, pk, version).
    object_spec = (model_name, pk, None)
    instances = self.cache.get_instances((object_spec,))
    try:
        model_data = instances[(model_name, pk)][0]
    except KeyError:
        raise self.model.DoesNotExist(
            "No match for %r with args %r, kwargs %r" %
            (self.model, args, kwargs))
    else:
        return CachedModel(self.model, model_data)
def run_kernel(self, func, gpu_args, threads, grid):
    """Run the OpenCL kernel passed as *func* and wait for completion.

    :param func: An OpenCL Kernel
    :type func: pyopencl.Kernel
    :param gpu_args: Kernel arguments in code order; global-memory buffers
        or single pass-by-value scalars.
    :type gpu_args: list(pyopencl.Buffer, numpy.int32, ...)
    :param threads: Work items per work group, one entry per dimension.
    :type threads: tuple(int, int, int)
    :param grid: Work groups per dimension of the NDRange.
    :type grid: tuple(int, int, int)
    """
    # NDRange global size is work-groups multiplied by work-items/group.
    global_size = tuple(g * t for g, t in zip(grid, threads))
    local_size = threads
    event = func(self.queue, global_size, local_size, *gpu_args)
    event.wait()
def smoothed_hazard_confidence_intervals_(self, bandwidth, hazard_=None):
    """Compute pointwise confidence intervals for the smoothed hazard.

    Parameters
    ----------
    bandwidth : float
        the bandwidth to use in the Epanechnikov kernel. > 0
    hazard_ : numpy array
        a computed (n,) numpy array of estimated hazard rates. If None,
        uses ``smoothed_hazard_``
    """
    if hazard_ is None:
        hazard_ = self.smoothed_hazard_(bandwidth).values[:, 0]
    timeline = self.timeline
    z = inv_normal_cdf(1 - self.alpha / 2)
    self._cumulative_sq.iloc[0] = 0
    # Per-step variance increments of the cumulative hazard.
    var_hazard_ = self._cumulative_sq.diff().fillna(self._cumulative_sq.iloc[0])
    C = var_hazard_.values != 0.0  # only consider the points with jumps
    std_hazard_ = np.sqrt(
        1.0 / (bandwidth ** 2)
        * np.dot(epanechnikov_kernel(timeline[:, None], timeline[C][None, :],
                                     bandwidth) ** 2,
                 var_hazard_.values[C]))
    # Log-transformed intervals keep both bounds positive.
    values = {
        self.ci_labels[0]: hazard_ * np.exp(z * std_hazard_ / hazard_),
        self.ci_labels[1]: hazard_ * np.exp(-z * std_hazard_ / hazard_),
    }
    return pd.DataFrame(values, index=timeline)
def create_repository(self, repository, body, params=None):
    """Register a shared file system repository.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_

    :arg repository: A repository name
    :arg body: The repository definition
    :arg master_timeout: Explicit operation timeout for connection to
        master node
    :arg timeout: Explicit operation timeout
    :arg verify: Whether to verify the repository after creation
    """
    if repository in SKIP_IN_PATH or body in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument.")
    return self.transport.perform_request(
        'PUT', _make_path('_snapshot', repository), params=params, body=body)
def create(ctx, archive_name, authority_name, versioned=True, tag=None,
           helper=False):
    '''Create an archive'''
    tags = list(tag)
    _generate_api(ctx)
    # Remaining CLI tokens become archive metadata; bare positional
    # arguments are rejected.
    args, kwargs = _parse_args_and_kwargs(ctx.args)
    assert len(args) == 0, 'Unrecognized arguments: "{}"'.format(args)
    var = ctx.obj.api.create(
        archive_name,
        authority_name=authority_name,
        versioned=versioned,
        metadata=kwargs,
        tags=tags,
        helper=helper)
    verstring = 'versioned archive' if versioned else 'archive'
    click.echo('created {} {}'.format(verstring, var))
def consume(self, source):
    """Parse source and consume tokens from tinycss2.

    Arguments:
        source (string): Source content to parse.

    Returns:
        dict: Retrieved rules.
    """
    manifest = OrderedDict()
    rules = parse_stylesheet(
        source,
        skip_comments=True,
        skip_whitespace=True,
    )
    for rule in rules:
        # Gather rule selector + properties
        name = self.digest_prelude(rule)
        # Ignore everything out of styleguide namespace
        if not name.startswith(RULE_BASE_PREFIX):
            continue
        properties = self.digest_content(rule)
        manifest[name] = properties
    return manifest
def purge(self, ignore_ignores):
    """Delete everything that shows up on ``svn status``.

    Returns a Deferred firing with the status command's return code.
    """
    command = ['status', '--xml']
    if ignore_ignores:
        command.append('--no-ignore')
    d = self._dovccmd(command, collectStdout=True)

    @d.addCallback
    def parseAndRemove(stdout):
        # Resolve each unversioned path relative to the workdir.
        files = []
        for filename in self.getUnversionedFiles(stdout, self.keep_on_purge):
            filename = self.build.path_module.join(self.workdir, filename)
            files.append(filename)
        if not files:
            d = defer.succeed(0)
        else:
            # Older workers lack the rmdir command; fall back to per-file
            # removal.
            if self.workerVersionIsOlderThan('rmdir', '2.14'):
                d = self.removeFiles(files)
            else:
                d = self.runRmdir(files, abandonOnFailure=False,
                                  timeout=self.timeout)
        return d

    @d.addCallback
    def evaluateCommand(rc):
        if rc != 0:
            log.msg("Failed removing files")
            raise buildstep.BuildStepFailed()
        return rc
    return d
def _get_translation(self, ims_width, ims_height):
    """Return (x, y) offsets for a bitmap translation, honouring the
    cell's justification, vertical alignment and rotation angle.
    """
    # Get cell attributes
    cell_attributes = self.code_array.cell_attributes[self.key]
    justification = cell_attributes["justification"]
    vertical_align = cell_attributes["vertical_align"]
    angle = cell_attributes["angle"]
    scale_x, scale_y = self._get_scalexy(ims_width, ims_height)
    # The smaller scale preserves the aspect ratio inside the cell rect.
    scale = min(scale_x, scale_y)
    if angle not in (90, 180, -90):
        # Standard direction
        x = -2  # Otherwise there is a white border
        y = -2  # Otherwise there is a white border
        if scale_x > scale_y:
            # Horizontal slack: distribute along x per justification.
            if justification == "center":
                x += (self.rect[2] - ims_width * scale) / 2
            elif justification == "right":
                x += self.rect[2] - ims_width * scale
        else:
            # Vertical slack: distribute along y per vertical alignment.
            if vertical_align == "middle":
                y += (self.rect[3] - ims_height * scale) / 2
            elif vertical_align == "bottom":
                y += self.rect[3] - ims_height * scale
    if angle == 90:
        # Rotated a quarter turn: axes swap, offsets mirror accordingly.
        x = -ims_width * scale + 2
        y = -2
        if scale_y > scale_x:
            if justification == "center":
                y += (self.rect[2] - ims_height * scale) / 2
            elif justification == "right":
                y += self.rect[2] - ims_height * scale
        else:
            if vertical_align == "middle":
                x -= (self.rect[3] - ims_width * scale) / 2
            elif vertical_align == "bottom":
                x -= self.rect[3] - ims_width * scale
    elif angle == 180:
        # Upside down: both offsets are mirrored.
        x = -ims_width * scale + 2
        y = -ims_height * scale + 2
        if scale_x > scale_y:
            if justification == "center":
                x -= (self.rect[2] - ims_width * scale) / 2
            elif justification == "right":
                x -= self.rect[2] - ims_width * scale
        else:
            if vertical_align == "middle":
                y -= (self.rect[3] - ims_height * scale) / 2
            elif vertical_align == "bottom":
                y -= self.rect[3] - ims_height * scale
    elif angle == -90:
        # Rotated a quarter turn the other way.
        x = -2
        y = -ims_height * scale + 2
        if scale_y > scale_x:
            if justification == "center":
                y -= (self.rect[2] - ims_height * scale) / 2
            elif justification == "right":
                y -= self.rect[2] - ims_height * scale
        else:
            if vertical_align == "middle":
                x += (self.rect[3] - ims_width * scale) / 2
            elif vertical_align == "bottom":
                x += self.rect[3] - ims_width * scale
    return x, y
def load_dom(self, domtree, initialize=True):
    """Load manifest from DOM tree.

    If initialize is True (default), reset existing attributes first.
    """
    if domtree.nodeType == Node.DOCUMENT_NODE:
        rootElement = domtree.documentElement
    elif domtree.nodeType == Node.ELEMENT_NODE:
        rootElement = domtree
    else:
        raise InvalidManifestError(
            "Invalid root element node type " + str(rootElement.nodeType) +
            " - has to be one of (DOCUMENT_NODE, "
            "ELEMENT_NODE)")
    allowed_names = ("assembly", "assemblyBinding", "configuration",
                     "dependentAssembly")
    if rootElement.tagName not in allowed_names:
        raise InvalidManifestError(
            "Invalid root element <%s> - has to be one of <%s>" %
            (rootElement.tagName, ">, <".join(allowed_names)))
    # logger.info("loading manifest metadata from element <%s>", rootElement.tagName)
    if rootElement.tagName == "configuration":
        # <configuration> only wraps the interesting <assemblyBinding>
        # nodes: recurse into each of them.
        for windows in rootElement.getCEByTN("windows"):
            for assemblyBinding in windows.getCEByTN("assemblyBinding"):
                self.load_dom(assemblyBinding, initialize)
    else:
        if initialize:
            self.__init__()
        self.manifestType = rootElement.tagName
        self.manifestVersion = [
            int(i) for i in
            (rootElement.getA("manifestVersion") or "1.0").split(".")]
        self.noInheritable = bool(rootElement.getFCEByTN("noInheritable"))
        self.noInherit = bool(rootElement.getFCEByTN("noInherit"))
        for assemblyIdentity in rootElement.getCEByTN("assemblyIdentity"):
            self.type = assemblyIdentity.getA("type") or None
            self.name = assemblyIdentity.getA("name") or None
            self.language = assemblyIdentity.getA("language") or None
            self.processorArchitecture = assemblyIdentity.getA(
                "processorArchitecture") or None
            version = assemblyIdentity.getA("version")
            if version:
                self.version = [int(i) for i in version.split(".")]
            self.publicKeyToken = assemblyIdentity.getA("publicKeyToken") or None
        for publisherPolicy in rootElement.getCEByTN("publisherPolicy"):
            self.applyPublisherPolicy = (
                publisherPolicy.getA("apply") or "").lower() == "yes"
        for description in rootElement.getCEByTN("description"):
            if description.firstChild:
                self.description = description.firstChild.wholeText
        for trustInfo in rootElement.getCEByTN("trustInfo"):
            for security in trustInfo.getCEByTN("security"):
                for reqPriv in security.getCEByTN("requestedPrivileges"):
                    for reqExeLev in reqPriv.getCEByTN("requestedExecutionLevel"):
                        self.requestedExecutionLevel = reqExeLev.getA("level")
                        self.uiAccess = (
                            reqExeLev.getA("uiAccess") or "").lower() == "true"
        # An <assemblyBinding> root IS the dependency container; otherwise
        # collect the explicit <dependency> children.
        if rootElement.tagName == "assemblyBinding":
            dependencies = [rootElement]
        else:
            dependencies = rootElement.getCEByTN("dependency")
        for dependency in dependencies:
            for dependentAssembly in dependency.getCEByTN("dependentAssembly"):
                manifest = ManifestFromDOM(dependentAssembly)
                if not manifest.name:
                    # invalid, skip
                    continue
                manifest.optional = (
                    dependency.getA("optional") or "").lower() == "yes"
                self.dependentAssemblies.append(manifest)
                if self.filename:
                    # Enable search for private assembly by assigning bogus
                    # filename (only the directory has to be correct)
                    self.dependentAssemblies[-1].filename = ":".join(
                        (self.filename, manifest.name))
        for bindingRedirect in rootElement.getCEByTN("bindingRedirect"):
            oldVersion = [
                [int(i) for i in part.split(".")]
                for part in bindingRedirect.getA("oldVersion").split("-")]
            newVersion = [
                int(i) for i in bindingRedirect.getA("newVersion").split(".")]
            self.bindingRedirects.append((oldVersion, newVersion))
        for file_ in rootElement.getCEByTN("file"):
            self.add_file(name=file_.getA("name"),
                          hashalg=file_.getA("hashalg"),
                          hash=file_.getA("hash"))
def _find_types ( pkgs ) : '''Form a package names list , find prefixes of packages types .'''
return sorted ( { pkg . split ( ':' , 1 ) [ 0 ] for pkg in pkgs if len ( pkg . split ( ':' , 1 ) ) == 2 } )
def maps_get_default_rules_output_rules_rbridgeid(self, **kwargs):
    """Auto Generated Code

    Builds the <maps_get_default_rules>/<output>/<rules>/<rbridgeid>
    request tree and hands it to the callback.
    """
    config = ET.Element("config")
    root = ET.Element("maps_get_default_rules")
    # The RPC element replaces the initial <config> element as the root
    # that is actually sent.
    config = root
    rules_node = ET.SubElement(ET.SubElement(root, "output"), "rules")
    ET.SubElement(rules_node, "rbridgeid").text = kwargs.pop('rbridgeid')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def moderators(self, limit=None):
    """GET the moderators of this subreddit via
    :meth:`narwal.Reddit.moderators`.

    :param limit: max number of items to return
    """
    reddit = self._reddit
    return reddit.moderators(self.display_name, limit=limit)
def median_grouped(name, num, minimum=0, maximum=0, ref=None):
    '''Calculates the grouped median of the ``num`` most recent values.
    Requires a list.

    USAGE:

    .. code-block:: yaml

        foo:
          calc.median_grouped:
            - name: myregentry
            - num: 5
    '''
    # Thin wrapper: all statistics beacons share the generic calc().
    return calc(name=name, num=num, oper='median_grouped', minimum=minimum,
                maximum=maximum, ref=ref)
def from_conll(this_class, stream):
    """Construct a Corpus from an iterable of CoNLL-X formatted lines.

    Sentences are read until an empty one signals the end of the stream.
    """
    lines = iter(stream)
    corpus = this_class()
    while True:
        sentence = Sentence.from_conll(lines)
        if not sentence:
            break
        corpus.append(sentence)
    return corpus
def parseParams(string):
    """Parse ``name=value`` parameters out of *string*.

    Returns a list of lists, each of the form ``[name, value, ...]`` with
    one entry per value found for that parameter.
    """
    # Renamed from ``all``, which shadowed the builtin of the same name.
    matches = params_re.findall(string)
    allParameters = []
    for tup in matches:
        paramList = [tup[0]]  # tup looks like (name, valuesString)
        for pair in param_values_re.findall(tup[1]):
            # pair looks like ('', value) or (value, '')
            if pair[0] != '':
                paramList.append(pair[0])
            else:
                paramList.append(pair[1])
        allParameters.append(paramList)
    return allParameters
def querysets_from_title_prefix(title_prefix=None, model=DEFAULT_MODEL, app=DEFAULT_APP):
    """Return a list of Querysets from a list of model numbers"""
    if title_prefix is None:
        title_prefix = [None]
    filter_dicts = []
    model_list = []
    if isinstance(title_prefix, basestring):
        title_prefix = title_prefix.split(',')
    elif not isinstance(title_prefix, dict):
        title_prefix = title_prefix
    if isinstance(title_prefix, (list, tuple)):
        # NOTE(review): the loop variable shadows the outer ``title_prefix``
        # list — confirm this rebinding is intentional.
        for i, title_prefix in enumerate(title_prefix):
            if isinstance(title_prefix, basestring):
                if title_prefix.lower().endswith('sales'):
                    title_prefix = title_prefix[:-5].strip('_')
                    # NOTE(review): ``str += [str]`` would raise TypeError at
                    # runtime — this branch looks unreachable-tested; verify.
                    title_prefix += [title_prefix]
                    model_list += ['WikiItem']
                else:
                    model_list += [DEFAULT_MODEL]
                filter_dicts += [{'model__startswith': title_prefix}]
    elif isinstance(title_prefix, dict):
        filter_dicts = [title_prefix]
    elif isinstance(title_prefix, (list, tuple)):
        # NOTE(review): dead branch — the same isinstance test already
        # matched above; confirm intended order of checks.
        filter_dicts = util.listify(title_prefix)
    model = get_model(model, app)
    querysets = []
    for filter_dict, model in zip(filter_dicts, model_list):
        filter_dict = filter_dict or {}
        querysets += [model.objects.filter(**filter_dict)]
    # NOTE(review): no value is returned although the docstring promises a
    # list of Querysets — callers likely expect ``return querysets``.
def explain_weights_df(estimator, **kwargs):
    # type: (...) -> pd.DataFrame
    """Explain weights and export them to ``pandas.DataFrame``.

    All keyword arguments are passed to :func:`eli5.explain_weights`.
    Weights of all features are exported by default.
    """
    # _set_defaults fills in the module's export defaults (e.g. top=None).
    kwargs = _set_defaults(kwargs)
    return format_as_dataframe(eli5.explain_weights(estimator, **kwargs))
def from_callback(cls, cb, nx=None, nparams=None, **kwargs):
    """Generate a SymbolicSys instance from a callback.

    Parameters
    ----------
    cb : callable
        Should have the signature ``cb(x, p, backend) -> list of exprs``.
    nx : int
        Number of unknowns; when not given it is deduced from
        ``kwargs['names']`` (requires ``x_by_name=True``).
    nparams : int
        Number of parameters; when not given it is deduced from
        ``kwargs['param_names']`` (requires ``par_by_name=True``),
        defaulting to 0.
    \\*\\*kwargs :
        Keyword arguments passed on to :class:`SymbolicSys`.
        See also :class:`pyneqsys.NeqSys`.

    Raises
    ------
    ValueError
        When ``x_by_name``/``par_by_name`` are requested without the
        corresponding name lists, when an explicit ``nx``/``nparams``
        disagrees with the length of those lists, or when ``nx`` cannot
        be determined at all.

    Examples
    --------
    >>> symbolicsys = SymbolicSys.from_callback(lambda x, p, be: [
    ...     x[0]*x[1] - p[0],
    ...     be.exp(-x[0]) + be.exp(-x[1]) - p[0]**-2
    ... ], 2, 1)

    """
    # Deduce (or cross-check) nx from the names list when unknowns are
    # addressed by name.
    if kwargs.get('x_by_name', False):
        if 'names' not in kwargs:
            raise ValueError("Need ``names`` in kwargs.")
        if nx is None:
            nx = len(kwargs['names'])
        elif nx != len(kwargs['names']):
            raise ValueError("Inconsistency between nx and length of ``names``.")
    # Same deduction for the parameters.
    if kwargs.get('par_by_name', False):
        if 'param_names' not in kwargs:
            raise ValueError("Need ``param_names`` in kwargs.")
        if nparams is None:
            nparams = len(kwargs['param_names'])
        elif nparams != len(kwargs['param_names']):
            raise ValueError("Inconsistency between ``nparam`` and length of ``param_names``.")
    if nparams is None:
        nparams = 0  # a parameter-free system is allowed
    if nx is None:
        raise ValueError("Need ``nx`` of ``names`` together with ``x_by_name==True``.")
    be = Backend(kwargs.pop('backend', None))
    # Create real-valued symbol arrays for the unknowns and parameters.
    x = be.real_symarray('x', nx)
    p = be.real_symarray('p', nparams)
    # The callback may address unknowns/parameters by name (dict) or
    # positionally (array), depending on the *_by_name flags.
    _x = dict(zip(kwargs['names'], x)) if kwargs.get('x_by_name', False) else x
    _p = dict(zip(kwargs['param_names'], p)) if kwargs.get('par_by_name', False) else p
    try:
        exprs = cb(_x, _p, be)
    except TypeError:
        # Callback may take fewer arguments; adapt it to the 3-arg form.
        exprs = _ensure_3args(cb)(_x, _p, be)
    return cls(x, exprs, p, backend=be, **kwargs)
def get_equipment(self, **kwargs):
    """Fetch equipment records from the ``api/v3/equipment/`` endpoint.

    Keyword arguments are encoded into the request URL by ``prepare_url``.
    """
    url = self.prepare_url('api/v3/equipment/', kwargs)
    return super(ApiEquipment, self).get(url)
def calculate_size(name, function):
    """Calculate the request payload size for the given name and function."""
    total = calculate_size_str(name)
    total += calculate_size_data(function)
    return total
def kraus_iscomplete(kraus: Kraus) -> bool:
    """Return True if the collection of (weighted) Kraus operators is complete.

    Completeness (the weighted sum of K^H K equalling the identity) is
    necessary for a CPTP map to preserve trace.
    """
    qubits = kraus.qubits
    ident = Gate(np.eye(2 ** kraus.qubit_nb), qubits)  # FIXME
    weighted = [w * (op.H @ op @ ident).asoperator()
                for op, w in zip(kraus.operators, kraus.weights)]
    total = reduce(np.add, weighted)
    return almost_identity(Gate(total, qubits))
def trend(self, **kwargs):
    '''Calculate a trend for every series in the group.

    See the `TimeSeries.trend()` method for more information; keyword
    arguments are passed through to each series.
    '''
    columns = {}
    for name, series in self.groups.iteritems():
        columns[name] = series.trend(**kwargs)
    return DataFrame(columns)
def get_state(key, namespace=None, table_name=None, environment=None,
              layer=None, stage=None, shard_id=None, consistent=True,
              deserializer=json.loads,
              wait_exponential_multiplier=500,
              wait_exponential_max=5000,
              stop_max_delay=10000):
    """Get Lambda state value(s) from the DynamoDB state table.

    :param key: State key to look up.
    :param namespace: Optional namespace, prepended to the key as "ns:key".
    :param table_name: DynamoDB table holding the state; when ``None`` it is
        derived from ``environment``/``layer``/``stage``.
    :param shard_id: Optional shard id, prepended in front of the
        namespaced key.
    :param consistent: Use a strongly consistent DynamoDB read.
    :param deserializer: Callable applied to the stored value
        (default: ``json.loads``); falsy to skip deserialization.
    :param wait_exponential_multiplier: Retry backoff multiplier (ms).
    :param wait_exponential_max: Retry backoff cap (ms).
    :param stop_max_delay: Total retry budget (ms).
    :returns: The (deserialized) value, or ``None`` when the item is missing.
    :raises StateTableError: When no table name can be produced.
    """
    if table_name is None:
        table_name = _state_table_name(environment=environment, layer=layer,
                                       stage=stage)
    if not table_name:
        msg = ("Can't produce state table name: unable to get state "
               "item '{}'".format(key))
        logger.error(msg)
        raise StateTableError(msg)
        return  # NOTE(review): unreachable after raise; kept from original
    dynamodb = boto3.resource("dynamodb")
    table = dynamodb.Table(table_name)
    logger.info("Getting key '{}' from table '{}'".format(key, table_name))
    # Compose the fully qualified item id: shard_id then namespace are
    # prepended in turn, yielding "shard:namespace:key".
    if namespace:
        key = "{}:{}".format(namespace, key)
    if shard_id:
        key = "{}:{}".format(shard_id, key)

    @retry(retry_on_exception=_is_critical_exception,
           wait_exponential_multiplier=wait_exponential_multiplier,
           wait_exponential_max=wait_exponential_max,
           stop_max_delay=stop_max_delay)
    def get_item():
        # Fetch the item, translating critical DynamoDB failures into
        # CriticalError so the @retry predicate retries them.
        try:
            return table.get_item(
                Key={"id": key},
                ConsistentRead=consistent).get("Item", {}).get("value")
        except Exception as err:
            if _is_dynamodb_critical_exception(err):
                raise CriticalError(err)
            else:
                raise

    value = get_item()
    if not value:
        return
    if deserializer:
        try:
            value = deserializer(value)
        except ValueError:
            # For backwards compatibility: plain strings are allowed
            logger.error("Unable to json-deserialize value '{}'".format(value))
            return value
    return value
def allocate(self, handles, initial=False, params=None):
    """Call from main thread. Initiate a request for more environments.

    :param handles: iterable of handle strings; each must be purely numeric.
    :param initial: whether this is the initial allocation request.
    :param params: optional dict of extra parameters forwarded with the
        request (previously a mutable ``{}`` default, which was shared
        across calls — fixed to a ``None`` sentinel).
    :raises AssertionError: if any handle is not a string of digits.
    """
    if params is None:
        params = {}
    # Raw string for the regex (avoids the invalid-escape deprecation).
    assert all(re.search(r'^\d+$', h) for h in handles), \
        "All handles must be numbers: {}".format(handles)
    self.requests.put(('allocate', (handles, initial, params)))
def vm_update(name, kwargs=None, call=None):
    '''
    Replaces the user template contents.

    .. versionadded:: 2016.3.0

    name
        The name of the VM to update.

    path
        The path to a file containing new user template contents. Syntax
        within the file can be the usual attribute=value or XML. Can be used
        instead of ``data``.

    data
        Contains the new user template contents. Syntax can be the usual
        attribute=value or XML. Can be used instead of ``path``. Takes
        precedence when both are given.

    update_type
        There are two ways to update a VM: ``replace`` the whole template or
        ``merge`` the new template with the existing one.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a vm_update my-vm path=/path/to/user_template_file.txt update_type='replace'
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The vm_update action must be called with -a or --action.'
        )

    if kwargs is None:
        kwargs = {}

    path = kwargs.get('path', None)
    data = kwargs.get('data', None)
    update_type = kwargs.get('update_type', None)
    update_args = ['replace', 'merge']

    if update_type is None:
        raise SaltCloudSystemExit(
            'The vm_update function requires an \'update_type\' to be provided.'
        )

    # Map the update_type keyword onto the numeric code expected by the
    # XML-RPC API (0 = replace, 1 = merge).
    if update_type == update_args[0]:
        update_number = 0
    elif update_type == update_args[1]:
        update_number = 1
    else:
        raise SaltCloudSystemExit(
            'The update_type argument must be either {0} or {1}.'.format(
                update_args[0], update_args[1]
            )
        )

    # Resolve the template contents: inline data wins over a file path.
    if data:
        if path:
            log.warning(
                'Both the \'data\' and \'path\' arguments were provided. '
                '\'data\' will take precedence.'
            )
    elif path:
        with salt.utils.files.fopen(path, mode='r') as rfh:
            data = rfh.read()
    else:
        raise SaltCloudSystemExit(
            'The vm_update function requires either \'data\' or a file \'path\' '
            'to be provided.'
        )

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    vm_id = int(get_vm_id(kwargs={'name': name}))
    response = server.one.vm.update(auth, vm_id, data, int(update_number))

    ret = {
        'action': 'vm.update',
        'updated': response[0],
        'resource_id': response[1],
        'error_code': response[2],
    }

    return ret
def get_by_type(self, _type):
    """Return all of the entries whose :class:`ComponentType` is ``_type``."""
    return {key: value for key, value in self.items()
            if get_component_type(key) is _type}
def run_hybrid(wf, selector, workers):
    """Evaluate the workflow using several workers in as many threads.

    :param wf: Workflow to compute.
    :type wf: :py:class:`Workflow` or :py:class:`PromisedObject`
    :param selector: function selecting the worker that should run, given a hint.
    :param workers: dictionary of workers.
    :returns: result of running the workflow.
    """
    scheduler = Scheduler()
    worker = hybrid_threaded_worker(selector, workers)
    return scheduler.run(worker, get_workflow(wf))
def fraction_fpr(fg_vals, bg_vals, fpr=0.01):
    """Compute the fraction of positives at a specific FPR (default 1%).

    Parameters
    ----------
    fg_vals : array_like
        Values for the positive set.
    bg_vals : array_like
        Values for the negative set.
    fpr : float, optional
        The FPR (between 0.0 and 1.0).

    Returns
    -------
    fraction : float
        The fraction of positives scoring at or above the background
        score threshold corresponding to the given FPR.
    """
    threshold = scoreatpercentile(bg_vals, 100 - 100 * fpr)
    positives = np.asarray(fg_vals)
    return float((positives >= threshold).sum()) / float(len(positives))
def check_config(self, config):
    """Check the config dict for required fields and validity.

    @param config: The config dict.
    @return: True if all required fields are present.
    @raise Exception: listing every missing field when validation fails.
    """
    required = ["name", "currency", "IBAN", "BIC"]
    problems = [item.upper() + "_MISSING " for item in required
                if item not in config]
    if not problems:
        return True
    raise Exception("Config file did not validate. " + "".join(problems))
def __loadSetting(self):
    """Load the strategy configuration from the setting file.

    Reads ``self.settingfilePath`` as JSON, copies the values under its
    ``'frame'`` key onto matching attributes of this instance, and
    validates the trading-environment and market settings.

    :returns: ``True`` on success.
    :raises Exception: if the file has no ``'frame'`` section or if
        ``_env_type`` / ``_market`` hold unsupported values.
    """
    with open(self.settingfilePath, 'rb') as f:
        df = f.read()
        f.close()  # NOTE(review): redundant inside the with-block
    # Decode bytes to str (py2/py3 compatibility helper).
    if type(df) is not str:
        df = ft.str_utf8(df)
    self._global_settings = json.loads(df)
    if self._global_settings is None or 'frame' not in self._global_settings:
        raise Exception("setting.json - no frame config!'")
    # Apply the 'frame' parameters onto matching instance attributes.
    frame_setting = self._global_settings['frame']
    d = self.__dict__
    for key in d.keys():
        if key in frame_setting.keys():
            d[key] = frame_setting[key]
    # check paramlist
    # for key in d.keys():
    #     if d[key] is None:
    #         str_error = "setting.json - 'frame' config no key: '%s'" % key
    #         raise Exception(str_error)
    # Validate _env_type / _market against the supported enumerations.
    env_list = [ft.TrdEnv.REAL, ft.TrdEnv.SIMULATE]
    if self._env_type not in env_list:
        str_error = "setting.json - 'frame' config '_env_type' can only is '{}'".format(','.join(env_list))
        raise Exception(str_error)
    market_list = [ft.Market.HK, ft.Market.US]
    if self._market not in market_list:
        str_error = "setting.json - 'frame' config '_market' can only is '{}'".format(','.join(market_list))
        raise Exception(str_error)
    return True
def has_scheduled_methods(cls):
    """Class decorator; apply to a class whose methods were decorated with
    :func:`schedule` or :func:`schedule_hint`.

    Each wrapped member is tagged with a ``__member_of__`` attribute so the
    correct method can be serialised and retrieved later. This should be
    considered a patch to a flaw in the Python object model.
    """
    for attribute in vars(cls).values():
        if hasattr(attribute, '__wrapped__'):
            attribute.__wrapped__.__member_of__ = cls
    return cls
def on_key_down ( self , event ) : '''handle keyboard input'''
state = self . state # send all key events to the parent if self . mouse_pos : latlon = self . coordinates ( self . mouse_pos . x , self . mouse_pos . y ) selected = self . selected_objects ( self . mouse_pos ) state . event_queue . put ( SlipKeyEvent ( latlon , event , selected ) ) c = event . GetUniChar ( ) if c == ord ( '+' ) or ( c == ord ( '=' ) and event . ShiftDown ( ) ) : self . change_zoom ( 1.0 / 1.2 ) event . Skip ( ) elif c == ord ( '-' ) : self . change_zoom ( 1.2 ) event . Skip ( ) elif c == ord ( 'G' ) : self . enter_position ( ) event . Skip ( ) elif c == ord ( 'C' ) : self . clear_thumbnails ( ) event . Skip ( )
def _init_read_gz(self):
    """Initialize for reading a gzip compressed fileobj.

    Sets up a raw-deflate decompressor and consumes the gzip header
    (magic, method, flags and the optional fields selected by the flag
    bits), leaving the stream positioned at the compressed payload.
    Raises ReadError for a bad magic number and CompressionError for an
    unsupported compression method.
    """
    # Raw deflate stream: negative wbits suppresses the zlib header.
    self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
    self.dbuf = b""
    # taken from gzip.GzipFile with some alterations
    if self.__read(2) != b"\037\213":
        raise ReadError("not a gzip file")
    if self.__read(1) != b"\010":
        # only DEFLATE (method 8) is defined by the gzip format
        raise CompressionError("unsupported compression method")
    flag = ord(self.__read(1))
    # skip mtime (4 bytes), XFL and OS (1 byte each)
    self.__read(6)
    if flag & 4:
        # FEXTRA: 2-byte little-endian length followed by the extra field
        xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
        self.read(xlen)
    if flag & 8:
        # FNAME: zero-terminated original file name
        while True:
            s = self.__read(1)
            if not s or s == NUL:
                break
    if flag & 16:
        # FCOMMENT: zero-terminated file comment
        while True:
            s = self.__read(1)
            if not s or s == NUL:
                break
    if flag & 2:
        # FHCRC: 2-byte header CRC
        self.__read(2)
def initialize_from_matrix(cls, matrix, column):
    """Create a vector from one column of a matrix.

    :param Matrix matrix: the matrix whose column should be copied.
    :param integer column: the column of the matrix used to create the
        new vector.
    :raise: Raises an :py:exc:`IndexError` if the matrix does not have
        the specified column.
    """
    height = matrix.get_height()
    result = Vector(height)
    for row_index in xrange(height):
        result.set_value(0, row_index, matrix.get_value(column, row_index))
    return result
def correct_segmentation(segments, clusters, min_time):
    """Correct the predicted segmentation by merging over-segmented pieces.

    Two consecutive segments are merged when the cluster of points between
    them spans less time than ``min_time``; otherwise the split is kept and
    the boundary point is carried over into the previous segment.

    Args:
        segments (:obj:`list` of :obj:`list` of :obj:`Point`): segments to correct
        clusters (:obj:`list` of :obj:`list` of :obj:`Point`): point cluster
            associated with each segment (same indexing as ``segments``)
        min_time (int): minimum required time for a segmentation split
    Returns:
        :obj:`list` of :obj:`list` of :obj:`Point`: the corrected segments
    """
    result_segments = []
    prev_segment = None
    for i, segment in enumerate(segments):
        # BUG FIX: the original guard was ``len(segment) >= 1: continue``,
        # which skipped every non-empty segment. Per the (commented-out)
        # original filter, segments with fewer than two points carry no
        # information and are the ones to skip.
        if len(segment) <= 1:
            continue
        cluster = clusters[i]
        if prev_segment is None:
            prev_segment = segment
        else:
            cluster_dt = 0
            if len(cluster) > 0:
                cluster_dt = abs(cluster[0].time_difference(cluster[-1]))
            if cluster_dt <= min_time:
                # gap too short: merge this segment into the previous one
                prev_segment.extend(segment)
            else:
                # keep the split; carry the boundary point over
                prev_segment.append(segment[0])
                result_segments.append(prev_segment)
                prev_segment = segment
    if prev_segment is not None:
        result_segments.append(prev_segment)
    return result_segments
def eval_field(field, asc):
    """Evaluate a field for sorting purpose.

    :param field: Field definition (string, dict or callable).
    :param asc: ``True`` if order is ascending, ``False`` if descending.
    :returns: Dictionary with the sort field query.
    """
    if callable(field):
        return field(asc)
    if isinstance(field, dict):
        if asc:
            return field
        # Field should only have one key and must have an order subkey;
        # flip that order for descending sorts without mutating the input.
        flipped = copy.deepcopy(field)
        key = next(iter(flipped))
        flipped[key]['order'] = reverse_order(flipped[key]['order'])
        return flipped
    key, key_asc = parse_sort_field(field)
    if not asc:
        key_asc = not key_asc
    return {key: {'order': 'asc' if key_asc else 'desc'}}
def with_sample_weight(clf, sample_weight, fit_params):
    """Return ``fit_params`` extended with a "sample_weight" entry.

    Unlike ``fit_params['sample_weight'] = sample_weight`` this also handles
    the case where ``clf`` is a pipeline (the parameter name then carries the
    estimator-step prefix).
    """
    weight_key = "{0}sample_weight".format(_get_classifier_prefix(clf))
    merged = {weight_key: sample_weight}
    merged.update(fit_params)
    return merged
def search_for_files(run_name, raw_extension=None, cellpy_file_extension=None,
                     raw_file_dir=None, cellpy_file_dir=None,
                     prm_filename=None, file_name_format=None, cache=None):
    """Search for files (raw-data files and cellpy-files).

    Args:
        run_name (str): run-file identification.
        raw_extension (str): optional, extension of run-files (without the '.').
        cellpy_file_extension (str): optional, extension for cellpy files
            (without the '.').
        raw_file_dir (path): optional, directory where to look for run-files
            (default: read prm-file)
        cellpy_file_dir (path): optional, directory where to look for
            cellpy-files (default: read prm-file)
        prm_filename (path): optional parameter file can be given.
        file_name_format (str): format of raw-file names or a glob pattern
            (default: YYYYMMDD_[name]EEE_CC_TT_RR).
        cache (list): list of cached file names to search through

    Returns:
        run-file names (list) and cellpy-file-name (path); when ``cache`` is
        given, the (possibly refreshed) cache list is returned as a third
        element.
    """
    time_00 = time.time()
    logger.debug(f"searching for {run_name}")

    # BUG FIX: the caller's extensions were previously clobbered
    # (``cellpy_file_extension`` was unconditionally reset to "h5" and the
    # later ``if None`` check was a no-op self-assignment); only fall back
    # to the defaults when no value was supplied.
    if raw_extension is None:
        raw_extension = "res"
    if cellpy_file_extension is None:
        cellpy_file_extension = "h5"

    if prm_filename is not None:
        logging.debug("reading prm file disabled")

    # Fill in any missing locations/format from the global prm settings.
    if not all([raw_file_dir, cellpy_file_dir, file_name_format]):
        logger.debug("using prms already set")
        if raw_file_dir is None:
            raw_file_dir = prms.Paths["rawdatadir"]
        if cellpy_file_dir is None:
            cellpy_file_dir = prms.Paths["cellpydatadir"]
        if file_name_format is None:
            try:
                file_name_format = prms.file_name_format
            except AttributeError:
                file_name_format = "YYYYMMDD_[name]EEE_CC_TT_RR"

    if not os.path.isdir(raw_file_dir):
        warnings.warn("your raw file directory cannot be accessed!")

    # The default naming scheme matches "<run_name>_*.<ext>"; any other
    # value is interpreted as a glob pattern verbatim.
    if file_name_format.upper() == "YYYYMMDD_[NAME]EEE_CC_TT_RR":
        glob_text_raw = "%s_*.%s" % (os.path.basename(run_name), raw_extension)
    else:
        glob_text_raw = file_name_format

    cellpy_file = "{0}.{1}".format(run_name, cellpy_file_extension)
    cellpy_file = os.path.join(cellpy_file_dir, cellpy_file)  # TODO: @jepe - use pathlib

    if cache is None:
        if os.path.isdir(raw_file_dir):
            glob_text_raw_full = os.path.join(raw_file_dir, glob_text_raw)
            run_files = glob.glob(glob_text_raw_full)
            run_files.sort()
        else:
            run_files = []
        logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
        return run_files, cellpy_file
    else:
        logger.debug("using cache in filefinder")
        if os.path.isdir(raw_file_dir):
            if len(cache) == 0:
                cache = os.listdir(raw_file_dir)
            run_files = [os.path.join(raw_file_dir, x) for x in cache
                         if fnmatch.fnmatch(x, glob_text_raw)]
            run_files.sort()
        else:
            run_files = []
        logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
        return run_files, cellpy_file, cache
def get_value(self, value):
    """Store *value* under a fresh placeholder name (e.g. ':v1') and
    return that placeholder."""
    placeholder = ":v{0}".format(self._next_value)
    self._next_value += 1
    self._values[placeholder] = value
    return placeholder
def send_mail(subject, body_text, addr_from, recipient_list,
              fail_silently=False, auth_user=None, auth_password=None,
              attachments=None, body_html=None, html_message=None,
              connection=None, headers=None):
    """Send a multipart email containing text and html versions which are
    encrypted for each recipient that has a valid gpg key installed.

    :param subject: Message subject.
    :param body_text: Plain-text body.
    :param addr_from: Sender address.
    :param recipient_list: A single address or a list of addresses.
    :param fail_silently: Passed through to the mail backend.
    :param auth_user: Optional SMTP username for a new connection.
    :param auth_password: Optional SMTP password for a new connection.
    :param attachments: Iterable of (name, data) pairs or filesystem paths.
    :param body_html: Deprecated alias for ``html_message``.
    :param html_message: Optional HTML alternative body.
    :param connection: Optional existing mail connection to reuse.
    :param headers: Optional extra message headers.
    :raises ValueError: if both ``body_html`` and ``html_message`` are given.
    :raises EncryptionFailedError: if gpg encryption produces no output.
    """
    # Make sure only one HTML option is specified
    if body_html is not None and html_message is not None:  # pragma: no cover
        raise ValueError("You cannot specify body_html and html_message at "
                         "the same time. Please only use html_message.")

    # Push users to update their code
    if body_html is not None:  # pragma: no cover
        warn("Using body_html is deprecated; use the html_message argument "
             "instead. Please update your code.", DeprecationWarning)
        html_message = body_html

    # Allow for a single address to be passed in.
    if isinstance(recipient_list, six.string_types):
        recipient_list = [recipient_list]

    connection = connection or get_connection(username=auth_user,
                                              password=auth_password,
                                              fail_silently=fail_silently)

    # Obtain a list of the recipients that have gpg keys installed.
    key_addresses = {}
    if USE_GNUPG:
        from email_extras.models import Address
        key_addresses = dict(Address.objects.filter(
            address__in=recipient_list).values_list('address', 'use_asc'))
    # Create the gpg object.
    if key_addresses:
        gpg = GPG(gnupghome=GNUPG_HOME)
        if GNUPG_ENCODING is not None:
            gpg.encoding = GNUPG_ENCODING

    # Check if recipient has a gpg key installed
    def has_pgp_key(addr):
        return addr in key_addresses

    # Encrypts body if recipient has a gpg key installed.
    def encrypt_if_key(body, addr_list):
        if has_pgp_key(addr_list[0]):
            encrypted = gpg.encrypt(body, addr_list[0],
                                    always_trust=ALWAYS_TRUST)
            if encrypted == "" and body != "":  # encryption failed
                raise EncryptionFailedError("Encrypting mail to %s failed.",
                                            addr_list[0])
            return smart_text(encrypted)
        return body

    # Load attachments and create name/data tuples.
    attachments_parts = []
    if attachments is not None:
        for attachment in attachments:
            # Attachments can be pairs of name/data, or filesystem paths.
            if not hasattr(attachment, "__iter__"):
                with open(attachment, "rb") as f:
                    attachments_parts.append((basename(attachment), f.read()))
            else:
                attachments_parts.append(attachment)

    # Send emails - encrypted emails needs to be sent individually, while
    # non-encrypted emails can be sent in one send. So the final list of
    # lists of addresses to send to looks like:
    # [[unencrypted1, unencrypted2, unencrypted3], [encrypted1], [encrypted2]]
    unencrypted = [addr for addr in recipient_list
                   if addr not in key_addresses]
    unencrypted = [unencrypted] if unencrypted else unencrypted
    encrypted = [[addr] for addr in key_addresses]
    for addr_list in unencrypted + encrypted:
        msg = EmailMultiAlternatives(subject,
                                     encrypt_if_key(body_text, addr_list),
                                     addr_from, addr_list,
                                     connection=connection, headers=headers)
        if html_message is not None:
            # Encrypted HTML parts get a dedicated mimetype so clients can
            # recognise the gpg payload.
            if has_pgp_key(addr_list[0]):
                mimetype = "application/gpg-encrypted"
            else:
                mimetype = "text/html"
            msg.attach_alternative(encrypt_if_key(html_message, addr_list),
                                   mimetype)
        for parts in attachments_parts:
            name = parts[0]
            # ASCII-armored encrypted attachments get an ".asc" suffix.
            if key_addresses.get(addr_list[0]):
                name += ".asc"
            msg.attach(name, encrypt_if_key(parts[1], addr_list))
        msg.send(fail_silently=fail_silently)
def load_config(path):
    """Read and validate the ``[walls]`` configuration file at *path*.

    Exits (via ``stderr_and_exit``) when the file is unreadable, the
    ``[walls]`` section is absent, required keys are missing, numeric keys
    are not integers, or the image directory does not exist.

    :param path: filesystem path of the config file.
    :returns: the parsed ``ConfigParser`` instance.
    """
    config = ConfigParser()
    if not config.read(path):
        stderr_and_exit("Couldn't load config {0}\n".format(path))
    if not config.has_section('walls'):
        stderr_and_exit('Config missing [walls] section.\n')
    # Report every missing required key at once
    required = ['api_key', 'api_secret', 'tags', 'image_dir', 'width', 'height']
    missing = [key for key in required if not config.has_option('walls', key)]
    if missing:
        stderr_and_exit("Missing config keys: '{0}'\n"
                        .format("', '".join(missing)))
    # Width and height must parse as integers
    bad_ints = []
    for key in ('width', 'height'):
        try:
            config.getint('walls', key)
        except ValueError:
            bad_ints.append(key)
    if bad_ints:
        stderr_and_exit("The following must be integers: '{0}'\n"
                        .format("', '".join(bad_ints)))
    # The destination directory must already exist
    image_dir = os.path.expanduser(config.get('walls', 'image_dir'))
    if not os.path.isdir(image_dir):
        stderr_and_exit('The directory {0} does not exist.\n'
                        .format(config.get('walls', 'image_dir')))
    return config
def transform_to_geographic ( this_spec_meas_df , samp_df , samp , coord = "0" ) : """Transform decs / incs to geographic coordinates . Calls pmag . dogeo _ V for the heavy lifting Parameters this _ spec _ meas _ df : pandas dataframe of measurements for a single specimen samp _ df : pandas dataframe of samples samp : samp name Returns this _ spec _ meas _ df : measurements dataframe with transformed coordinates"""
# we could return the type of coordinates ACTUALLY used # transform geographic decs = this_spec_meas_df [ 'dir_dec' ] . values . tolist ( ) incs = this_spec_meas_df [ 'dir_inc' ] . values . tolist ( ) or_info , az_type = pmag . get_orient ( samp_df , samp , data_model = 3 ) if 'azimuth' in or_info . keys ( ) and cb . not_null ( or_info [ 'azimuth' ] , False ) : azimuths = len ( decs ) * [ or_info [ 'azimuth' ] ] dips = len ( decs ) * [ or_info [ 'dip' ] ] # if azimuth / dip is missing , or orientation is bad , # stick with specimen coordinates else : return this_spec_meas_df dirs = [ decs , incs , azimuths , dips ] dirs_geo = np . array ( list ( map ( list , list ( zip ( * dirs ) ) ) ) ) decs , incs = pmag . dogeo_V ( dirs_geo ) if coord == '100' and 'bed_dip_direction' in or_info . keys ( ) and or_info [ 'bed_dip_direction' ] != "" : # need to do tilt correction too bed_dip_dirs = len ( decs ) * [ or_info [ 'bed_dip_direction' ] ] bed_dips = len ( decs ) * [ or_info [ 'bed_dip' ] ] dirs = [ decs , incs , bed_dip_dirs , bed_dips ] # # this transposes the columns and rows of the list of lists dirs_tilt = np . array ( list ( map ( list , list ( zip ( * dirs ) ) ) ) ) decs , incs = pmag . dotilt_V ( dirs_tilt ) this_spec_meas_df [ 'dir_dec' ] = decs this_spec_meas_df [ 'dir_inc' ] = incs return this_spec_meas_df
def patterns(instance, options):
    """Ensure that the syntax of the pattern of an indicator is valid, and
    that objects and properties referenced by the pattern are valid.

    :param instance: The STIX object (dict) being validated; only objects of
        type 'indicator' with a 'pattern' key are checked.
    :param options: Validation options object (``strict_types``,
        ``strict_properties``, ``disabled`` check list).
    :yields: PatternError instances for each problem found.
    """
    if instance['type'] != 'indicator' or 'pattern' not in instance:
        return
    pattern = instance['pattern']
    if not isinstance(pattern, string_types):
        return  # This error already caught by schemas
    errors = pattern_validator(pattern)
    # Check pattern syntax
    if errors:
        for e in errors:
            yield PatternError(str(e), instance['id'])
        return
    # Shape constraints on type and property names used in the pattern.
    type_format_re = re.compile(r'^\-?[a-z0-9]+(-[a-z0-9]+)*\-?$')
    property_format_re = re.compile(r'^[a-z0-9_]{3,250}$')
    p = Pattern(pattern)
    inspection = p.inspect().comparisons
    for objtype in inspection:
        # Check observable object types
        if objtype in enums.OBSERVABLE_TYPES:
            pass
        elif options.strict_types:
            yield PatternError("'%s' is not a valid STIX observable type"
                               % objtype, instance['id'])
        elif (not type_format_re.match(objtype) or
              len(objtype) < 3 or len(objtype) > 250):
            yield PatternError("'%s' is not a valid observable type name"
                               % objtype, instance['id'])
        elif (all(x not in options.disabled
                  for x in ['all', 'format-checks', 'custom-prefix']) and
              not CUSTOM_TYPE_PREFIX_RE.match(objtype)):
            yield PatternError("Custom Observable Object type '%s' should start "
                               "with 'x-' followed by a source unique identifier "
                               "(like a domain name with dots replaced by "
                               "hyphens), a hyphen and then the name" % objtype,
                               instance['id'])
        elif (all(x not in options.disabled
                  for x in ['all', 'format-checks', 'custom-prefix-lax']) and
              not CUSTOM_TYPE_LAX_PREFIX_RE.match(objtype)):
            yield PatternError("Custom Observable Object type '%s' should start "
                               "with 'x-'" % objtype, instance['id'])
        # Check observable object properties
        expression_list = inspection[objtype]
        for exp in expression_list:
            path = exp[0]
            # Get the property name without list index, dictionary key, or
            # referenced object property
            prop = path[0]
            if objtype in enums.OBSERVABLE_PROPERTIES and \
                    prop in enums.OBSERVABLE_PROPERTIES[objtype]:
                continue
            elif options.strict_properties:
                yield PatternError("'%s' is not a valid property for '%s' objects"
                                   % (prop, objtype), instance['id'])
            elif not property_format_re.match(prop):
                yield PatternError("'%s' is not a valid observable property name"
                                   % prop, instance['id'])
            elif (all(x not in options.disabled
                      for x in ['all', 'format-checks', 'custom-prefix']) and
                  not CUSTOM_PROPERTY_PREFIX_RE.match(prop)):
                yield PatternError("Cyber Observable Object custom property '%s' "
                                   "should start with 'x_' followed by a source "
                                   "unique identifier (like a domain name with "
                                   "dots replaced by underscores), an "
                                   "underscore and then the name" % prop,
                                   instance['id'])
            elif (all(x not in options.disabled
                      for x in ['all', 'format-checks', 'custom-prefix-lax']) and
                  not CUSTOM_PROPERTY_LAX_PREFIX_RE.match(prop)):
                yield PatternError("Cyber Observable Object custom property '%s' "
                                   "should start with 'x_'" % prop,
                                   instance['id'])
def keep_alive(self):
    """Return :data:`True` (a non-zero total) if any reader's
    :attr:`Side.keep_alive` attribute is :data:`True`.

    Used to delay shutdown while some important work is in progress
    (e.g. log draining).
    """
    total = 0
    for _fd, (side, _data) in self.poller.readers:
        total += side.keep_alive
    return total