signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def set_bucket_props(self, bucket, props):
    """Serialize a set-bucket-properties request and deserialize the response.

    :param bucket: the bucket whose properties are being set
    :param props: dict of property name -> value
    :raises NotImplementedError: if the server only supports the restricted
        PBC property set and ``props`` contains anything else
    :return: True on success
    """
    if not self.pb_all_bucket_props():
        # Older servers understand only a restricted property set over PBC.
        for key in props:
            if key not in ('n_val', 'allow_mult'):
                raise NotImplementedError('Server only supports n_val and '
                                          'allow_mult properties over PBC')
    msg_code = riak.pb.messages.MSG_CODE_SET_BUCKET_REQ
    codec = self._get_codec(msg_code)
    request = codec.encode_set_bucket_props(bucket, props)
    resp_code, resp = self._request(request, codec)
    return True
def change_range_cb(self, setting, value, fitsimage):
    """Callback invoked when a channel's cut levels (lo/hi) change.

    Mirrors the new range into the ColorBar, but only when the changed
    viewer is the one that currently has focus.
    """
    if not self.gui_up:
        return
    if fitsimage != self.fv.getfocus_viewer():
        # values have changed in a channel that doesn't have the focus
        return False
    lo, hi = value
    self.colorbar.set_range(lo, hi)
def set_bios_configuration(irmc_info, settings):
    """Set BIOS configurations on the server.

    :param irmc_info: node info
    :param settings: iterable of dicts, each with "name" and "value" keys,
        describing one BIOS setting.
    :raise: BiosConfigNotFound, if there is wrong settings for bios
        configuration.
    """
    bios_config_data = {'Server': {'SystemConfig': {'BiosConfig': {}}}}
    versions = elcm_profile_get_versions(irmc_info)
    server_version = versions['Server'].get('@Version')
    bios_version = (
        versions['Server']['SystemConfig']['BiosConfig'].get('@Version'))
    if server_version:
        bios_config_data['Server']['@Version'] = server_version
    if bios_version:
        bios_config_data['Server']['SystemConfig']['BiosConfig'][
            '@Version'] = bios_version

    configs = {}
    for setting_param in settings:
        setting_name = setting_param.get("name")
        setting_value = setting_param.get("value")
        # Revert-conversion from the strings "True"/"False" to booleans:
        # the profile API fails if given the string forms.
        if isinstance(setting_value, six.string_types):
            lowered = setting_value.lower()
            if lowered == "true":
                setting_value = True
            elif lowered == "false":
                setting_value = False
        try:
            type_config, config = (
                BIOS_CONFIGURATION_DICTIONARY[setting_name].split("_"))
        except KeyError:
            raise BiosConfigNotFound("Invalid BIOS setting: %s"
                                     % setting_param)
        # Group settings by their top-level config section.
        configs.setdefault(type_config, {})[config] = setting_value

    bios_config_data['Server']['SystemConfig']['BiosConfig'].update(configs)
    restore_bios_config(irmc_info, bios_config_data)
def chunk_pandas(frame_or_series, chunksize=None):
    """Yield consecutive slices of *frame_or_series* of length ``chunksize``.

    The final chunk may be shorter when the total length is not an exact
    multiple of ``chunksize``.

    :param frame_or_series: any sliceable sequence (e.g. a pandas
        DataFrame or Series).
    :param chunksize: positive int, number of rows per chunk.
    :raises ValueError: if ``chunksize`` is not a positive integer.
    """
    if not isinstance(chunksize, int):
        raise ValueError('argument chunksize needs to be integer type')
    if chunksize <= 0:
        # Previously a step of 0 crashed deep inside np.arange; fail clearly.
        raise ValueError('argument chunksize needs to be positive')
    # builtin range replaces np.arange -- same bins, no array allocation
    for start in range(0, len(frame_or_series), chunksize):
        yield frame_or_series[start:start + chunksize]
def get_undo_redo_list_from_active_trail_history_item_to_version_id(self, version_id):
    """Perform fast search from currently active branch to specific
    version_id and collect all recovery steps.

    Returns a list of (version_id, 'undo'|'redo') tuples that, applied in
    order, move the history from the currently active trail position to
    ``version_id``.
    """
    all_trail_action = [a.version_id for a in self.single_trail_history() if a is not None]
    all_active_action = self.get_all_active_actions()
    undo_redo_list = []
    # Steps needed off-trail (collected first, replayed at the end).
    _undo_redo_list = []
    intermediate_version_id = version_id
    if self.with_verbose:
        logger.verbose("Version_id : {0} in".format(intermediate_version_id))
        logger.verbose("Active actions: {0} in: {1}".format(all_active_action, intermediate_version_id in all_active_action))
        logger.verbose("Trail actions : {0} in: {1}".format(all_trail_action, intermediate_version_id in all_trail_action))
    if intermediate_version_id not in all_trail_action:
        # get undo to come from version_id to trail_action
        # (walk backwards via prev_id until we hit the trail; each hop is a
        # redo to be replayed after reaching the branch point)
        while intermediate_version_id not in all_trail_action:
            _undo_redo_list.insert(0, (intermediate_version_id, 'redo'))
            intermediate_version_id = self.all_time_history[intermediate_version_id].prev_id
        intermediate_goal_version_id = intermediate_version_id
    else:
        intermediate_goal_version_id = version_id
    # Start walking from the currently active trail position.
    intermediate_version_id = self.trail_history[self.trail_pointer].version_id
    if self.with_verbose:
        logger.verbose("Version_id : {0} {1}".format(intermediate_goal_version_id, intermediate_version_id))
        logger.verbose("Active actions: {0} in: {1}".format(all_active_action, intermediate_version_id in all_active_action))
        logger.verbose("Trail actions : {0} in: {1}".format(all_trail_action, intermediate_version_id in all_trail_action))
    # collect undo and redo on trail
    if intermediate_goal_version_id in all_active_action:
        # collect needed undo to reach intermediate version
        while not intermediate_version_id == intermediate_goal_version_id:
            undo_redo_list.append((intermediate_version_id, 'undo'))
            intermediate_version_id = self.all_time_history[intermediate_version_id].prev_id
    elif intermediate_goal_version_id in all_trail_action:
        # collect needed redo to reach intermediate version
        while not intermediate_version_id == intermediate_goal_version_id:
            intermediate_version_id = self.all_time_history[intermediate_version_id].next_id
            undo_redo_list.append((intermediate_version_id, 'redo'))
    # Finally replay the off-trail redo steps collected above.
    for elem in _undo_redo_list:
        undo_redo_list.append(elem)
    return undo_redo_list
def till(self):
    """Queries the current shop till and returns the amount.

    Returns:
        str -- Amount of NPs in shop till.

    Raises:
        parseException: if the amount cannot be parsed from the page.
    """
    pg = self.usr.getPage("http://www.neopets.com/market.phtml?type=till")
    try:
        raw = pg.find_all(text="Shop Till")[1].parent.next_sibling.b.text
        # Strip the " NP" suffix and thousands separators.
        return raw.replace(" NP", "").replace(",", "")
    except Exception:
        logging.getLogger("neolib.shop").exception("Could not grab shop till.", {'pg': pg})
        raise parseException
def graceful_stop(self, signal_number=None, stack_frame=None):
    """Initiate a graceful (or, if already shutting down, hard) stop.

    This function will be called when a graceful-stop is initiated.

    :param signal_number: signal that triggered the stop, or None when
        invoked manually.
    :param stack_frame: interrupted stack frame from the signal handler,
        or None.
    :return: True once shutdown has been initiated.
    """
    stop_msg = "Hard" if self.shutdown else "Graceful"
    if signal_number is None:
        self.log.info("%s stop called manually. "
                      "Shutting down.", stop_msg)
    else:
        # BUG FIX: the adjacent literals previously concatenated to
        # "Shutting down.Stack Frame:" -- a separator was missing.
        self.log.info("%s stop called by signal #%s. Shutting down. "
                      "Stack Frame: %s",
                      stop_msg, signal_number, stack_frame)
    self.shutdown = True
    self.crawler_list.stop()
    self.daemon_list.stop()
    # Wake any waiting threads so they can observe the shutdown flag.
    self.thread_event.set()
    return True
def _parse_peer_link ( self , config ) : """Scans the config block and parses the peer - link value Args : config ( str ) : The config block to scan Returns : dict : A dict object that is intended to be merged into the resource dict"""
match = re . search ( r'peer-link (\S+)' , config ) value = match . group ( 1 ) if match else None return dict ( peer_link = value )
def upper2_for_ramp_wall(self) -> Set[Point2]:
    """Return the 2 upper ramp points of the main base ramp.

    Required for the supply depot and barracks placement properties used
    in this file.
    """
    if len(self.upper) > 5:
        # NOTE: this was way too slow on large ramps
        return set()  # HACK: makes this work for now
        # FIXME: please do
    # Idiom fix: sorted() accepts any iterable (no list() copy needed) and a
    # slice replaces the O(n) pop-loop; the two points farthest from the
    # bottom center are the upper pair.
    farthest = sorted(self.upper,
                      key=lambda p: p.distance_to(self.bottom_center),
                      reverse=True)
    return set(farthest[:2])
def _to_variable_type ( x ) : """Convert CWL variables to WDL variables , handling nested arrays ."""
var_mapping = { "string" : "String" , "File" : "File" , "null" : "String" , "long" : "Float" , "int" : "Int" } if isinstance ( x , dict ) : if x [ "type" ] == "record" : return "Object" else : assert x [ "type" ] == "array" , x return "Array[%s]" % _to_variable_type ( x [ "items" ] ) elif isinstance ( x , ( list , tuple ) ) : vars = [ v for v in x if v != "null" ] assert len ( vars ) == 1 , vars return var_mapping [ vars [ 0 ] ] else : return var_mapping [ x ]
def box_iter(self):
    """Get an iterator over all boxes in the Sudoku.

    Yields one box per (row, column) box position. The top-left cell
    coordinates are multiples of the puzzle order; this was previously
    hard-coded to 3, which only produced correct boxes for order-3
    (9x9) puzzles. For order == 3 the behavior is unchanged.
    """
    for i in utils.range_(self.order):
        for j in utils.range_(self.order):
            yield self.box(i * self.order, j * self.order)
def rotate_point(self, p):
    """Rotate a Point instance using this quaternion.

    :param p: a 3-component point, indexable as p[0], p[1], p[2].
    :return: tuple (x, y, z) of the rotated point.
    """
    # Embed the point as a pure quaternion. Do not normalize!
    pure = Quaternion(0, p[0], p[1], p[2], False)
    q = self.normalize()
    q_inv = self.inverse()
    # Apply rotation: q * p * q^-1
    rotated = (q * pure) * q_inv
    return rotated.x, rotated.y, rotated.z
def run(self, address, port, unix_path):
    """Starts the Daemon, handling commands until interrupted.

    Listens either on a unix socket (when ``unix_path`` is given) or a
    TCP socket bound to ``address``:``port``.

    @return False if error. Runs indefinitely otherwise.
    """
    assert address or unix_path
    if unix_path:
        sock = bind_unix_socket(unix_path)
    else:
        sock = bind_socket(address, port)
    if sock is None:
        # Binding failed; signal the error to the caller.
        return False
    sock.setblocking(False)
    inputs = [sock]
    outputs = []
    try:
        while True:
            # Timeout bounds the wait so the scheduler runs periodically.
            readable, _, _ = select.select(inputs, outputs, inputs, SCHEDULER_CHECK_PERIOD)
            for r_socket in readable:
                if unix_path and r_socket is sock:
                    client_stream, _ = sock.accept()
                    logger.debug("New connection from %s", unix_path)
                    self.handle_client_stream(client_stream, True)
                else:
                    client_stream = self.new_client_stream(sock)
                    if client_stream is None:
                        continue
                    # TCP streams are handled and closed per request.
                    self.handle_client_stream(client_stream, False)
                    close_client_stream(client_stream, unix_path)
            self.scheduler()
    except KeyboardInterrupt:
        logger.info("Received Ctrl-C shutting-down ...")
    finally:
        # Always release the listening socket, even on unexpected errors.
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
def replace_chars(astring):
    """Replace certain unicode characters to avoid errors when trying to
    read various strings.

    Every key of the module-level CHAR_REPLACE mapping is substituted by
    its value.

    Returns:
        str
    """
    for old, new in CHAR_REPLACE.items():
        astring = astring.replace(old, new)
    return astring
def dataset(node_parser, include=lambda x: True, input_transform=None, target_transform=None):
    """Convert immediate children of a GroupNode into a torch.data.Dataset.

    Keyword arguments:
    * node_parser: callable that converts a DataNode to a Dataset item
    * include: lambda(quilt.nodes.GroupNode) => {True, False};
      intended to filter nodes based on metadata
    * input_transform: optional callable taking the item as its argument
    * target_transform: optional callable taking the item as its argument;
      implementations may copy the item to avoid side effects

    Dataset.__getitem__ returns
    (input_transform(item), target_transform(item)) where
    item = node_parser(node), or (item, item) when no transforms are given.
    """
    def _dataset(node, paths):  # pylint: disable=unused-argument
        return DatasetFromGroupNode(node,
                                    node_parser=node_parser,
                                    include=include,
                                    input_transform=input_transform,
                                    target_transform=target_transform)
    return _dataset
def catch_timeout(f):
    """A decorator to handle read timeouts from Twitter.

    On a read timeout the connection is re-established via
    ``self.connect()`` and the wrapped call is retried exactly once.
    """
    from functools import wraps

    @wraps(f)  # fix: preserve the wrapped function's name/docstring
    def new_f(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except (requests.exceptions.ReadTimeout,
                requests.packages.urllib3.exceptions.ReadTimeoutError) as e:
            log.warning("caught read timeout: %s", e)
            self.connect()
            return f(self, *args, **kwargs)
    return new_f
def set_remote(self, key=None, value=None, data=None, scope=None, **kwdata):
    """Set data for the remote end(s) of the :class:`Conversation` with
    the given scope.

    In Python, this is equivalent to::

        relation.conversation(scope).set_remote(key, value, data, **kwdata)

    See :meth:`conversation` and :meth:`Conversation.set_remote`.
    """
    conversation = self.conversation(scope)
    conversation.set_remote(key, value, data, **kwdata)
def percent_change(value, decimal_places=1, multiply=True, failure_string='N/A'):
    """Convert a floating point value into a percentage change value.

    Number of decimal places is set by the ``decimal_places`` kwarg
    (default one). Values that cannot be coerced to float are presented
    as ``failure_string`` ('N/A' by default). By default the number is
    multiplied by 100; pass ``multiply=False`` to prevent that.
    """
    try:
        f = float(value)
    except (TypeError, ValueError):
        # fix: float(None) / float([]) raise TypeError, which the old code
        # did not catch even though the docstring promised 'N/A'.
        return failure_string
    if multiply:
        f = f * 100
    s = _saferound(f, decimal_places)
    if f > 0:
        return '+' + s + '%'
    return s + '%'
def time_recommendation(move_num, seconds_per_move=5, time_limit=15 * 60, decay_factor=0.98):
    """Given the current move number and the 'desired' seconds per move,
    return how much time should actually be used.

    Intended specifically for CGOS time controls, which have an absolute
    15-minute limit: spend seconds_per_move for as many moves as possible,
    then switch to exponentially decaying usage calibrated so an infinite
    number of moves still fits in the budget.
    """
    # Divide by two since you only play half the moves in a game.
    own_move = move_num / 2
    # The geometric series of decaying times sums to exactly this budget.
    decay_budget = seconds_per_move / (1 - decay_factor)
    if decay_budget > time_limit:
        # So little main time that we're already in 'endgame' mode.
        base = time_limit * (1 - decay_factor)
        full_rate_moves = 0
    else:
        # Reserve decay_budget seconds for the end; play at
        # seconds_per_move for as long as possible before that.
        base = seconds_per_move
        full_rate_moves = (time_limit - decay_budget) / seconds_per_move
    return base * decay_factor ** max(own_move - full_rate_moves, 0)
def _Constructor(cls, ptr=_internal_guard):
    """(INTERNAL) New wrapper from ctypes."""
    if ptr == _internal_guard:
        raise VLCException("(INTERNAL) ctypes class. You should get references for this class through methods of the LibVLC API.")
    # NULL pointers map to None rather than a wrapper object.
    if ptr is None or ptr == 0:
        return None
    return _Cobject(cls, ctypes.c_void_p(ptr))
def annotated(self):
    """Return an AnnotatedGraph with the same structure as this graph."""
    # Map each vertex to its annotated counterpart, ids assigned in
    # iteration order (enumerate replaces zip(itertools.count(), ...)).
    vertex_map = {}
    for vertex_id, vertex in enumerate(self.vertices):
        vertex_map[vertex] = AnnotatedVertex(
            id=vertex_id,
            annotation=six.text_type(vertex),
        )
    annotated_edges = [
        AnnotatedEdge(
            id=edge_id,
            annotation=six.text_type(edge),
            head=vertex_map[self.head(edge)].id,
            tail=vertex_map[self.tail(edge)].id,
        )
        for edge_id, edge in enumerate(self.edges)
    ]
    return AnnotatedGraph(
        vertices=vertex_map.values(),
        edges=annotated_edges,
    )
def get_lookup(self, main_field, for_remote, alias):
    """Create a fake field for the lookup capability.

    :param CompositeForeignKey main_field: the local fk
    :param Field for_remote: the remote field to match
    :param alias: table alias used to resolve the remote column
    :return: an "exact" lookup instance binding the remote column to
        ``self.value``
    """
    exact_lookup = for_remote.get_lookup("exact")
    remote_col = for_remote.get_col(alias)
    return exact_lookup(remote_col, self.value)
def _should_recover(self, exception):
    """Determine if an error on the RPC stream should be recovered.

    If the exception is one of the retryable exceptions, this signals the
    consumer thread to "recover" from the failure; returning False makes
    the stream exit.

    Returns:
        bool: True if the ``exception`` is "acceptable", i.e. in the list
        of retryable / idempotent exceptions.
    """
    exception = _maybe_wrap_exception(exception)
    recoverable = isinstance(exception, _RETRYABLE_STREAM_ERRORS)
    if recoverable:
        _LOGGER.info("Observed recoverable stream error %s", exception)
    else:
        _LOGGER.info("Observed non-recoverable stream error %s", exception)
    return recoverable
def get_posterior(self, twig=None, feedback=None, **kwargs):
    """[NOT IMPLEMENTED]

    :raises NotImplementedError: because it isn't
    """
    # fix: the original had unreachable statements after the raise; the
    # intended implementation was:
    #   kwargs['context'] = 'posterior'
    #   return self.filter(twig=twig, **kwargs)
    raise NotImplementedError
def removed(self):
    """Return the total number of deleted lines in the file.

    :return: int lines_deleted
    """
    # '---' marks the old-file header line, not a deletion.
    lines = self.diff.replace('\r', '').split("\n")
    return sum(1 for line in lines
               if line.startswith('-') and not line.startswith('---'))
def desymbolize(self):
    """We believe this was a pointer and symbolized it before; undo that.

    The following actions are performed:
    - Mark the sort as 'unknown'
    - Reload content from memory

    :return: None
    """
    self.sort = 'unknown'
    raw = self.binary.fast_memory_load(self.addr, self.size, bytes)
    self.content = [raw]
def new_sent(self, text, ID=None, **kwargs):
    '''Create a new sentence and add it to this Document.

    When no ID is supplied, the next value from the document's internal
    id generator is used.
    '''
    sent_id = next(self.__idgen) if ID is None else ID
    return self.add_sent(Sentence(text, ID=sent_id, **kwargs))
def write_paula(docgraph, output_root_dir, human_readable=False):
    """Convert a DiscourseDocumentGraph into a set of PAULA XML files
    representing the same document.

    Parameters
    ----------
    docgraph : DiscourseDocumentGraph
        the document graph to be converted
    output_root_dir : str
        directory under which the document's PAULA files are written
    human_readable : bool
        passed through to PaulaDocument

    Raises
    ------
    TypeError
        if output_root_dir is not a string.
    """
    # fix: was a bare `assert`, which is stripped under `python -O`;
    # also validate before doing any conversion work.
    if not isinstance(output_root_dir, str):
        raise TypeError(
            "Please specify an output directory.\nPaula documents consist"
            " of multiple files, so we can't just pipe them to STDOUT.")
    paula_document = PaulaDocument(docgraph, human_readable=human_readable)
    document_dir = os.path.join(output_root_dir, paula_document.name)
    if not os.path.isdir(document_dir):
        create_dir(document_dir)
    for paula_id in paula_document.files:
        out_path = os.path.join(document_dir, paula_id + '.xml')
        with open(out_path, 'w') as outfile:
            outfile.write(paula_etree_to_string(
                paula_document.files[paula_id],
                paula_document.file2dtd[paula_id]))
def eval_js(self, expr):
    """Evaluate a Javascript expression.

    If the page is not yet built, the expression is queued for later
    evaluation; otherwise it runs immediately and the result is converted
    to a Python value.
    """
    if not self.is_built():
        # Defer until the page exists.
        self._pending_js_eval.append(expr)
        return
    logger.log(5, "Evaluate Javascript: `%s`.", expr)
    result = self.page().mainFrame().evaluateJavaScript(expr)
    return _to_py(result)
def _from_dict ( cls , _dict ) : """Initialize a RecognitionJobs object from a json dictionary ."""
args = { } if 'recognitions' in _dict : args [ 'recognitions' ] = [ RecognitionJob . _from_dict ( x ) for x in ( _dict . get ( 'recognitions' ) ) ] else : raise ValueError ( 'Required property \'recognitions\' not present in RecognitionJobs JSON' ) return cls ( ** args )
def to_kaf(self):
    """Converts the element to NAF.

    KAF stores property identifiers in the 'pid' attribute rather than
    'id', so each property node's attribute is renamed in place.
    """
    if self.type == 'NAF':
        # convert all the properties
        for prop in self.node.findall('properties/property'):
            prop.set('pid', prop.get('id'))
            del prop.attrib['id']
def get_route(ip):
    '''
    Return routing information for given destination ip

    .. versionadded:: 2015.5.3

    .. versionchanged:: 2015.8.0
        Added support for SunOS (Solaris 10, Illumos, SmartOS)
        Added support for OpenBSD

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example::

        salt '*' network.get_route 10.10.10.10
    '''
    if __grains__['kernel'] == 'Linux':
        cmd = 'ip route get {0}'.format(ip)
        out = __salt__['cmd.run'](cmd, python_shell=True)
        # Pull gateway (optional), device and source address out of the
        # first line of `ip route get` output.
        regexp = re.compile(r'(via\s+(?P<gateway>[\w\.:]+))?\s+dev\s+(?P<interface>[\w\.\:\-]+)\s+.*src\s+(?P<source>[\w\.:]+)')
        m = regexp.search(out.splitlines()[0])
        ret = {
            'destination': ip,
            'gateway': m.group('gateway'),
            'interface': m.group('interface'),
            'source': m.group('source')
        }
        return ret

    if __grains__['kernel'] == 'SunOS':
        # Example output:
        # [root@nacl ~]# route -n get 172.16.10.123
        #   route to: 172.16.10.123
        # destination: 172.16.10.0
        #        mask: 255.255.255.0
        #   interface: net0
        #       flags: <UP,DONE,KERNEL>
        #  recvpipe sendpipe ssthresh rtt,ms rttvar,ms hopcount mtu expire
        #        0        0        0      0         0        0 1500      0
        cmd = '/usr/sbin/route -n get {0}'.format(ip)
        out = __salt__['cmd.run'](cmd, python_shell=False)
        ret = {
            'destination': ip,
            'gateway': None,
            'interface': None,
            'source': None
        }
        for line in out.splitlines():
            line = line.split(':')
            if 'route to' in line[0]:
                ret['destination'] = line[1].strip()
            if 'gateway' in line[0]:
                ret['gateway'] = line[1].strip()
            if 'interface' in line[0]:
                ret['interface'] = line[1].strip()
                # SunOS output has no source field; derive it from the
                # interface address.
                ret['source'] = salt.utils.network.interface_ip(line[1].strip())
        return ret

    if __grains__['kernel'] == 'OpenBSD':
        # Example output:
        # [root@exosphere] route -n get blackdot.be
        #    route to: 5.135.127.100
        # destination: default
        #        mask: default
        #     gateway: 192.168.0.1
        #   interface: vio0
        #  if address: 192.168.0.2
        #    priority: 8 (static)
        #       flags: <UP,GATEWAY,DONE,STATIC>
        #        use       mtu    expire
        #    8352657         0         0
        cmd = 'route -n get {0}'.format(ip)
        out = __salt__['cmd.run'](cmd, python_shell=False)
        ret = {
            'destination': ip,
            'gateway': None,
            'interface': None,
            'source': None
        }
        for line in out.splitlines():
            line = line.split(':')
            if 'route to' in line[0]:
                ret['destination'] = line[1].strip()
            if 'gateway' in line[0]:
                ret['gateway'] = line[1].strip()
            if 'interface' in line[0]:
                ret['interface'] = line[1].strip()
            if 'if address' in line[0]:
                ret['source'] = line[1].strip()
        return ret

    if __grains__['kernel'] == 'AIX':
        # Example output:
        # root@la68pp002_pub:~# route -n get 172.29.149.95
        #    route to: 172.29.149.95
        # destination: 172.29.149.95
        #     gateway: 127.0.0.1
        #   interface: lo0
        # interf addr: 127.0.0.1
        #       flags: <UP,GATEWAY,HOST,DONE,STATIC>
        #  recvpipe sendpipe ssthresh rtt,msec rttvar hopcount mtu expire
        #        0        0        0        0      0        0   0 -68642
        cmd = 'route -n get {0}'.format(ip)
        out = __salt__['cmd.run'](cmd, python_shell=False)
        ret = {
            'destination': ip,
            'gateway': None,
            'interface': None,
            'source': None
        }
        for line in out.splitlines():
            line = line.split(':')
            if 'route to' in line[0]:
                ret['destination'] = line[1].strip()
            if 'gateway' in line[0]:
                ret['gateway'] = line[1].strip()
            if 'interface' in line[0]:
                ret['interface'] = line[1].strip()
            if 'interf addr' in line[0]:
                ret['source'] = line[1].strip()
        return ret
    else:
        raise CommandExecutionError('Not yet supported on this platform')
def jwt_is_expired(self, access_token=None, leeway=0):
    """Validate JWT access token expiration.

    Args:
        access_token (str): Access token to validate. Defaults to
            ``None`` (use the token held by this client).
        leeway (float): Time in seconds to adjust for local clock skew.
            Defaults to 0.

    Returns:
        bool: ``True`` if expired, otherwise ``False``.
    """
    if access_token is None:
        exp = self.jwt_exp
    else:
        exp = self._decode_exp(access_token)
    # Expired when the skew-adjusted current time has passed `exp`.
    return exp < time() - leeway
def fast_exit(code):
    """Exit without garbage collection; this speeds up exit by about 10ms
    for things like bash completion.

    :param code: process exit status.
    """
    # os._exit() bypasses interpreter cleanup, so flush both standard
    # streams first or buffered output would be lost.
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
    os._exit(code)
async def execute_command(self, container, *args):
    '''
    Execute command on Redis server:

    - For (P)SUBSCRIBE/(P)UNSUBSCRIBE, the command is sent to the
      subscribe connection. It is recommended to use the
      (p)subscribe/(p)unsubscribe methods instead of calling the command
      directly.
    - For BLPOP, BRPOP, BRPOPLPUSH, the command is sent to a separate
      connection. The connection is recycled after the command returns.
    - For other commands, the command is sent to the default connection.
    '''
    if args:
        cmd = _str(args[0]).upper()
        if cmd in ('SUBSCRIBE', 'UNSUBSCRIBE', 'PSUBSCRIBE', 'PUNSUBSCRIBE'):
            # Route subscription management to the dedicated subscribe
            # connection (created lazily here).
            await self._get_subscribe_connection(container)
            return await self._protocol.execute_command(self._subscribeconn, container, *args)
        elif cmd in ('BLPOP', 'BRPOP', 'BRPOPLPUSH'):
            # Blocking commands would stall the shared connection, so use
            # a separate one; the context manager recycles it afterwards.
            c = await self.get_connection(container)
            with c.context(container):
                return await c.execute_command(container, *args)
    # Everything else goes over the default connection.
    return await RedisClientBase.execute_command(self, container, *args)
def process_csr(cls, common_name, csr=None, private_key=None, country=None,
                state=None, city=None, organisation=None, branch=None):
    """Create a PK and a CSR if needed.

    When ``csr`` is given, the subject options are ignored (a warning is
    echoed if any were passed). When ``csr`` names an existing file, its
    contents are returned instead of the path.
    """
    if csr:
        if branch or organisation or city or state or country:
            cls.echo('Following options are only used to generate'
                     ' the CSR.')
    else:
        subject = (('CN', common_name), ('OU', branch), ('O', organisation),
                   ('L', city), ('ST', state), ('C', country))
        subject = [(key, val) for key, val in subject if val]
        csr = cls.create_csr(common_name, private_key, subject)
    if csr and os.path.exists(csr):
        # `csr` holds a path at this point; replace it with file contents.
        with open(csr) as fcsr:
            csr = fcsr.read()
    return csr
def send_file(self, url, name, **fileinfo):
    """Send a pre-uploaded file to the room.

    See http://matrix.org/docs/spec/r0.2.0/client_server.html#m-file
    for fileinfo.

    Args:
        url (str): The mxc url of the file.
        name (str): The filename of the image.
        fileinfo (): Extra information about the file.
    """
    return self.client.api.send_content(
        self.room_id, url, name, "m.file", extra_information=fileinfo)
def asarray(self, key=None, series=None, out=None, validate=True, maxworkers=None):
    """Return image data from selected TIFF page(s) as numpy array.

    By default, the data from the first series is returned.

    Parameters
    ----------
    key : int, slice, or sequence of indices
        Which pages to return as array. If None (default), data from a
        series (default 0) is returned. Otherwise, data from the
        specified pages in the whole file (if 'series' is None) or a
        specified series are returned as a stacked array. Requesting
        pages that are not compatible w.r.t. shape, dtype, compression
        etc. is undefined behavior.
    series : int or TiffPageSeries
        Which series of pages to return as array.
    out : numpy.ndarray, str, or file-like object
        Buffer where image data will be saved. If None (default), a new
        array is created. If 'memmap', directly memory-map the image data
        in the TIFF file if possible, else create a memory-mapped array
        in a temporary file. If a str or open file, used to create a
        memory-map to an array stored in a binary file on disk.
    validate : bool
        If True (default), validate various tags (passed to
        TiffPage.asarray).
    maxworkers : int or None
        Maximum number of threads to concurrently get data from pages or
        tiles. None (default) enables multi-threading for compressed
        data; 0 uses up to half the CPU cores; 1 disables it. Reading
        from file is limited to a single thread.

    Returns
    -------
    numpy.ndarray
        Image data from the specified pages.
    """
    if not self.pages:
        return numpy.array([])
    if key is None and series is None:
        series = 0
    # Resolve the page list to read from.
    if series is None:
        pages = self.pages
    else:
        try:
            series = self.series[series]
        except (KeyError, TypeError):
            # `series` is already a TiffPageSeries instance.
            pass
        pages = series.pages
    # Narrow the page selection according to `key`.
    if key is None:
        pass
    elif series is None:
        pages = self.pages._getlist(key)
    elif isinstance(key, inttypes):
        pages = [pages[key]]
    elif isinstance(key, slice):
        pages = pages[key]
    elif isinstance(key, Iterable):
        pages = [pages[k] for k in key]
    else:
        raise TypeError('key must be an int, slice, or sequence')
    if not pages:
        raise ValueError('no pages selected')
    if key is None and series and series.offset:
        # Whole contiguous series: read (or map) it in one shot.
        typecode = self.byteorder + series.dtype.char
        if pages[0].is_memmappable and (isinstance(out, str) and out == 'memmap'):
            # direct mapping
            result = self.filehandle.memmap_array(typecode, series.shape, series.offset)
        else:
            # read into output
            if out is not None:
                out = create_output(out, series.shape, series.dtype)
            self.filehandle.seek(series.offset)
            result = self.filehandle.read_array(typecode, product(series.shape), out=out)
    elif len(pages) == 1:
        result = pages[0].asarray(out=out, validate=validate, maxworkers=maxworkers)
    else:
        result = stack_pages(pages, out=out, maxworkers=maxworkers)
    if result is None:
        return None
    # Reshape the flat data to the expected series/page shape.
    if key is None:
        try:
            result.shape = series.shape
        except ValueError:
            try:
                log.warning('TiffFile.asarray: failed to reshape %s to %s', result.shape, series.shape)
                # try series of expected shapes
                result.shape = (-1,) + series.shape
            except ValueError:
                # revert to generic shape
                result.shape = (-1,) + pages[0].shape
    elif len(pages) == 1:
        result.shape = pages[0].shape
    else:
        result.shape = (-1,) + pages[0].shape
    return result
def frequency2phase(freqdata, rate):
    """Integrate fractional frequency data and output phase data.

    Parameters
    ----------
    freqdata : np.array
        Data array of fractional frequency measurements (nondimensional)
    rate : float
        The sampling rate for phase or frequency, in Hz

    Returns
    -------
    phasedata : np.array
        Time integral of fractional frequency data, i.e. phase (time)
        data in units of seconds. For phase in units of radians, see
        phase2radians().
    """
    dt = 1.0 / float(rate)
    # Protect against NaN values in input array (issue #60);
    # reintroduces data trimming as in commit 503cb82.
    trimmed = trim_data(freqdata)
    phasedata = np.cumsum(trimmed) * dt
    # Prepend 0 so that phase starts at zero and
    # len(phase) == len(freq) + 1.  (Upstream FIXME: why do we do this?)
    return np.insert(phasedata, 0, 0)
def remove(self, list):  # parameter name shadows builtin, kept for API compatibility
    """Removes a list from the site."""
    xml = SP.DeleteList(SP.listName(list.id))
    self.opener.post_soap(
        LIST_WEBSERVICE, xml,
        soapaction='http://schemas.microsoft.com/sharepoint/soap/DeleteList')
    self.all_lists.remove(list)
def get_item_content(self, repository_id, path, project=None, scope_path=None,
                     recursion_level=None, include_content_metadata=None,
                     latest_processed_change=None, download=None,
                     version_descriptor=None, include_content=None,
                     resolve_lfs=None, **kwargs):
    """GetItemContent.
    Get Item Metadata and/or Content for a single item. The download
    parameter indicates whether the content should be available as a
    download or just sent as a stream in the response. Doesn't apply to
    zipped content, which is always returned as a download.
    :param str repository_id: The name or ID of the repository.
    :param str path: The item path.
    :param str project: Project ID or project name
    :param str scope_path: The path scope. The default is null.
    :param str recursion_level: The recursion level of this request. The default is 'none', no recursion.
    :param bool include_content_metadata: Set to true to include content metadata. Default is false.
    :param bool latest_processed_change: Set to true to include the latest changes. Default is false.
    :param bool download: Set to true to download the response as a file. Default is false.
    :param :class:`<GitVersionDescriptor> <azure.devops.v5_0.git.models.GitVersionDescriptor>` version_descriptor: Version descriptor. Default is null.
    :param bool include_content: Set to true to include item content when requesting json. Default is false.
    :param bool resolve_lfs: Set to true to resolve Git LFS pointer files to return actual content from Git LFS. Default is false.
    :rtype: object
    """
    # Path (route) parameters of the REST endpoint.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if repository_id is not None:
        route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
    # Query-string parameters; each one is only sent when explicitly provided.
    query_parameters = {}
    if path is not None:
        query_parameters['path'] = self._serialize.query('path', path, 'str')
    if scope_path is not None:
        query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str')
    if recursion_level is not None:
        query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
    if include_content_metadata is not None:
        query_parameters['includeContentMetadata'] = self._serialize.query('include_content_metadata', include_content_metadata, 'bool')
    if latest_processed_change is not None:
        query_parameters['latestProcessedChange'] = self._serialize.query('latest_processed_change', latest_processed_change, 'bool')
    if download is not None:
        query_parameters['download'] = self._serialize.query('download', download, 'bool')
    if version_descriptor is not None:
        # The version descriptor object is flattened into dotted query keys.
        if version_descriptor.version_type is not None:
            query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
        if version_descriptor.version is not None:
            query_parameters['versionDescriptor.version'] = version_descriptor.version
        if version_descriptor.version_options is not None:
            query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
    if include_content is not None:
        query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
    if resolve_lfs is not None:
        query_parameters['resolveLfs'] = self._serialize.query('resolve_lfs', resolve_lfs, 'bool')
    # Request the raw item content as an octet-stream.
    response = self._send(http_method='GET',
                          location_id='fb93c0db-47ed-4a31-8c20-47552878fb44',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          accept_media_type='application/octet-stream')
    # Optional progress callback for the streamed download.
    if "callback" in kwargs:
        callback = kwargs["callback"]
    else:
        callback = None
    return self._client.stream_download(response, callback=callback)
def _on_access_token(self, future, response):
    """Invoked as a callback when StackExchange has returned a response to
    the access token request.

    :param method future: The callback method to pass along
    :param tornado.httpclient.HTTPResponse response: The HTTP response
    """
    # NOTE(review): this logs the raw token response body at INFO level,
    # which exposes the access token in logs -- consider removing or
    # downgrading before shipping.
    LOGGER.info(response.body)
    content = escape.json_decode(response.body)
    if 'error' in content:
        # Provider rejected the exchange: fail the future and stop.
        LOGGER.error('Error fetching access token: %s', content['error'])
        future.set_exception(auth.AuthError('StackExchange auth error: %s' % str(content['error'])))
        return
    # Chain to the user-details request, carrying the token along.
    callback = self.async_callback(self._on_stackexchange_user, future,
                                   content['access_token'])
    self.stackexchange_request('me', callback, content['access_token'])
def download(self, file: Optional[IO]=None, rewind: bool=True,
             duration_timeout: Optional[float]=None) -> Response:
    '''Read the response content into file.

    Args:
        file: A file object or asyncio stream.
        rewind: Seek the given file back to its original offset after
            reading is finished.
        duration_timeout: Maximum time in seconds of which the entire
            file must be read.

    Returns:
        A Response populated with the final data connection reply.

    Be sure to call :meth:`start` first.

    Coroutine.
    '''
    if self._session_state != SessionState.file_request_sent:
        raise RuntimeError('File request not sent')
    # Remember where the caller's file was positioned so we can rewind
    # after the transfer (only for seekable file objects).
    if rewind and file and hasattr(file, 'seek'):
        original_offset = file.tell()
    else:
        original_offset = None
    # Asyncio streams expose `drain`; only plain file objects are wrapped
    # into the response body (as a Body if not one already).
    if not hasattr(file, 'drain'):
        self._response.body = file
        if not isinstance(file, Body):
            self._response.body = Body(file)
    read_future = self._commander.read_stream(file, self._data_stream)
    try:
        # Bound the whole transfer, not individual reads.
        reply = yield from asyncio.wait_for(read_future, timeout=duration_timeout)
    except asyncio.TimeoutError as error:
        raise DurationTimeout(
            'Did not finish reading after {} seconds.'
            .format(duration_timeout)
        ) from error
    self._response.reply = reply
    if original_offset is not None:
        file.seek(original_offset)
    self.event_dispatcher.notify(self.Event.end_transfer, self._response)
    self._session_state = SessionState.response_received
    return self._response
def ik_robot_eef_joint_cartesian_pose(self):
    """Return the current Cartesian pose of the last joint (link index 6)
    of the ik robot, expressed in the robot base frame.

    Returns:
        (pos, orn) tuple where orn is an x-y-z-w quaternion.
    """
    # Query the simulator once per body instead of twice (the original
    # issued duplicate getLinkState / getBasePositionAndOrientation calls
    # to extract position and orientation separately).
    eef_pos_in_world, eef_orn_in_world = p.getLinkState(self.ik_robot, 6)[:2]
    eef_pose_in_world = T.pose2mat(
        (np.array(eef_pos_in_world), np.array(eef_orn_in_world)))

    base_pos_in_world, base_orn_in_world = \
        p.getBasePositionAndOrientation(self.ik_robot)[:2]
    base_pose_in_world = T.pose2mat(
        (np.array(base_pos_in_world), np.array(base_orn_in_world)))

    # Re-express the end-effector pose in the base frame.
    world_pose_in_base = T.pose_inv(base_pose_in_world)
    eef_pose_in_base = T.pose_in_A_to_pose_in_B(
        pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base)
    return T.mat2pose(eef_pose_in_base)
def pipe(self, command, timeout=None, cwd=None):
    """Run this command (if it has not been run yet) and feed its output
    into ``command``; return the resulting Command after it finishes."""
    timeout = timeout or self.timeout
    if not self.was_run:
        self.run(block=False, cwd=cwd)
    data = self.out
    next_command = Command(command, timeout) if timeout else Command(command)
    next_command.run(block=False, cwd=cwd)
    if data:
        # Forward our captured output to the next process's stdin.
        next_command.send(data)
    next_command.block()
    return next_command
def list_unit_states(self, machine_id=None, unit_name=None):
    """Yield the current UnitState objects for the fleet cluster.

    Args:
        machine_id (str): filter all UnitState objects to those
            originating from a specific machine
        unit_name (str): filter all UnitState objects to those related
            to a specific unit

    Yields:
        UnitState: The next UnitState in the cluster

    Raises:
        fleet.v1.errors.APIError: Fleet returned a response code >= 400
    """
    pages = self._request('UnitState.List',
                          machineID=machine_id,
                          unitName=unit_name)
    for page in pages:
        for raw_state in page.get('states', []):
            yield UnitState(data=raw_state)
def assets(lon=None, lat=None, begin=None, end=None):
    '''Query the NASA Earth assets endpoint.

    HTTP REQUEST: GET https://api.nasa.gov/planetary/earth/assets

    :param lon: longitude as a float (required; 0.0 is valid)
    :param lat: latitude as a float (required; 0.0 is valid)
    :param begin: beginning of date range, YYYY-MM-DD (required)
    :param end: end of date range, YYYY-MM-DD (optional; the API
        defaults to today)

    EXAMPLE QUERY:
    https://api.nasa.gov/planetary/earth/assets?lon=100.75&lat=1.5&begin=2014-02-01&api_key=DEMO_KEY
    '''
    base_url = "https://api.nasa.gov/planetary/earth/assets?"
    # Bug fix: use explicit None checks. The old truthiness test rejected
    # the perfectly valid coordinates lon=0.0 / lat=0.0.
    if lon is None or lat is None:
        raise ValueError("assets endpoint expects lat and lon, type has to be float. Call the method with keyword args. Ex : lon=100.75, lat=1.5")
    try:
        validate_float(lon, lat)
        # Floats are stored in binary, so repr() can carry artifacts
        # (e.g. 0.1000001); Decimal gives a reliable decimal string.
        lon = decimal.Decimal(lon)
        lat = decimal.Decimal(lat)
        base_url += "lon=" + str(lon) + "&" + "lat=" + str(lat) + "&"
    except Exception:
        # Narrowed from a bare except: no longer swallows KeyboardInterrupt
        # or SystemExit.
        raise ValueError("assets endpoint expects lat and lon, type has to be float. Call the method with keyword args. Ex : lon=100.75, lat=1.5")
    if not begin:
        raise ValueError("Begin date is missing, which is mandatory. Format : YYYY-MM-DD")
    try:
        vali_date(begin)
        base_url += "begin=" + begin + "&"
    except Exception:
        raise ValueError("Incorrect date format, should be YYYY-MM-DD")
    if end:
        try:
            vali_date(end)
            base_url += "end=" + end + "&"
        except Exception:
            raise ValueError("Incorrect date format, should be YYYY-MM-DD")
    req_url = base_url + "api_key=" + nasa_api_key()
    return dispatch_http_get(req_url)
def _write_lat_lon(data_out_nc, rivid_lat_lon_z_file):
    """Add latitude and longitude to each netCDF feature.

    The lookup table is a CSV file whose first three columns must be
    rivid, lat, lon (in that order, with a header row).

    Parameters
    ----------
    data_out_nc : netCDF4 dataset
        Open, writable dataset with 'rivid', 'lat' and 'lon' variables.
    rivid_lat_lon_z_file : str
        Path to the rivid/lat/lon CSV lookup table. When falsy or
        missing, the function prints a notice and does nothing.

    Raises
    ------
    Exception
        If a rivid in the netCDF file is missing from the lookup table.
    """
    # Only add values if the user supplied a lookup file.
    if not (rivid_lat_lon_z_file and os.path.exists(rivid_lat_lon_z_file)):
        print('No comid_lat_lon_z file. Not adding values ...')
        return

    lookup_table = np.loadtxt(
        rivid_lat_lon_z_file,
        delimiter=",",
        usecols=(0, 1, 2),
        skiprows=1,
        dtype={
            'names': ('rivid', 'lat', 'lon'),
            'formats': ('i8', 'f8', 'f8'),
        },
    )

    # Work on in-memory copies, then write back in one shot.
    nc_rivids = data_out_nc.variables['rivid'][:]
    lats = data_out_nc.variables['lat'][:]
    lons = data_out_nc.variables['lon'][:]

    for nc_index, nc_rivid in enumerate(nc_rivids):
        try:
            lookup_index = np.where(lookup_table['rivid'] == nc_rivid)[0][0]
        except Exception:
            # Typo fix: 'misssing' -> 'missing'.
            raise Exception('rivid {0} missing in '
                            'comid_lat_lon_z file'.format(nc_rivid))
        lats[nc_index] = float(lookup_table['lat'][lookup_index])
        lons[nc_index] = float(lookup_table['lon'][lookup_index])

    # Overwrite the netCDF variable values.
    data_out_nc.variables['lat'][:] = lats
    data_out_nc.variables['lon'][:] = lons

    # Update geospatial-bounds metadata. Every entry was overwritten
    # above, so min/max over the arrays replaces the original per-row
    # running min/max bookkeeping.
    if len(nc_rivids):
        data_out_nc.geospatial_lat_min = float(np.min(lats))
        data_out_nc.geospatial_lat_max = float(np.max(lats))
        data_out_nc.geospatial_lon_min = float(np.min(lons))
        data_out_nc.geospatial_lon_max = float(np.max(lons))
def _head(self, client_kwargs):
    """Return the HTTP header of an object or container.

    Args:
        client_kwargs (dict): Client arguments. If an 'obj' key is
            present the object's header is fetched, otherwise the
            container's.

    Returns:
        dict: HTTP header.
    """
    with _handle_client_exception():
        if 'obj' in client_kwargs:
            # Object-level metadata.
            return self.client.head_object(**client_kwargs)
        # Container-level metadata.
        return self.client.head_container(**client_kwargs)
def convert_unicode_to_str(data):
    """Recursively normalize text types.

    Py2: translate unicode to utf-8 encoded str.
    Py3: translate bytes to unicode str.

    Mappings and iterables are rebuilt with converted keys/values/items.

    :param data: arbitrary, possibly nested, object
    :return: converted object with the same container shape
    """
    # The ABC aliases on the collections module itself were removed in
    # Python 3.10; import from collections.abc when available while
    # keeping Python 2 compatibility.
    try:
        from collections import abc as _abc
    except ImportError:  # Python 2
        _abc = collections
    if six.PY2 and isinstance(data, six.text_type):
        return data.encode('utf8')
    elif six.PY3 and isinstance(data, six.binary_type):
        return data.decode('utf8')
    elif isinstance(data, _abc.Mapping):
        return dict((Util.convert_unicode_to_str(k),
                     Util.convert_unicode_to_str(v))
                    for k, v in six.iteritems(data))
    elif isinstance(data, _abc.Iterable) and not isinstance(
            data, (six.binary_type, six.string_types)):
        # Rebuild the container with the same concrete type.
        return type(data)(map(Util.convert_unicode_to_str, data))
    return data
def update(self, **kwargs):
    u"""Update or create simple child nodes.

    Each keyword is used as a tag name (after alias resolution) and its
    value as the element text.
    """
    for key, value in kwargs.items():
        # Pick a text converter for the value's type, defaulting to str.
        to_text = helpers.CAST_DICT.get(type(value), str)
        tag = self._get_aliases().get(key, key)
        elements = list(self._xml.iterchildren(tag=tag))
        if elements:
            for element in elements:
                element.text = to_text(value)
        else:
            # Bug fix: create the new element with the alias-resolved tag
            # name, not the raw keyword -- the lookup above used `tag`,
            # so creating with `key` produced mismatched node names
            # whenever an alias was defined.
            element = etree.Element(tag)
            element.text = to_text(value)
            self._xml.append(element)
    # Invalidate the cached alias map.
    self._aliases = None
def new_ipy(s=''):
    """Create a new IPython kernel (optionally with extra arguments).

    XXX: Allow passing of profile information here

    Examples
    --------
    new_ipy()
    """
    # The old IPython.kernel package is deprecated; import the manager
    # from jupyter_client / ipykernel instead.
    from jupyter_client.manager import KernelManager
    manager = KernelManager()
    manager.start_kernel()
    return km_from_string(manager.connection_file)
def insertData ( self , offset : int , string : str ) -> None : """Insert ` ` string ` ` at offset on this node ."""
self . _insert_data ( offset , string )
def del_permission(self, name):
    """Delete a permission record from the backend.

    :param name: name of the permission: 'can_add', 'can_edit' etc...
    """
    perm = self.find_permission(name)
    if not perm:
        return
    try:
        self.get_session.delete(perm)
        self.get_session.commit()
    except Exception as e:
        # Roll back so the session stays usable after a failed delete.
        log.error(c.LOGMSG_ERR_SEC_DEL_PERMISSION.format(str(e)))
        self.get_session.rollback()
def import_prices(self, prices: List[PriceModel]):
    """Import prices (e.g. parsed from csv).

    Returns a dict mapping each price's symbol to the result of
    ``import_price`` for that price (later duplicates win).
    """
    return {price.symbol: self.import_price(price) for price in prices}
def get_all_subdomains(self, offset=None, count=None, min_sequence=None,
                       cur=None):
    """Get all distinct subdomain names, optionally over a range.

    :param offset: number of rows to skip from the start
    :param count: maximum number of rows to return
    :param min_sequence: only include rows with sequence >= this value
    :param cur: optional existing cursor to reuse
    :return: list of fully-qualified subdomain names
    """
    get_cmd = 'SELECT DISTINCT fully_qualified_subdomain FROM {}'.format(
        self.subdomain_table)
    args = ()
    if min_sequence is not None:
        get_cmd += ' WHERE sequence >= ?'
        args += (min_sequence,)
    if count is not None:
        get_cmd += ' LIMIT ?'
        args += (count,)
    elif offset is not None:
        # Bug fix: SQLite does not accept OFFSET without LIMIT. LIMIT -1
        # means "no limit" and makes a bare offset valid SQL.
        get_cmd += ' LIMIT -1'
    if offset is not None:
        get_cmd += ' OFFSET ?'
        args += (offset,)
    get_cmd += ';'

    cursor = self.conn.cursor() if cur is None else cur
    rows = db_query_execute(cursor, get_cmd, args)
    return [row['fully_qualified_subdomain'] for row in rows]
def insert(self, i, x):
    """Insert an item (x) at a given position (i)."""
    if i == len(self):
        # Appending at the end (also covers the empty-list case).
        self.append(x)
        return
    if len(self.matches) <= i:
        raise IndexError("Can't insert '%s' at index %d - list length is only %d"
                         % (x, i, len(self)))
    # Create a fresh xml node at the requested position, then reuse the
    # default item-assignment logic to populate it.
    insert_index = self.matches[i].getparent().index(self.matches[i])
    _create_xml_node(self.xast, self.node, self.context, insert_index)
    self[i] = x
def from_file(xmu_dat_file="xmu.dat", feff_inp_file="feff.inp"):
    """Get Xmu from file.

    Args:
        xmu_dat_file (str): filename and path for xmu.dat
        feff_inp_file (str): filename and path of feff.inp input file

    Returns:
        Xmu object
    """
    # Raw spectral data from the feff output.
    data = np.loadtxt(xmu_dat_file)
    header = Header.from_file(feff_inp_file)
    parameters = Tags.from_file(feff_inp_file)
    pots = Potential.pot_string_from_file(feff_inp_file)
    # Absorbing atom: for reciprocal-space calculations this is the site
    # index from the TARGET tag (Note: in feff it starts from 1);
    # otherwise it is the species symbol parsed from the potentials block.
    if "RECIPROCAL" in parameters:
        absorbing_atom = parameters["TARGET"]
    else:
        absorbing_atom = pots.splitlines()[3].split()[2]
    return Xmu(header, parameters, absorbing_atom, data)
def plot_bhist(samples, file_type, **plot_args):
    """Create line graph plot of histogram data for BBMap 'bhist' output.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Union of all read positions (x values) seen in any sample.
    all_x = set()
    for item in sorted(chain(*[samples[sample]['data'].items()
                               for sample in samples])):
        all_x.add(item[0])
    # Column index -> base label, grouped by data set (one plot tab each).
    columns_to_plot = {
        'GC': {
            1: 'C',
            2: 'G',
        },
        'AT': {
            0: 'A',
            3: 'T',
        },
        'N': {
            4: 'N'
        },
    }
    # One dict per data set: '<sample>.<base>' -> {x: percentage}.
    # Positions missing from a sample are plotted as 0.
    nucleotide_data = []
    for column_type in columns_to_plot:
        nucleotide_data.append(
            {
                sample + '.' + column_name: {
                    x: samples[sample]['data'][x][column] * 100
                    if x in samples[sample]['data'] else 0
                    for x in all_x
                }
                for sample in samples
                for column, column_name in columns_to_plot[column_type].items()
            }
        )
    plot_params = {
        'id': 'bbmap-' + file_type + '_plot',
        'title': 'BBTools: ' + plot_args['plot_title'],
        'xlab': 'Read position',
        'ymin': 0,
        'ymax': 100,
        'data_labels': [
            {'name': 'Percentage of G+C bases'},
            {'name': 'Percentage of A+T bases'},
            {'name': 'Percentage of N bases'},
        ]
    }
    # Caller-supplied parameters override the defaults above.
    plot_params.update(plot_args['plot_params'])
    plot = linegraph.plot(
        nucleotide_data,
        plot_params
    )
    return plot
def createEditor(self, parent, option, index):
    """Returns the widget used to edit the item specified by index for
    editing. The parent widget and style option are used to control how
    the editor widget appears.

    Args:
        parent (QWidget): parent widget.
        option (QStyleOptionViewItem): controls how editor widget appears.
        index (QModelIndex): model data index.

    Returns:
        QtGui.QLineEdit: a plain line-edit editor
            (``option`` and ``index`` are not used here).
    """
    editor = QtGui.QLineEdit(parent)
    return editor
def integer_ceil(a, b):
    '''Return the ceil integer of a div b.'''
    whole, remainder = divmod(a, b)
    # Round up only when the division is inexact.
    return whole + 1 if remainder else whole
def _recv_get_response(self, method_frame):
    '''Handle either get_ok or get_empty. This is a hack because the
    synchronous callback stack is expecting one method to satisfy the
    expectation. To keep that loop as tight as possible, work within
    those constraints. Use of get is not recommended anyway.
    '''
    # Method ids within the AMQP basic class: 71 dispatches to the
    # get-ok handler, 72 to the get-empty handler.
    if method_frame.method_id == 71:
        return self._recv_get_ok(method_frame)
    elif method_frame.method_id == 72:
        return self._recv_get_empty(method_frame)
    # Any other method id falls through and implicitly returns None.
def hash64(key, seed):
    """Wrapper around mmh3.hash64 to get us a single 64-bit value.

    Also re-interprets the signed native-order result as a big-endian
    unsigned long, like smhasher used to do.
    """
    signed_value = mmh3.hash64(key, seed)[0]
    # Pack as native signed, unpack as big-endian unsigned.
    packed = struct.pack('q', signed_value)
    return struct.unpack('>Q', packed)[0]
def log_prob(self, hidden):
    r"""Computes log probabilities for all :math:`n\_classes`.

    Adapted from
    https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py

    Args:
        hidden (Tensor): a minibatch of examples

    Returns:
        log-probabilities of for each class :math:`c` in range
        :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
        parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.

    Shape:
        - Input: :math:`(N, in\_features)`
        - Output: :math:`(N, n\_classes)`
    """
    if self.n_clusters == 0:
        # No tail clusters: a single softmax over the full vocabulary.
        logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                    self.out_layers[0].bias,
                                    self.out_projs[0])
        return F.log_softmax(logit, dim=-1)

    # Construct per-cluster weights and biases.
    weights, biases = [], []
    for i in range(len(self.cutoffs)):
        if self.div_val == 1:
            l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
            weight_i = self.out_layers[0].weight[l_idx:r_idx]
            bias_i = self.out_layers[0].bias[l_idx:r_idx]
        else:
            weight_i = self.out_layers[i].weight
            bias_i = self.out_layers[i].bias
        if i == 0:
            # The head additionally predicts one "route" logit per cluster.
            weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
            bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
        weights.append(weight_i)
        biases.append(bias_i)

    head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
    head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
    out = hidden.new_empty((head_logit.size(0), self.n_token))
    head_logprob = F.log_softmax(head_logit, dim=1)

    cutoff_values = [0] + self.cutoffs
    for i in range(len(cutoff_values) - 1):
        start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
        if i == 0:
            # Shortlist tokens come straight from the head softmax.
            out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
        else:
            weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
            tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
            tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
            # Bug fixes vs. the original:
            #  * index the head log-prob of the i-th cluster, which sits
            #    at column cutoffs[0] + i - 1 (clusters are appended after
            #    the shortlist, cf. torch.nn.AdaptiveLogSoftmaxWithLoss),
            #    instead of column -i, and unsqueeze so it broadcasts
            #    over the tail vocabulary;
            #  * write into the slice start_idx:stop_idx instead of the
            #    invalid 3-index expression out[:, start_idx, stop_idx].
            cluster_logprob = head_logprob[:, self.cutoffs[0] + i - 1]
            logprob_i = cluster_logprob.unsqueeze(1) + tail_logprob_i
            out[:, start_idx:stop_idx] = logprob_i
    return out
def assertFileEncodingNotEqual(self, filename, encoding, msg=None):
    '''Fail if ``filename`` is encoded with the given ``encoding`` as
    determined by the '!=' operator.

    Parameters
    ----------
    filename : str, bytes, file-like
    encoding : str, bytes
    msg : str
        If not provided, the :mod:`marbles.mixins` or :mod:`unittest`
        standard message will be used.

    Raises
    ------
    TypeError
        If ``filename`` is not a str or bytes object and is not
        file-like.
    '''
    actual_encoding = self._get_file_encoding(filename)
    display_name = self._get_file_name(filename)
    standardMsg = '%s is %s encoded' % (display_name, encoding)
    # Compare case-insensitively ('UTF-8' == 'utf-8').
    self.assertNotEqual(actual_encoding.lower(), encoding.lower(),
                        self._formatMessage(msg, standardMsg))
def RQ_sigma(self, sigma):
    """Given a policy `sigma`, return the reward vector `R_sigma` and
    the transition probability matrix `Q_sigma`.

    Parameters
    ----------
    sigma : array_like(int, ndim=1)
        Policy vector, of length n.

    Returns
    -------
    R_sigma : ndarray(float, ndim=1)
        Reward vector for `sigma`, of length n.
    Q_sigma : ndarray(float, ndim=2)
        Transition probability matrix for `sigma`, of shape (n, n).
    """
    if self._sa_pair:
        # Map each (state, chosen action) to its row index in the flat
        # state-action-pair representation.
        sigma = np.asarray(sigma)
        sigma_indices = np.empty(self.num_states, dtype=int)
        _find_indices(self.a_indices, self.a_indptr, sigma,
                      out=sigma_indices)
        return self.R[sigma_indices], self.Q[sigma_indices]
    # Dense representation: fancy-index by (state, chosen action).
    states = np.arange(self.num_states)
    return self.R[states, sigma], self.Q[states, sigma]
def set_theme(self, theme):
    """Pick a palette from the list of supported THEMES.

    :param theme: The name of the theme to set. Unknown theme names are
        silently ignored.
    """
    if theme not in THEMES:
        return
    self.palette = THEMES[theme]
    if self._scroll_bar:
        # TODO: fix protected access.
        self._scroll_bar._palette = self.palette
def _update_statechanges(storage: SQLiteStorage):
    """Backfill the fee fields introduced by this migration.

    * Every ContractReceiveChannelNew state change gets
      ``channel_state.mediation_fee = '0'``.
    * Every ActionInitInitiator state change gets
      ``transfer.allocated_fee = '0'``.
    """
    batch_size = 50

    def _add_default_fee(type_, container_key, fee_key):
        # One batched pass per state-change type; this deduplicates the
        # two near-identical loops of the original (and fixes the
        # 'medation_fee' typo in its assertion message).
        batch_query = storage.batch_query_state_changes(
            batch_size=batch_size,
            filters=[
                ('_type', type_),
            ],
        )
        for state_changes_batch in batch_query:
            updated_state_changes = list()
            for state_change in state_changes_batch:
                data = json.loads(state_change.data)
                msg = 'v20 state change should not already contain {}'.format(
                    fee_key)
                assert fee_key not in data[container_key], msg
                data[container_key][fee_key] = '0'
                updated_state_changes.append((
                    json.dumps(data),
                    state_change.state_change_identifier,
                ))
            storage.update_state_changes(updated_state_changes)

    _add_default_fee(
        'raiden.transfer.state_change.ContractReceiveChannelNew',
        'channel_state',
        'mediation_fee',
    )
    _add_default_fee(
        'raiden.transfer.mediated_transfer.state_change.ActionInitInitiator',
        'transfer',
        'allocated_fee',
    )
def _phrase_doc_stream(paths, n, tokenizer=word_tokenize):
    """Generator to feed sentences to the phrase model.

    Iterates over every line of every file in ``paths``, lowercases it,
    splits it into sentences, and yields each sentence's token list.
    ``n`` is the expected total line count used for progress reporting.
    """
    progress = Progress()
    lines_seen = 0
    for path in paths:
        with open(path, 'r') as handle:
            for line in handle:
                lines_seen += 1
                progress.print_progress(lines_seen / n)
                for sentence in sent_tokenize(line.lower()):
                    yield tokenizer(sentence)
def enter_diff_mode(self, context_model=None):
    """Enter diff mode.

    Args:
        context_model (`ContextModel`): Context to diff against. If
            None, a copy of the current context is used.
    """
    assert not self.diff_mode
    self.diff_mode = True
    # Track whether we are diffing against our own (copied) context or
    # an externally supplied one.
    self.diff_from_source = context_model is None
    if context_model is None:
        self.diff_context_model = self.context_model.copy()
    else:
        self.diff_context_model = context_model
    self.clear()
    self.setColumnCount(5)
    self.refresh()
def write(write_entry: FILE_WRITE_ENTRY):
    """Write the contents of the specified file entry to its destination
    path, creating the output directory first if needed."""
    destination = environ.paths.clean(write_entry.path)
    make_output_directory(destination)
    writer.write_file(destination, write_entry.contents)
def create(cls, title, conn=None, google_user=None, google_password=None):
    """Create a new spreadsheet with the given ``title`` and return an
    instance of this class wrapping it."""
    connection = Connection.connect(conn=conn, google_user=google_user,
                                    google_password=google_password)
    resource = connection.docs_client.CreateResource(
        Resource(type='spreadsheet', title=title))
    # The resource id has the form '...%3A<key>'; keep only the key part.
    sheet_id = resource.id.text.rsplit('%3A', 1)[-1]
    return cls(sheet_id, connection, resource=resource)
def use_comparative_sequence_rule_enabler_rule_view(self):
    """Pass through to provider
    SequenceRuleEnablerRuleLookupSession.use_comparative_sequence_rule_enabler_rule_view"""
    self._object_views['sequence_rule_enabler_rule'] = COMPARATIVE
    # self._get_provider_session('sequence_rule_enabler_rule_lookup_session')
    # To make sure the session is tracked
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_sequence_rule_enabler_rule_view()
        except AttributeError:
            # Not every provider session supports this view; skip those.
            pass
def count_distinct_characters(string: str) -> int:
    """Given a string, find out how many distinct characters (regardless
    of case) it consists of.

    :param string: String to be analyzed
    :type string: str
    :return: Count of distinct characters
    :rtype: int

    >>> count_distinct_characters('xyzXYZ')
    3
    >>> count_distinct_characters('Jerry')
    4
    """
    # Lowercasing first makes the count case-insensitive; the set
    # comprehension drops duplicates.
    distinct = {character for character in string.lower()}
    return len(distinct)
def nrmse_range(simulated_array, observed_array, replace_nan=None,
                replace_inf=None, remove_neg=False, remove_zero=False):
    """Compute the range-normalized root mean square error (NRMSE).

    Range: 0 <= NRMSE < inf.

    Notes: the RMSE is normalized by the range (max - min) of the
    observed time series, which allows comparison between data sets with
    different scales. NRMSE-range is the most outlier-sensitive of the
    three normalized RMSE metrics.

    Parameters
    ----------
    simulated_array : one dimensional ndarray
        An array of simulated data from the time series.
    observed_array : one dimensional ndarray
        An array of observed data from the time series.
    replace_nan : float, optional
        If given, value used to replace NaNs in both arrays; if None,
        pairs containing a NaN are removed before the computation.
    replace_inf : float, optional
        If given, value used to replace infs in both arrays; if None,
        pairs containing an inf are removed before the computation.
    remove_neg : boolean, optional
        If True, pairs containing a negative value are removed before
        the computation.
    remove_zero : boolean, optional
        If True, pairs containing a zero value are removed before the
        computation.

    Returns
    -------
    float
        The range normalized root mean square error value.

    Examples
    --------
    >>> import HydroErr as he
    >>> import numpy as np
    >>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
    >>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
    >>> he.nrmse_range(sim, obs)
    0.0891108340256152

    References
    ----------
    - Pontius, R.G., Thontteh, O., Chen, H., 2008. Components of
      information for multiple resolution comparison between maps that
      share a real variable. Environmental and Ecological Statistics
      15(2) 111-142.
    """
    # Check and clean the data first.
    simulated_array, observed_array = treat_values(
        simulated_array,
        observed_array,
        replace_nan=replace_nan,
        replace_inf=replace_inf,
        remove_neg=remove_neg,
        remove_zero=remove_zero,
    )
    squared_errors = (simulated_array - observed_array) ** 2
    rmse_value = np.sqrt(np.mean(squared_errors))
    observed_range = np.max(observed_array) - np.min(observed_array)
    return rmse_value / observed_range
def setup_logging(verbose=0, colors=False, name=None):
    """Configure console logging. Info and below go to stdout, others go
    to stderr.

    :param int verbose: Verbosity level. > 0 print debug statements.
        > 1 passed to sphinx-build.
    :param bool colors: Print color text in non-verbose mode.
    :param str name: Which logger name to set handlers to. Used for
        testing.
    """
    root_logger = logging.getLogger(name)
    root_logger.setLevel(logging.DEBUG if verbose > 0 else logging.INFO)
    formatter = ColorFormatter(verbose > 0, colors)
    if colors:
        colorclass.Windows.enable()

    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(formatter)
    stdout_handler.setLevel(logging.DEBUG)
    # Only records at INFO and below go to stdout; the rest go to stderr.
    stdout_handler.addFilter(type('', (logging.Filter,), {
        'filter': staticmethod(lambda r: r.levelno <= logging.INFO)}))
    root_logger.addHandler(stdout_handler)

    stderr_handler = logging.StreamHandler(sys.stderr)
    stderr_handler.setFormatter(formatter)
    stderr_handler.setLevel(logging.WARNING)
    root_logger.addHandler(stderr_handler)
def live(self, kill_port=False, check_url=None):
    """Start a live server in a separate process and check that it is running.

    :param bool kill_port: If ``True``, processes running on the same port as
        ``self.port`` will be killed.
    :param str check_url: URL where to check whether the server is running.
        Default is ``"http://{self.host}:{self.port}"``.
    """
    pid = port_in_use(self.port, kill_port)
    if pid:
        raise LiveAndLetDieError(
            'Port {0} is already being used by process {1}!'.format(self.port, pid))

    host = str(self.host)
    if not re.match(_VALID_HOST_PATTERN, host):
        raise LiveAndLetDieError('{0} is not a valid host!'.format(host))

    with open(os.devnull, "w") as devnull:
        # Optionally silence the child's output; preexec_fn=os.setsid starts
        # the child in its own session (so it can later be signalled as a group).
        if self.suppress_output:
            self.process = subprocess.Popen(self.create_command(),
                                            stderr=devnull,
                                            stdout=devnull,
                                            preexec_fn=os.setsid)
        else:
            self.process = subprocess.Popen(self.create_command(),
                                            preexec_fn=os.setsid)

    _log(self.logging, 'Starting process PID: {0}'.format(self.process.pid))
    duration = self.check(check_url)
    _log(self.logging,
         'Live server started in {0} seconds. PID: {1}'.format(
             duration, self.process.pid))
    return self.process
def check_solution(self) -> bool:
    """Check whether the found solution reproduces the input bitstring map.

    :return: True if solution correctly reproduces input bitstring map
    :rtype: Bool
    """
    if self.solution is None:
        raise AssertionError("You need to `run` this algorithm first")
    # Rebuild the bitmap from the solution and compare entry by entry.
    expected_map = create_bv_bitmap(*self.solution)
    return all(expected_map[key] == value
               for key, value in self.input_bitmap.items())
def _remove_overlaps(in_file, out_dir, data):
    """Remove regions that overlap with the next region.

    Overlapping regions result in issues with PureCN. When two consecutive
    BED lines overlap (same chromosome, previous end > current start), the
    previous line is dropped.

    :param in_file: tab-separated input file (chrom, start, end, ...)
    :param out_dir: directory for the filtered output file
    :param data: pipeline data/config object passed to file_transaction
    :return: path of the output file (created only if not up to date)
    """
    out_file = os.path.join(
        out_dir,
        "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file)))
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(data, out_file) as tx_out_file:
            with open(in_file) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    prev_line = None
                    for line in in_handle:
                        if prev_line:
                            pchrom, pstart, pend = prev_line.split("\t", 4)[:3]
                            cchrom, cstart, cend = line.split("\t", 4)[:3]
                            # Drop the previous region when it runs into the
                            # current one on the same chromosome.
                            if pchrom == cchrom and int(pend) > int(cstart):
                                pass
                            else:
                                out_handle.write(prev_line)
                        prev_line = line
                    # Bug fix: guard against an empty input file, where
                    # prev_line is still None (write(None) raises TypeError).
                    if prev_line is not None:
                        out_handle.write(prev_line)
    return out_file
def verify_signature(self, signature_filename, data_filename, keystore=None):
    """Verify a signature for a file.

    :param signature_filename: The pathname to the file containing the signature.
    :param data_filename: The pathname to the file containing the signed data.
    :param keystore: The path to a directory which contains the keys used in
                     verification. If not specified, the instance's
                     ``gpg_home`` attribute is used instead.
    :return: True if the signature was verified, else False.
    """
    if not self.gpg:
        raise DistlibException('verification unavailable because gpg '
                               'unavailable')
    command = self.get_verify_command(signature_filename, data_filename,
                                      keystore)
    rc, stdout, stderr = self.run_command(command)
    # Exit codes 0 and 1 are the expected verified/not-verified outcomes;
    # anything else means the verify command itself failed.
    if rc not in (0, 1):
        raise DistlibException('verify command failed with error '
                               'code %s' % rc)
    return rc == 0
def call(self, name, options=None, o=None):
    """Call another command by name.

    :param name: The command name
    :type name: str
    :param options: The options
    :type options: list or None
    :param o: The output
    :type o: cleo.outputs.output.Output
    """
    command = self.get_application().find(name)
    # The target command's own name is always the first input option.
    input_options = [('command', command.get_name())]
    if options is not None:
        input_options += options
    return command.run(ListInput(input_options), o)
def getControllerStateWithPose(self, eOrigin, unControllerDeviceIndex,
                               unControllerStateSize=sizeof(VRControllerState_t)):
    """Return the current controller state plus the pose the controller had
    when that state was most recently updated.

    Use this form if you need a precise controller pose as input to your
    application when the user presses or releases a button. This function is
    deprecated in favor of the new IVRInput system.
    """
    fn = self.function_table.getControllerStateWithPose
    # Output structs filled in-place by the native call.
    controller_state = VRControllerState_t()
    device_pose = TrackedDevicePose_t()
    outcome = fn(eOrigin, unControllerDeviceIndex, byref(controller_state),
                 unControllerStateSize, byref(device_pose))
    return outcome, controller_state, device_pose
def _division ( divisor , dividend , remainder , base ) : """Get the quotient and remainder : param int divisor : the divisor : param dividend : the divident : type dividend : sequence of int : param int remainder : initial remainder : param int base : the base : returns : quotient and remainder : rtype : tuple of ( list of int ) * int Complexity : O ( log _ { divisor } ( quotient ) )"""
quotient = [ ] for value in dividend : remainder = remainder * base + value ( quot , rem ) = divmod ( remainder , divisor ) quotient . append ( quot ) if quot > 0 : remainder = rem return ( quotient , remainder )
def get_bibtex(identifier):
    """Try to fetch BibTeX from a found identifier.

    .. note::

        Calls the ``get_bibtex`` function in the respective identifiers module.

    :param identifier: a tuple (type, identifier) with a valid type.
    :returns: A BibTeX string or ``None`` if an error occurred.
    """
    id_type, id_value = identifier
    if id_type not in __valid_identifiers__:
        return None
    # Look up the already-imported submodule that handles this identifier type.
    handler = sys.modules.get("libbmc.%s" % (id_type,), None)
    if handler is None:
        return None
    return getattr(handler, "get_bibtex")(id_value)
def messages(self):
    '''A generator yielding the :class:`MacIndexMessage` structures in this
    index file.'''
    # Fixed-size message structures start immediately after the file header;
    # the minimal message information (subject, from, to) at the end of the
    # file is ignored. Walk forward one structure at a time until the number
    # of messages in this folder has been produced.
    end = self.header_length + self.total_messages * MacIndexMessage.LENGTH
    for offset in range(self.header_length, end, MacIndexMessage.LENGTH):
        yield MacIndexMessage(mm=self.mmap, offset=offset)
def full_installation(self, location=None):
    """Return the full details of the installation."""
    url = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
           "/{0}/installationInfo?includeTemperatureControlSystems=True"
           .format(self._get_location(location)))
    reply = requests.get(url, headers=self._headers())
    # Surface HTTP errors instead of returning a bad payload.
    reply.raise_for_status()
    return reply.json()
def _lw_decode ( self , msg ) : """LW : temperatures from all keypads and zones 1-16."""
keypad_temps = [ ] zone_temps = [ ] for i in range ( 16 ) : keypad_temps . append ( int ( msg [ 4 + 3 * i : 7 + 3 * i ] ) - 40 ) zone_temps . append ( int ( msg [ 52 + 3 * i : 55 + 3 * i ] ) - 60 ) return { 'keypad_temps' : keypad_temps , 'zone_temps' : zone_temps }
def _to_dict ( self ) : """Return a json dictionary representing this model ."""
_dict = { } if hasattr ( self , 'credential_type' ) and self . credential_type is not None : _dict [ 'credential_type' ] = self . credential_type if hasattr ( self , 'client_id' ) and self . client_id is not None : _dict [ 'client_id' ] = self . client_id if hasattr ( self , 'enterprise_id' ) and self . enterprise_id is not None : _dict [ 'enterprise_id' ] = self . enterprise_id if hasattr ( self , 'url' ) and self . url is not None : _dict [ 'url' ] = self . url if hasattr ( self , 'username' ) and self . username is not None : _dict [ 'username' ] = self . username if hasattr ( self , 'organization_url' ) and self . organization_url is not None : _dict [ 'organization_url' ] = self . organization_url if hasattr ( self , 'site_collection_path' ) and self . site_collection_path is not None : _dict [ 'site_collection.path' ] = self . site_collection_path if hasattr ( self , 'client_secret' ) and self . client_secret is not None : _dict [ 'client_secret' ] = self . client_secret if hasattr ( self , 'public_key_id' ) and self . public_key_id is not None : _dict [ 'public_key_id' ] = self . public_key_id if hasattr ( self , 'private_key' ) and self . private_key is not None : _dict [ 'private_key' ] = self . private_key if hasattr ( self , 'passphrase' ) and self . passphrase is not None : _dict [ 'passphrase' ] = self . passphrase if hasattr ( self , 'password' ) and self . password is not None : _dict [ 'password' ] = self . password if hasattr ( self , 'gateway_id' ) and self . gateway_id is not None : _dict [ 'gateway_id' ] = self . gateway_id if hasattr ( self , 'source_version' ) and self . source_version is not None : _dict [ 'source_version' ] = self . source_version if hasattr ( self , 'web_application_url' ) and self . web_application_url is not None : _dict [ 'web_application_url' ] = self . web_application_url if hasattr ( self , 'domain' ) and self . domain is not None : _dict [ 'domain' ] = self . domain if hasattr ( self , 'endpoint' ) and self . 
endpoint is not None : _dict [ 'endpoint' ] = self . endpoint if hasattr ( self , 'access_key_id' ) and self . access_key_id is not None : _dict [ 'access_key_id' ] = self . access_key_id if hasattr ( self , 'secret_access_key' ) and self . secret_access_key is not None : _dict [ 'secret_access_key' ] = self . secret_access_key return _dict
def compute_ecc_params(max_block_size, rate, hasher):
    '''Compute the ecc parameters (size of the message, size of the hash,
    size of the ecc) from a resilience rate, to instanciate an ECCMan object.

    The rate applies to the message size only: the ecc size k satisfies
    k = 2 * rate * message_size, i.e. message_size + 2 * rate * message_size
    = max_block_size, hence message_size = max_block_size / (1 + 2 * rate).
    (The old formula applied the rate to message + ecc, which was incorrect.)
    '''
    message_size = int(round(float(max_block_size) / (1 + 2 * rate), 0))
    return {
        "message_size": message_size,
        # Whatever is not message is error-correction code.
        "ecc_size": max_block_size - message_size,
        # e.g. 32 when we use MD5 (hex digest length).
        "hash_size": len(hasher),
    }
def validate_results(self, results, checks=None):
    '''Validate results from the Ansible run.

    Bug fixes versus the original:
    - Python-2 ``print`` statements (a syntax error on Python 3) converted
      to the print() function.
    - ``print "msg %s", x`` never interpolated the value; now uses ``%``.
    - ``check.keys()[0]`` / ``check.values()[0]`` fail on Python 3 dict
      views; replaced with ``next(iter(...))`` style access.
    - The dark-host key view was appended as a single nested element;
      unreachable hosts are now recorded individually via extend().

    :param results: Ansible result dict with 'dark' (unreachable hosts) and
        'contacted' (per-host result dicts); mutated in place — a 'status'
        key is set to 'PASS' or 'FAIL'.
    :param checks: optional list of single-entry {key: value} dicts; every
        contacted host reporting ``key`` must match ``value``.
    :return: tuple of (results, failed_hosts)
    '''
    results['status'] = 'PASS'
    failed_hosts = []

    # First validation: connectivity to all the hosts was ok.
    if results['dark']:
        print("Host connectivity issues on %s" % list(results['dark'].keys()))
        failed_hosts.extend(results['dark'].keys())
        results['status'] = 'FAIL'

    # Now look for status 'failed'.
    for node in results['contacted']:
        if results['contacted'][node].get('failed') is True:
            results['status'] = 'FAIL'

    # Check the return code 'rc' for each host.
    for node in results['contacted']:
        rc = results['contacted'][node].get('rc', None)
        if rc is not None and rc != 0:
            print("Operation 'return code' %s on host %s" % (rc, node))
            failed_hosts.append(node)
            results['status'] = 'FAIL'

    # Additional checks: list of key/value pairs that should be matched.
    if checks is None:
        return results, failed_hosts

    for check in checks:
        key = next(iter(check))
        value = check[key]
        for node in results['contacted']:
            if key in results['contacted'][node]:
                if results['contacted'][node][key] != value:
                    failed_hosts.append(node)
                    results['status'] = 'FAIL'

    return (results, failed_hosts)
def read_file(self, path, **kwargs):
    """Read file input into memory, returning deserialized objects.

    :param path: Path of file to read
    :return: the parsed data, or None when the file cannot be read/parsed
    """
    try:
        return Parser().parse_file(path)
    except (IOError, TypeError, ImportError):
        # Best effort: log and signal failure with None instead of raising.
        LOGGER.warning("Error reading file: {0}".format(path))
        return None
def get_config(self, hostname):
    """Return the configuration for *hostname*.

    Looks up the association for the host and fetches (version, config)
    from the backend, discarding the version.
    """
    return self._get(self.associations.get(hostname))[1]
def assert_variable_type(variable, expected_type, raise_exception=True):
    """Check that a variable is of a certain type (or one of several types).

    Positional arguments:
    variable -- the variable to be checked
    expected_type -- the expected type or types of the variable
    raise_exception -- whether to raise an exception or just return False on
                       failure, with error message
    """
    # Normalize the expected type(s) into a list.
    candidates = expected_type if isinstance(expected_type, list) else [expected_type]
    # Validate that every candidate is actually a type.
    for candidate in candidates:
        if not isinstance(candidate, type):
            raise ValueError('expected_type argument "%s" is not a type' % str(candidate))
    # Validate the raise_exception flag itself.
    if not isinstance(raise_exception, bool):
        raise ValueError('raise_exception argument "%s" is not a bool' % str(raise_exception))
    # Success path: the variable matches at least one candidate type.
    if any(isinstance(variable, candidate) for candidate in candidates):
        return True, None
    error_message = '"%s" is not an instance of type %s. It is of type %s' % (
        str(variable),
        ' or '.join([str(t) for t in candidates]),
        str(type(variable)))
    if raise_exception:
        raise ValueError(error_message)
    return False, error_message
def Realization(M, C, *args, **kwargs):
    """f = Realization(M, C[, init_mesh, init_vals, check_repeats=True, regularize=True])

    Return a realization from a Gaussian process with mean function ``M``
    and covariance ``C``. A :class:`BasisCovariance` produces a
    ``BasisRealization``; any other covariance produces a
    ``StandardRealization``. Remaining arguments are forwarded unchanged.

    :Arguments:
        - `M`: A Gaussian process mean function.
        - `C`: A Covariance instance.
        - `init_mesh`: An optional ndarray giving the mesh at which f's
          initial value will be specified.
        - `init_vals`: An ndarray giving the value of f over init_mesh.
        - `regularize`: If init_mesh is not shaped as (n, ndim), where ndim
          is the dimension of the space, regularize should be True.
        - `check_repeats`: Determines whether calls to the GP realization
          will be checked against previous calls before evaluation.

    :SeeAlso: Mean, Covariance, BasisCovariance, observe, GP
    """
    factory = BasisRealization if isinstance(C, BasisCovariance) else StandardRealization
    return factory(M, C, *args, **kwargs)
def dogeo_V ( indat ) : """Rotates declination and inclination into geographic coordinates using the azimuth and plunge of the X direction ( lab arrow ) of a specimen . Parameters indat : nested list of [ dec , inc , az , pl ] data Returns rotated _ directions : arrays of Declinations and Inclinations"""
indat = indat . transpose ( ) # unpack input array into separate arrays dec , inc , az , pl = indat [ 0 ] , indat [ 1 ] , indat [ 2 ] , indat [ 3 ] Dir = np . array ( [ dec , inc ] ) . transpose ( ) X = dir2cart ( Dir ) . transpose ( ) # get cartesian coordinates N = np . size ( dec ) A1 = dir2cart ( np . array ( [ az , pl , np . ones ( N ) ] ) . transpose ( ) ) . transpose ( ) A2 = dir2cart ( np . array ( [ az + 90. , np . zeros ( N ) , np . ones ( N ) ] ) . transpose ( ) ) . transpose ( ) A3 = dir2cart ( np . array ( [ az - 180. , 90. - pl , np . ones ( N ) ] ) . transpose ( ) ) . transpose ( ) # do rotation xp = A1 [ 0 ] * X [ 0 ] + A2 [ 0 ] * X [ 1 ] + A3 [ 0 ] * X [ 2 ] yp = A1 [ 1 ] * X [ 0 ] + A2 [ 1 ] * X [ 1 ] + A3 [ 1 ] * X [ 2 ] zp = A1 [ 2 ] * X [ 0 ] + A2 [ 2 ] * X [ 1 ] + A3 [ 2 ] * X [ 2 ] cart = np . array ( [ xp , yp , zp ] ) . transpose ( ) # transform back to dec , inc Dir_geo = cart2dir ( cart ) . transpose ( ) # send back declination and inclination arrays return Dir_geo [ 0 ] , Dir_geo [ 1 ]
def send_generic_message(self, recipient_id, elements,
                         notification_type=NotificationType.regular):
    """Send generic (template) messages to the specified recipient.

    https://developers.facebook.com/docs/messenger-platform/send-api-reference/generic-template

    Input:
        recipient_id: recipient id to send to
        elements: generic message elements to send
    Output:
        Response from API as <dict>
    """
    # Build the generic-template attachment payload expected by the API.
    payload = {
        "attachment": {
            "type": "template",
            "payload": {
                "template_type": "generic",
                "elements": elements
            }
        }
    }
    return self.send_message(recipient_id, payload, notification_type)
def make_function(function, name, arity):
    """Make a function node, a representation of a mathematical relationship.

    This factory function creates a function node, one of the core nodes in
    any program. The resulting object is able to be called with NumPy
    vectorized arguments and return a resulting vector based on a
    mathematical relationship.

    Improvement: the candidate ``function`` is evaluated once per argument
    set and the result reused, instead of being called three times for the
    shape checks.

    Parameters
    ----------
    function : callable
        A function with signature `function(x1, *args)` that returns a Numpy
        array of the same shape as its arguments.
    name : str
        The name for the function as it should be represented in the program
        and its visualizations.
    arity : int
        The number of arguments that the `function` takes.
    """
    if not isinstance(arity, int):
        raise ValueError('arity must be an int, got %s' % type(arity))
    if not isinstance(function, np.ufunc):
        # Plain Python callables must declare exactly `arity` arguments.
        if function.__code__.co_argcount != arity:
            raise ValueError('arity %d does not match required number of '
                             'function arguments of %d.'
                             % (arity, function.__code__.co_argcount))
    if not isinstance(name, str):
        raise ValueError('name must be a string, got %s' % type(name))

    # Check output shape: evaluate once on vectors of ones and reuse.
    args = [np.ones(10) for _ in range(arity)]
    try:
        result = function(*args)
    except ValueError:
        raise ValueError('supplied function %s does not support arity of %d.'
                         % (name, arity))
    if not hasattr(result, 'shape'):
        raise ValueError('supplied function %s does not return a numpy array.'
                         % name)
    if result.shape != (10,):
        raise ValueError('supplied function %s does not return same shape as '
                         'input vectors.' % name)

    # Check closure against zeros in argument vectors.
    args = [np.zeros(10) for _ in range(arity)]
    if not np.all(np.isfinite(function(*args))):
        raise ValueError('supplied function %s does not have closure against '
                         'zeros in argument vectors.' % name)
    # Check closure against negative input arguments.
    args = [-1 * np.ones(10) for _ in range(arity)]
    if not np.all(np.isfinite(function(*args))):
        raise ValueError('supplied function %s does not have closure against '
                         'negatives in argument vectors.' % name)

    return _Function(function, name, arity)
def select(self, channels, dataframe=False, record_offset=0, raw=False,
           copy_master=True):
    """Retrieve the channels listed in *channels* as *Signal* objects.

    Bug fix: in the multi-fragment path, the invalidation bits were
    concatenated from ``part[0]`` (the samples) instead of ``part[1]``.

    Parameters
    ----------
    channels : list
        list of items to be filtered; each item can be:

        * a channel name string
        * (channel name, group index, channel index) list or tuple
        * (channel name, group index) list or tuple
        * (None, group index, channel index) list or tuple

    dataframe : bool
        return a pandas DataFrame instead of a list of *Signals*; in this
        case the signals will be interpolated using the union of all
        timestamps; default *False*
    record_offset : int
        record number offset; optimization to get the last part of signal
        samples
    raw : bool
        get raw channel samples; default *False*
    copy_master : bool
        option to get a new timestamps array for each selected Signal or to
        use a shared array for channels of the same channel group; default
        *True*

    Returns
    -------
    signals : list or pandas.DataFrame
        list of *Signal* objects based on the input channel list, or a
        DataFrame indexed by the union of all timestamps when *dataframe*
        is True
    """
    # Group the requested channel indexes by owning channel group so each
    # group's data blocks are loaded only once; `indexes` preserves the
    # caller's requested ordering for the final result.
    gps = {}
    indexes = []
    for item in channels:
        if isinstance(item, (list, tuple)):
            if len(item) not in (2, 3):
                raise MdfException(
                    "The items used for filtering must be strings, "
                    "or they must match the first 3 argumens of the get "
                    "method"
                )
            else:
                group, index = self._validate_channel_selection(*item)
                indexes.append((group, index))
                if group not in gps:
                    gps[group] = {index}
                else:
                    gps[group].add(index)
        else:
            name = item
            group, index = self._validate_channel_selection(name)
            indexes.append((group, index))
            if group not in gps:
                gps[group] = {index}
            else:
                gps[group].add(index)

    signals = {}

    for group in gps:
        grp = self.groups[group]
        # Data may arrive in several fragments; collect per-fragment sample
        # and master pieces, then concatenate after the loop.
        data = self._load_data(grp, record_offset=record_offset)
        parents, dtypes = self._prepare_record(grp)
        channel_indexes = gps[group]

        signal_parts = defaultdict(list)
        master_parts = []
        sigs = {}

        for i, fragment in enumerate(data):
            if dtypes.itemsize:
                grp.record = np.core.records.fromstring(
                    fragment[0], dtype=dtypes
                )
            else:
                grp.record = None
            if i == 0:
                # First fragment: fetch full Signal objects so the channel
                # metadata (unit, conversion, source, ...) is available.
                for index in channel_indexes:
                    signal = self.get(
                        group=group,
                        index=index,
                        data=fragment,
                        copy_master=False,
                        raw=True,
                        ignore_invalidation_bits=True,
                    )
                    sigs[index] = signal
                    signal_parts[(group, index)].append(
                        (signal.samples, signal.invalidation_bits)
                    )
                # NOTE(review): uses the last retrieved signal's timestamps
                # for this fragment — presumably all channels in a group
                # share the same master; confirm.
                master_parts.append(signal.timestamps)
            else:
                # Subsequent fragments: samples only, metadata already known.
                for index in channel_indexes:
                    signal = self.get(
                        group=group,
                        index=index,
                        data=fragment,
                        copy_master=False,
                        raw=True,
                        samples_only=True,
                        ignore_invalidation_bits=True,
                    )
                    signal_parts[(group, index)].append(signal)
                master_parts.append(
                    self.get_master(group, data=fragment, copy_master=False)
                )
            grp.record = None

        pieces = len(master_parts)
        if pieces > 1:
            master = np.concatenate(master_parts)
        else:
            master = master_parts[0]
        master_parts = None

        pairs = list(signal_parts.keys())
        for pair in pairs:
            group, index = pair
            parts = signal_parts.pop(pair)
            sig = sigs.pop(index)
            if pieces > 1:
                samples = np.concatenate([part[0] for part in parts])
                if sig.invalidation_bits is not None:
                    # Bug fix: the invalidation bits are the second element
                    # of each part (the original concatenated part[0], i.e.
                    # the samples, here).
                    invalidation_bits = np.concatenate(
                        [part[1] for part in parts]
                    )
                else:
                    invalidation_bits = None
            else:
                samples = parts[0][0]
                if sig.invalidation_bits is not None:
                    invalidation_bits = parts[0][1]
                else:
                    invalidation_bits = None

            signals[pair] = Signal(
                samples=samples,
                timestamps=master,
                name=sig.name,
                unit=sig.unit,
                bit_count=sig.bit_count,
                attachment=sig.attachment,
                comment=sig.comment,
                conversion=sig.conversion,
                display_name=sig.display_name,
                encoding=sig.encoding,
                master_metadata=sig.master_metadata,
                raw=True,
                source=sig.source,
                stream_sync=sig.stream_sync,
                invalidation_bits=invalidation_bits,
            )

    # Re-order the signals to match the caller's requested channel order.
    signals = [signals[pair] for pair in indexes]

    if copy_master:
        for signal in signals:
            signal.timestamps = signal.timestamps.copy()

    if not raw:
        # Apply each signal's conversion, then clear it so the returned
        # samples are treated as physical values.
        for signal in signals:
            conversion = signal.conversion
            if conversion:
                signal.samples = conversion.convert(signal.samples)
            signal.conversion = None

    if dataframe:
        interpolation_mode = self._integer_interpolation
        # Interpolate every signal onto the union of all timestamps.
        times = [s.timestamps for s in signals]
        master = reduce(np.union1d, times).flatten().astype(np.float64)
        signals = [
            s.interp(master, interpolation_mode=interpolation_mode)
            for s in signals
        ]

        df = pd.DataFrame()
        df["time"] = pd.Series(master, index=np.arange(len(master)))
        df.set_index('time', inplace=True)

        used_names = UniqueDB()
        used_names.get_unique_name("time")

        for k, sig in enumerate(signals):
            # byte arrays: wrap into a record array and store as objects
            if len(sig.samples.shape) > 1:
                arr = [sig.samples]
                types = [
                    (sig.name, sig.samples.dtype, sig.samples.shape[1:])
                ]
                sig.samples = np.core.records.fromarrays(arr, dtype=types)
                channel_name = used_names.get_unique_name(sig.name)
                df[channel_name] = pd.Series(
                    sig.samples, index=master, dtype="O"
                )
            # arrays and structures: expand into one column per component
            elif sig.samples.dtype.names:
                for name, series in components(
                    sig.samples, sig.name, used_names, master=master
                ):
                    df[name] = series
            # scalars
            else:
                channel_name = used_names.get_unique_name(sig.name)
                df[channel_name] = pd.Series(sig.samples, index=master)

        return df
    else:
        return signals
def flush_mod(self):
    """Flush all pending LDAP modifications."""
    for dn in self.__pending_mod_dn__:
        try:
            if self.__ro__:
                # Read-only mode: describe what would be modified instead of
                # applying the changes.
                for mod in self.__mod_queue__[dn]:
                    if mod[0] == ldap.MOD_DELETE:
                        operation = "DELETE"
                    elif mod[0] == ldap.MOD_ADD:
                        operation = "ADD"
                    else:
                        operation = "REPLACE"
                    print("{} VALUE {} = {} FOR {}".format(
                        operation, mod[1], mod[2], dn))
            else:
                self.__con__.modify_s(dn, self.__mod_queue__[dn])
        except (ldap.TYPE_OR_VALUE_EXISTS, ldap.NO_SUCH_ATTRIBUTE):
            # Report the conflict and keep the queued entry in place.
            print("Error! Conflicting Batch Modification: %s"
                  % str(self.__mod_queue__[dn]))
            continue
        self.__mod_queue__[dn] = None
    self.__pending_mod_dn__ = []
def count(cls, path=None, objtype=None, query=None, **kwargs):
    """Return the number of objects that match the query.

    Like __init__, but simply counts matching objects rather than returning
    them. The path and objtype parameters allow direct use of the
    DatabaseCollection class; subclasses normally override OBJTYPE or PATH
    instead of passing them here.

    @param path: the path of the database to query, in the form
        "database.colletion"; pass None to use the class PATH (or, failing
        that, OBJTYPE.PATH)
    @param objtype: the object type to use for these DatabaseObjects; pass
        None to use the OBJTYPE property of the class
    @param query: a dictionary of key-value pairs the result must match; if
        None, kwargs is used in its place
    @param **kwargs: used as query parameters if query is None
    @raise Exception: if path, PATH, and OBJTYPE.PATH are all None
    """
    # Fall back to class-level defaults for anything not supplied.
    resolved_type = objtype if objtype else cls.OBJTYPE
    resolved_path = path if path else cls.PATH
    resolved_query = query if query else kwargs
    return resolved_type.db(resolved_path).find(resolved_query).count()