signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def delete_instance(
        self,
        name,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
):
    """Delete a specific Redis instance.

    The instance stops serving and its data is deleted.

    Example:
        >>> from google.cloud import redis_v1beta1
        >>> client = redis_v1beta1.CloudRedisClient()
        >>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
        >>> response = client.delete_instance(name)
        >>> # Block on the result, or attach a callback:
        >>> # response.add_done_callback(lambda fut: fut.result())

    Args:
        name (str): Required. Redis instance resource name using the form
            ``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
            where ``location_id`` refers to a GCP region.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.redis_v1beta1.types._OperationFuture` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the transport method with retry/timeout logic on first use.
    api_call = self._inner_api_calls.get("delete_instance")
    if api_call is None:
        api_call = google.api_core.gapic_v1.method.wrap_method(
            self.transport.delete_instance,
            default_retry=self._method_configs["DeleteInstance"].retry,
            default_timeout=self._method_configs["DeleteInstance"].timeout,
            client_info=self._client_info,
        )
        self._inner_api_calls["delete_instance"] = api_call

    request = cloud_redis_pb2.DeleteInstanceRequest(name=name)
    operation = api_call(request, retry=retry, timeout=timeout, metadata=metadata)
    # Wrap the raw long-running operation in a future resolving to Empty.
    return google.api_core.operation.from_gapic(
        operation,
        self.transport._operations_client,
        empty_pb2.Empty,
        metadata_type=any_pb2.Any,
    )
def _fetch_error_descriptions(self, error_dict):
    """Collect the description string of every error entry.

    :type error_dict: dict[str, list[dict[str, str]]]
    :rtype: list[str]
    """
    return [
        error[self._FIELD_ERROR_DESCRIPTION]
        for error in error_dict[self._FIELD_ERROR]
    ]
def get_files_in_current_directory(file_type):
    """List files in the current directory tree for upload.

    Respects a ``.floydignore`` file if present. Each entry is a
    ``(file_type, (unix_path, open_binary_handle, 'text/plain'))`` tuple.

    :return: (list of file tuples, total size of the files in bytes)
    """
    matched_files = []
    size_total = 0
    ignore_list, whitelist = FloydIgnoreManager.get_lists()
    floyd_logger.debug("Ignoring: %s", ignore_list)
    floyd_logger.debug("Whitelisting: %s", whitelist)
    for file_path in get_unignored_file_paths(ignore_list, whitelist):
        # NOTE(review): handles are intentionally left open — the caller
        # streams them during upload.
        payload = (unix_style_path(file_path), open(file_path, 'rb'), 'text/plain')
        matched_files.append((file_type, payload))
        size_total += os.path.getsize(file_path)
    return (matched_files, size_total)
async def open(self):
    """Register with the publisher, then forward queued messages until finished."""
    self.store.register(self)
    while not self.finished:
        await self.publish(await self.messages.get())
def create_object(request, model=None, template_name=None,
                  template_loader=loader, extra_context=None,
                  post_save_redirect=None, login_required=False,
                  context_processors=None, form_class=None):
    """Generic object-creation view.

    Templates: ``<app_label>/<model_name>_form.html``
    Context:
        form
            the form for the object
    """
    if extra_context is None:
        extra_context = {}
    if login_required and not request.user.is_authenticated:
        return redirect_to_login(request.path)

    model, form_class = get_model_and_form_class(model, form_class)
    if request.method == 'POST':
        # Bound form; falls through to re-render on validation failure.
        form = form_class(request.POST, request.FILES)
        if form.is_valid():
            new_object = form.save()
            msg = ugettext("The %(verbose_name)s was created successfully.") % {
                "verbose_name": model._meta.verbose_name}
            messages.success(request, msg, fail_silently=True)
            return redirect(post_save_redirect, new_object)
    else:
        form = form_class()

    # Create the template, context, response
    if not template_name:
        template_name = "%s/%s_form.html" % (
            model._meta.app_label, model._meta.object_name.lower())
    template = template_loader.get_template(template_name)
    context = {'form': form}
    apply_extra_context(extra_context, context)
    return HttpResponse(template.render(context=context, request=request))
def pipeline_repr(obj):
    """Return a string representation of *obj*, including pieshell pipelines.

    Tracks repr nesting depth in ``repr_state.in_repr`` so pipeline objects
    can detect that they are being repr()'d.
    """
    repr_state.in_repr = getattr(repr_state, 'in_repr', 0) + 1
    try:
        return standard_repr(obj)
    finally:
        # Always unwind the depth counter, even if repr raised.
        repr_state.in_repr -= 1
def _set_session_cookie(self):
    """Persist the current session id in a secure cookie."""
    session_id = self.session.id
    LOGGER.debug('Setting session cookie for %s', session_id)
    self.set_secure_cookie(
        name=self._session_cookie_name,
        value=session_id,
        expires=self._cookie_expiration)
def search(self, spec, operator):
    '''Query PyPI via its XML-RPC interface using the given search spec.'''
    # XML-RPC expects a lower-case boolean operator ("and"/"or").
    normalized_operator = operator.lower()
    return self.xmlrpc.search(spec, normalized_operator)
def isClientCert(self, name):
    '''Check whether a user client certificate (PKCS12) exists.

    Args:
        name (str): The name of the user keypair.

    Examples:
        Check if the client certificate "myuser" exists:

            exists = cdir.isClientCert('myuser')

    Returns:
        bool: True if the certificate is present, False otherwise.
    '''
    p12_path = self._getPathJoin('users', '%s.p12' % name)
    return os.path.isfile(p12_path)
def printFunc(name, m):
    """Print a summary of model *m* under the heading *name*.

    Shows whether an objective is set (and its sense), plus the rounded
    value of every variable except the one named "n".
    """
    print("* %s *" % name)
    has_objective = bool(m.getObjective().terms.keys())
    print("* Is objective set? %s" % has_objective)
    if has_objective:
        print("* Sense: %s" % m.getObjectiveSense())
    for var in m.getVars():
        if var.name != "n":
            print("%s: %d" % (var, round(m.getVal(var))))
    print("\n")
def __ensure_provisioning_writes(table_name, key_name, num_consec_write_checks):
    """Ensure that provisioning of writes is correct.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (bool, int, int) -- update_needed, updated_write_units,
        num_consec_write_checks
    """
    # Nothing to do when write autoscaling is disabled for this table.
    if not get_table_option(key_name, 'enable_writes_autoscaling'):
        logger.info(
            '{0} - Autoscaling of writes has been disabled'.format(table_name))
        return False, dynamodb.get_provisioned_table_write_units(table_name), 0

    update_needed = False
    try:
        # Gather current table state, CloudWatch statistics and every
        # configuration option relevant to write scaling.
        lookback_window_start = get_table_option(key_name, 'lookback_window_start')
        lookback_period = get_table_option(key_name, 'lookback_period')
        current_write_units = dynamodb.get_provisioned_table_write_units(
            table_name)
        consumed_write_units_percent = \
            table_stats.get_consumed_write_units_percent(
                table_name, lookback_window_start, lookback_period)
        throttled_write_count = table_stats.get_throttled_write_event_count(
            table_name, lookback_window_start, lookback_period)
        throttled_by_provisioned_write_percent = \
            table_stats.get_throttled_by_provisioned_write_event_percent(
                table_name, lookback_window_start, lookback_period)
        throttled_by_consumed_write_percent = \
            table_stats.get_throttled_by_consumed_write_percent(
                table_name, lookback_window_start, lookback_period)
        writes_upper_threshold = get_table_option(
            key_name, 'writes_upper_threshold')
        writes_lower_threshold = get_table_option(
            key_name, 'writes_lower_threshold')
        throttled_writes_upper_threshold = get_table_option(
            key_name, 'throttled_writes_upper_threshold')
        increase_writes_unit = get_table_option(key_name, 'increase_writes_unit')
        increase_writes_with = get_table_option(key_name, 'increase_writes_with')
        decrease_writes_unit = get_table_option(key_name, 'decrease_writes_unit')
        decrease_writes_with = get_table_option(key_name, 'decrease_writes_with')
        min_provisioned_writes = get_table_option(
            key_name, 'min_provisioned_writes')
        max_provisioned_writes = get_table_option(
            key_name, 'max_provisioned_writes')
        num_write_checks_before_scale_down = get_table_option(
            key_name, 'num_write_checks_before_scale_down')
        num_write_checks_reset_percent = get_table_option(
            key_name, 'num_write_checks_reset_percent')
        increase_throttled_by_provisioned_writes_unit = get_table_option(
            key_name, 'increase_throttled_by_provisioned_writes_unit')
        increase_throttled_by_provisioned_writes_scale = get_table_option(
            key_name, 'increase_throttled_by_provisioned_writes_scale')
        increase_throttled_by_consumed_writes_unit = get_table_option(
            key_name, 'increase_throttled_by_consumed_writes_unit')
        increase_throttled_by_consumed_writes_scale = get_table_option(
            key_name, 'increase_throttled_by_consumed_writes_scale')
        increase_consumed_writes_unit = get_table_option(
            key_name, 'increase_consumed_writes_unit')
        increase_consumed_writes_with = get_table_option(
            key_name, 'increase_consumed_writes_with')
        increase_consumed_writes_scale = get_table_option(
            key_name, 'increase_consumed_writes_scale')
        decrease_consumed_writes_unit = get_table_option(
            key_name, 'decrease_consumed_writes_unit')
        decrease_consumed_writes_with = get_table_option(
            key_name, 'decrease_consumed_writes_with')
        decrease_consumed_writes_scale = get_table_option(
            key_name, 'decrease_consumed_writes_scale')
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    # Set the updated units to the current write unit value
    updated_write_units = current_write_units

    # Reset consecutive write count if num_write_checks_reset_percent
    # is reached
    if num_write_checks_reset_percent:
        if consumed_write_units_percent >= num_write_checks_reset_percent:
            logger.info(
                '{0} - Resetting the number of consecutive '
                'write checks. Reason: Consumed percent {1} is '
                'greater than reset percent: {2}'.format(
                    table_name,
                    consumed_write_units_percent,
                    num_write_checks_reset_percent))
            num_consec_write_checks = 0

    # Exit if up scaling has been disabled
    if not get_table_option(key_name, 'enable_writes_up_scaling'):
        logger.debug(
            '{0} - Up scaling event detected. No action taken as scaling '
            'up writes has been disabled in the configuration'.format(
                table_name))
    else:
        # If local/granular values not specified use global values
        increase_consumed_writes_unit = \
            increase_consumed_writes_unit or increase_writes_unit
        increase_throttled_by_provisioned_writes_unit = (
            increase_throttled_by_provisioned_writes_unit or
            increase_writes_unit)
        increase_throttled_by_consumed_writes_unit = \
            increase_throttled_by_consumed_writes_unit or increase_writes_unit
        increase_consumed_writes_with = \
            increase_consumed_writes_with or increase_writes_with

        # Initialise variables to store calculated provisioning
        throttled_by_provisioned_calculated_provisioning = scale_reader(
            increase_throttled_by_provisioned_writes_scale,
            throttled_by_provisioned_write_percent)
        throttled_by_consumed_calculated_provisioning = scale_reader(
            increase_throttled_by_consumed_writes_scale,
            throttled_by_consumed_write_percent)
        consumed_calculated_provisioning = scale_reader(
            increase_consumed_writes_scale, consumed_write_units_percent)
        throttled_count_calculated_provisioning = 0
        calculated_provisioning = 0

        # Increase needed due to high throttled to provisioned ratio
        if throttled_by_provisioned_calculated_provisioning:
            if increase_throttled_by_provisioned_writes_unit == 'percent':
                throttled_by_provisioned_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        throttled_by_provisioned_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                throttled_by_provisioned_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        throttled_by_provisioned_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Increase needed due to high throttled to consumed ratio
        if throttled_by_consumed_calculated_provisioning:
            if increase_throttled_by_consumed_writes_unit == 'percent':
                throttled_by_consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        throttled_by_consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                throttled_by_consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        throttled_by_consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Increase needed due to high CU consumption
        if consumed_calculated_provisioning:
            if increase_consumed_writes_unit == 'percent':
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
        elif (writes_upper_threshold
              and consumed_write_units_percent > writes_upper_threshold
              and not increase_consumed_writes_scale):
            if increase_consumed_writes_unit == 'percent':
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        increase_consumed_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        increase_consumed_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Increase needed due to high throttling
        if (throttled_writes_upper_threshold
                and throttled_write_count > throttled_writes_upper_threshold):
            if increase_writes_unit == 'percent':
                throttled_count_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        updated_write_units,
                        increase_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                throttled_count_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        updated_write_units,
                        increase_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Determine which metric requires the most scaling
        if (throttled_by_provisioned_calculated_provisioning
                > calculated_provisioning):
            calculated_provisioning = \
                throttled_by_provisioned_calculated_provisioning
            scale_reason = (
                "due to throttled events by provisioned "
                "units threshold being exceeded")
        if (throttled_by_consumed_calculated_provisioning
                > calculated_provisioning):
            calculated_provisioning = \
                throttled_by_consumed_calculated_provisioning
            scale_reason = (
                "due to throttled events by consumed "
                "units threshold being exceeded")
        if consumed_calculated_provisioning > calculated_provisioning:
            calculated_provisioning = consumed_calculated_provisioning
            scale_reason = "due to consumed threshold being exceeded"
        if throttled_count_calculated_provisioning > calculated_provisioning:
            calculated_provisioning = throttled_count_calculated_provisioning
            scale_reason = "due to throttled events threshold being exceeded"

        if calculated_provisioning > current_write_units:
            logger.info(
                '{0} - Resetting the number of consecutive '
                'write checks. Reason: scale up {1}'.format(
                    table_name, scale_reason))
            num_consec_write_checks = 0
            update_needed = True
            updated_write_units = calculated_provisioning

    # Decrease needed due to low CU consumption
    if not update_needed:
        # If local/granular values not specified use global values
        decrease_consumed_writes_unit = \
            decrease_consumed_writes_unit or decrease_writes_unit
        decrease_consumed_writes_with = \
            decrease_consumed_writes_with or decrease_writes_with

        # Initialise variables to store calculated provisioning
        consumed_calculated_provisioning = scale_reader_decrease(
            decrease_consumed_writes_scale, consumed_write_units_percent)
        calculated_provisioning = None

        # Exit if down scaling has been disabled
        if not get_table_option(key_name, 'enable_writes_down_scaling'):
            logger.debug(
                '{0} - Down scaling event detected. No action taken as scaling'
                ' down writes has been disabled in the configuration'.format(
                    table_name))
        # Exit if writes == 0% and downscaling has been disabled at 0%
        elif (consumed_write_units_percent == 0 and not get_table_option(
                key_name, 'allow_scaling_down_writes_on_0_percent')):
            logger.info(
                '{0} - Down scaling event detected. No action taken as scaling'
                ' down writes is not done when usage is at 0%'.format(
                    table_name))
        # Exit if writes are still throttled
        elif (throttled_writes_upper_threshold
              and throttled_write_count > throttled_writes_upper_threshold):
            logger.info(
                '{0} - Down scaling event detected. No action taken as there'
                ' are still throttled writes'.format(table_name))
        else:
            if consumed_calculated_provisioning:
                if decrease_consumed_writes_unit == 'percent':
                    calculated_provisioning = \
                        calculators.decrease_writes_in_percent(
                            updated_write_units,
                            consumed_calculated_provisioning,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)
                else:
                    calculated_provisioning = \
                        calculators.decrease_writes_in_units(
                            updated_write_units,
                            consumed_calculated_provisioning,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)
            elif (writes_lower_threshold
                  and consumed_write_units_percent < writes_lower_threshold
                  and not decrease_consumed_writes_scale):
                if decrease_consumed_writes_unit == 'percent':
                    calculated_provisioning = \
                        calculators.decrease_writes_in_percent(
                            updated_write_units,
                            decrease_consumed_writes_with,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)
                else:
                    calculated_provisioning = \
                        calculators.decrease_writes_in_units(
                            updated_write_units,
                            decrease_consumed_writes_with,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)

            # Only scale down after enough consecutive under-utilised checks.
            if (calculated_provisioning
                    and current_write_units != calculated_provisioning):
                num_consec_write_checks += 1
                if num_consec_write_checks >= \
                        num_write_checks_before_scale_down:
                    update_needed = True
                    updated_write_units = calculated_provisioning

    # Never go over the configured max provisioning
    if max_provisioned_writes:
        if int(updated_write_units) > int(max_provisioned_writes):
            update_needed = True
            updated_write_units = int(max_provisioned_writes)
            logger.info(
                'Will not increase writes over max-provisioned-writes '
                'limit ({0} writes)'.format(updated_write_units))

    # Ensure that we have met the min-provisioning
    if min_provisioned_writes:
        if int(min_provisioned_writes) > int(updated_write_units):
            update_needed = True
            updated_write_units = int(min_provisioned_writes)
            logger.info(
                '{0} - Increasing writes to meet min-provisioned-writes '
                'limit ({1} writes)'.format(
                    table_name, updated_write_units))

    # Do not propose an update when consumption already exceeds the proposal.
    if calculators.is_consumed_over_proposed(
            current_write_units,
            updated_write_units,
            consumed_write_units_percent):
        update_needed = False
        updated_write_units = current_write_units
        logger.info(
            '{0} - Consumed is over proposed write units. Will leave table at '
            'current setting.'.format(table_name))

    logger.info('{0} - Consecutive write checks {1}/{2}'.format(
        table_name,
        num_consec_write_checks,
        num_write_checks_before_scale_down))

    return update_needed, updated_write_units, num_consec_write_checks
def shutdown(self, message=None):
    """Disconnect all servers with a message.

    Args:
        message (str): Quit message to use on each connection.
    """
    # Only the connection objects are needed; the names are irrelevant here.
    for server in self.servers.values():
        server.quit(message)
def count_collision(number_of_cars: int):
    """Count meetings between opposing cars on an infinite linear road.

    ``n`` cars drive left-to-right and ``n`` cars drive right-to-left at the
    same speed; collisions do not stop any car. Every left-bound car therefore
    meets every right-bound car exactly once, giving ``n * n`` meetings.

    :param number_of_cars: The number of cars setting off from each direction.
    :return: The total number of collisions between cars from different
        directions.
    """
    return number_of_cars * number_of_cars
def decode_dict_keys(d, coding="utf-8"):
    """Convert all byte-string keys to unicode (recursively).

    Python 2 only: byte-string (``str``) keys are decoded with *coding*, and
    dict values are converted recursively. Returns a new dict.
    """
    assert compat.PY2
    decoded = {}
    for key, value in d.items():
        # Exact type checks (not isinstance) are deliberate here.
        if type(key) is str:
            key = key.decode(coding)
        if type(value) is dict:
            value = decode_dict_keys(value, coding)
        decoded[key] = value
    return decoded
def do_work(self):
    """Run a single connection iteration.

    This will return `True` if the connection is still open and ready to be
    used for further work, or `False` if it needs to be shut down.

    :rtype: bool
    :raises: TimeoutError or ~uamqp.errors.ClientTimeout if CBS
     authentication timeout reached.
    """
    if self._shutdown:
        return False
    # Not yet ready: keep the connection alive without doing work.
    return self._client_run() if self.client_ready() else True
def bulk_create_awards(objects, batch_size=500, post_save_signal=True):
    """Save award objects in bulk, optionally emitting post_save signals."""
    count = len(objects)
    if not count:
        return
    badge = objects[0].badge
    try:
        Award.objects.bulk_create(objects, batch_size=batch_size)
        if post_save_signal:
            # bulk_create skips signals, so fire them manually.
            for award in objects:
                signals.post_save.send(
                    sender=award.__class__, instance=award, created=True)
    except IntegrityError:
        logger.error('✘ Badge %s: IntegrityError for %d awards', badge.slug, count)
def build(self, **kwargs):
    """Create the operation and associate tasks.

    :param dict kwargs: operation data
    :return: the controller
    :rtype: kser.sequencing.controller.OperationController
    """
    new_tasks = self.compute_tasks(**kwargs)
    # In-place extend: other references to self.tasks must see the update.
    self.tasks += new_tasks
    return self.finalize()
def init_optimizer(self):
    """Initializes query optimizer state.

    There are 4 internal hash tables:
        1. from type to declarations
        2. from type to declarations for non-recursive queries
        3. from type to name to declarations
        4. from type to name to declarations for non-recursive queries

    Almost every query includes declaration type information. Another very
    common query is searching declaration(s) by name or full name. These
    hash tables allow such declarations to be found very quickly.
    """
    if self.name == '::':
        # Only log/time the full run that starts at the global namespace.
        self._logger.debug(
            "preparing data structures for query optimizer - started")
    start_time = timeit.default_timer()

    self.clear_optimizer()

    # Pre-create an empty bucket for every known declaration type.
    for dtype in scopedef_t._impl_all_decl_types:
        self._type2decls[dtype] = []
        self._type2decls_nr[dtype] = []
        self._type2name2decls[dtype] = {}
        self._type2name2decls_nr[dtype] = {}

    self._all_decls_not_recursive = self.declarations
    self._all_decls = make_flatten(self._all_decls_not_recursive)
    for decl in self._all_decls:
        types = self.__decl_types(decl)
        for type_ in types:
            self._type2decls[type_].append(decl)
            name2decls = self._type2name2decls[type_]
            if decl.name not in name2decls:
                name2decls[decl.name] = []
            name2decls[decl.name].append(decl)
            if self is decl.parent:
                # Declarations owned directly by this scope also feed the
                # non-recursive ("_nr") lookup tables.
                self._type2decls_nr[type_].append(decl)
                name2decls_nr = self._type2name2decls_nr[type_]
                if decl.name not in name2decls_nr:
                    name2decls_nr[decl.name] = []
                name2decls_nr[decl.name].append(decl)

    # Recurse into nested scopes so they build their own tables.
    for decl in self._all_decls_not_recursive:
        if isinstance(decl, scopedef_t):
            decl.init_optimizer()

    if self.name == '::':
        self._logger.debug((
            "preparing data structures for query optimizer - " +
            "done( %f seconds ). "),
            (timeit.default_timer() - start_time))
    self._optimized = True
def _array_safe_dict_eq(one_dict, other_dict):
    """Compare dicts whose values may be numpy arrays.

    Scalar values are compared with ``==``; array values (which raise
    ValueError on truth-testing) fall back to ``numpy.allclose``; nested
    dicts are compared recursively.

    :param one_dict: dict whose keys drive the comparison
    :param other_dict: dict compared against ``one_dict``
    :return: bool
    """
    for field in one_dict:
        try:
            assert one_dict[field] == other_dict[field]
        except ValueError:
            # Arrays cannot be truth-tested; compare element-wise instead.
            if isinstance(one_dict[field], dict):
                assert FitResults._array_safe_dict_eq(
                    one_dict[field], other_dict[field])
            else:
                assert np.allclose(one_dict[field], other_dict[field])
        except AssertionError:
            return False
    return True
def _statsd_address(self):
    """Return the (host, port) of the statsd server to send stats to.

    Falls back to the ``STATSD_HOST`` / ``STATSD_PORT`` defaults when the
    application settings do not configure statsd.

    :return: tuple (host, port)
    """
    statsd_settings = self.application.settings.get('statsd', {})
    host = statsd_settings.get('host', self.STATSD_HOST)
    port = statsd_settings.get('port', self.STATSD_PORT)
    return (host, port)
def print_markdown(data, title=None):
    """Print data in GitHub-flavoured Markdown format for issues etc.

    data (dict or list of tuples): Label/value pairs.
    title (unicode or None): Title, will be rendered as headline 2.
    """
    def excl_value(value):
        # Value contains an existing path, i.e. potentially personal info.
        return isinstance(value, basestring_) and Path(value).exists()

    if isinstance(data, dict):
        data = list(data.items())
    rows = [
        "* **{}:** {}".format(label, unicode_(value))
        for label, value in data
        if not excl_value(value)
    ]
    if title:
        print("\n## {}".format(title))
    print('\n{}\n'.format('\n'.join(rows)))
def are_collinear(magmoms):
    """Check whether a set of magnetic moments are collinear with each other.

    :param magmoms: list of magmoms (Magmoms, scalars or vectors)
    :return: bool
    """
    magmoms = [Magmom(magmom) for magmom in magmoms]
    if not Magmom.have_consistent_saxis(magmoms):
        magmoms = Magmom.get_consistent_set(magmoms)

    # Work with a plain numpy array and drop zero moments.
    moments = np.array([list(magmom) for magmom in magmoms])
    moments = moments[np.any(moments, axis=1)]
    if len(moments) == 0:
        return True

    # A nonzero cross product with the reference moment means non-collinear.
    reference = moments[0]
    cross_norms = np.linalg.norm(np.cross(reference, moments), axis=1)
    return np.count_nonzero(cross_norms) == 0
def get(self, zone_id):
    """Retrieve the information for a zone entity."""
    zone_path = '/'.join(['zone', zone_id])
    return self.rachio.get(zone_path)
def clean_upload(self, query='/content/uploads/'):
    """Explicitly delete upload droppings left behind by pulp.

    Pulp does not clean up after uploads unless told to; this issues the
    DELETE request for this upload's id and raises on failure.
    """
    target = query + self.uid + '/'
    response = self.connector.delete(target)
    if response.status_code == Constants.PULP_DELETE_OK:
        juicer.utils.Log.log_info("Cleaned up after upload request.")
    else:
        response.raise_for_status()
def zoom(self, zoom, center=(0, 0, 0), mapped=True):
    """Update the transform's scale factors, keeping *center* fixed.

    Parameters
    ----------
    zoom : array-like
        Values to multiply the transform's current scale factors.
    center : array-like
        The center point around which the scaling will take place.
    mapped : bool
        Whether *center* is expressed in mapped coordinates (True) or
        unmapped coordinates (False).
    """
    zoom = as_vec4(zoom, default=(1, 1, 1, 1))
    center = as_vec4(center, default=(0, 0, 0, 0))
    new_scale = self.scale * zoom
    if mapped:
        # Pull the translation toward the mapped center by the zoom factor.
        new_translate = center - (center - self.translate) * zoom
    else:
        new_translate = self.scale * (1 - zoom) * center + self.translate
    self._set_st(scale=new_scale, translate=new_translate)
def normalize_ip(ip):
    """Transform an IPv4 address into a fixed-length form, such as::

        192.168.0.1 -> 192.168.000.001

    :type ip: string
    :param ip: An IP address.
    :rtype: string
    :return: The normalized IP.
    :raises ValueError: if *ip* does not consist of four numeric octets.
    """
    octets = ip.split('.')
    if len(octets) != 4:
        raise ValueError('ip should be 4 tuples')
    padded = [str(int(octet)).rjust(3, '0') for octet in octets]
    return '.'.join(padded)
def hash_str(data, hasher=None):
    """Feed *data* into a checksum hasher and return the hasher.

    :param data: bytes to hash; a ``str`` is UTF-8 encoded first, since
        hashlib hashers only accept bytes.
    :param hasher: optional hashlib-style hasher to update; a fresh SHA-1
        hasher is created when omitted.
    :return: the updated hasher (call ``.hexdigest()`` for the checksum).
    """
    hasher = hasher or hashlib.sha1()
    if isinstance(data, str):
        # hashlib.update() rejects text; encode consistently as UTF-8.
        data = data.encode('utf-8')
    hasher.update(data)
    return hasher
def find_files ( self , ignore_policies = True ) : """Search shared and private assemblies and return a list of files . If any files are not found , return an empty list . IMPORTANT NOTE : For the purpose of getting the dependent assembly files of an executable , the publisher configuration ( aka policy ) should be ignored ( which is the default ) . Setting ignore _ policies = False is only useful to find out which files are actually loaded at runtime ."""
# Shared Assemblies : # http : / / msdn . microsoft . com / en - us / library / aa375996%28VS . 85%29 . aspx # Private Assemblies : # http : / / msdn . microsoft . com / en - us / library / aa375674%28VS . 85%29 . aspx # Assembly Searching Sequence : # http : / / msdn . microsoft . com / en - us / library / aa374224%28VS . 85%29 . aspx # NOTE : # Multilanguage User Interface ( MUI ) support not yet implemented files = [ ] languages = [ ] if self . language not in ( None , "" , "*" , "neutral" ) : languages . append ( self . getlanguage ( ) ) if "-" in self . language : # language - culture syntax , e . g . en - us # Add only the language part languages . append ( self . language . split ( "-" ) [ 0 ] ) if self . language not in ( "en-us" , "en" ) : languages . append ( "en-us" ) if self . language != "en" : languages . append ( "en" ) languages . append ( self . getlanguage ( "*" ) ) winsxs = os . path . join ( compat . getenv ( "SystemRoot" ) , "WinSxS" ) if not os . path . isdir ( winsxs ) : logger . warn ( "No such dir %s" , winsxs ) manifests = os . path . join ( winsxs , "Manifests" ) if not os . path . isdir ( manifests ) : logger . warn ( "No such dir %s" , manifests ) if not ignore_policies and self . version : if sys . getwindowsversion ( ) < ( 6 , ) : # Windows XP pcfiles = os . path . join ( winsxs , "Policies" ) if not os . path . isdir ( pcfiles ) : logger . warn ( "No such dir %s" , pcfiles ) else : # Vista or later pcfiles = manifests for language in languages : version = self . version # Search for publisher configuration if not ignore_policies and version : # Publisher Configuration ( aka policy ) # A publisher configuration file globally redirects # applications and assemblies having a dependence on one # version of a side - by - side assembly to use another version of # the same assembly . This enables applications and assemblies # to use the updated assembly without having to rebuild all of # the affected applications . # http : / / msdn . 
microsoft . com / en - us / library / aa375680%28VS . 85%29 . aspx # Under Windows XP and 2003 , policies are stored as # < version > . policy files inside # % SystemRoot % \ WinSxS \ Policies \ < name > # Under Vista and later , policies are stored as # < name > . manifest files inside % SystemRoot % \ winsxs \ Manifests redirected = False if os . path . isdir ( pcfiles ) : logger . info ( "Searching for publisher configuration %s ..." , self . getpolicyid ( True , language = language ) ) if sys . getwindowsversion ( ) < ( 6 , ) : # Windows XP policies = os . path . join ( pcfiles , self . getpolicyid ( True , language = language ) + ".policy" ) else : # Vista or later policies = os . path . join ( pcfiles , self . getpolicyid ( True , language = language ) + ".manifest" ) for manifestpth in glob ( policies ) : if not os . path . isfile ( manifestpth ) : logger . warn ( "Not a file %s" , manifestpth ) continue logger . info ( "Found %s" , manifestpth ) try : policy = ManifestFromXMLFile ( manifestpth ) except Exception , exc : logger . error ( "Could not parse file %s" , manifestpth ) logger . exception ( exc ) else : logger . info ( "Checking publisher policy for " "binding redirects" ) for assembly in policy . dependentAssemblies : if ( not assembly . same_id ( self , True ) or assembly . optional ) : continue for redirect in assembly . bindingRedirects : if logger . isEnabledFor ( logging . INFO ) : old = "-" . join ( [ "." . join ( [ str ( i ) for i in part ] ) for part in redirect [ 0 ] ] ) new = "." . join ( [ str ( i ) for i in redirect [ 1 ] ] ) logger . info ( "Found redirect for " "version(s) %s -> %n" , old , new ) if ( version >= redirect [ 0 ] [ 0 ] and version <= redirect [ 0 ] [ - 1 ] and version != redirect [ 1 ] ) : logger . info ( "Applying redirect " "%s -> %s" , "." . join ( [ str ( i ) for i in version ] ) , new ) version = redirect [ 1 ] redirected = True if not redirected : logger . 
info ( "Publisher configuration not used" ) # Search for assemblies according to assembly searching sequence paths = [ ] if os . path . isdir ( manifests ) : # Add winsxs search paths paths . extend ( glob ( os . path . join ( manifests , self . getid ( language = language , version = version ) + "_*.manifest" ) ) ) if self . filename : # Add private assembly search paths dirnm = os . path . dirname ( self . filename ) if language in ( LANGUAGE_NEUTRAL_NT5 , LANGUAGE_NEUTRAL_NT6 ) : for ext in ( ".dll" , ".manifest" ) : paths . extend ( glob ( os . path . join ( dirnm , self . name + ext ) ) ) paths . extend ( glob ( os . path . join ( dirnm , self . name , self . name + ext ) ) ) else : for ext in ( ".dll" , ".manifest" ) : paths . extend ( glob ( os . path . join ( dirnm , language , self . name + ext ) ) ) for ext in ( ".dll" , ".manifest" ) : paths . extend ( glob ( os . path . join ( dirnm , language , self . name , self . name + ext ) ) ) logger . info ( "Searching for assembly %s ..." , self . getid ( language = language , version = version ) ) for manifestpth in paths : if not os . path . isfile ( manifestpth ) : logger . warn ( "Not a file %s" , manifestpth ) continue assemblynm = os . path . basename ( os . path . splitext ( manifestpth ) [ 0 ] ) try : if manifestpth . endswith ( ".dll" ) : logger . info ( "Found manifest in %s" , manifestpth ) manifest = ManifestFromResFile ( manifestpth , [ 1 ] ) else : logger . info ( "Found manifest %s" , manifestpth ) manifest = ManifestFromXMLFile ( manifestpth ) except Exception , exc : logger . error ( "Could not parse manifest %s" , manifestpth ) logger . exception ( exc ) else : if manifestpth . startswith ( winsxs ) : assemblydir = os . path . join ( winsxs , assemblynm ) if not os . path . isdir ( assemblydir ) : logger . warn ( "No such dir %s" , assemblydir ) logger . warn ( "Assembly incomplete" ) return [ ] else : assemblydir = os . path . dirname ( manifestpth ) files . 
append ( manifestpth ) for file_ in self . files or manifest . files : fn = file_ . find ( assemblydir ) if fn : files . append ( fn ) else : # If any of our files does not exist , # the assembly is incomplete logger . warn ( "Assembly incomplete" ) return [ ] return files logger . warn ( "Assembly not found" ) return [ ]
def get(self, uri_pattern, headers=None, parameters=None, **kwargs):
    """Launch HTTP GET request to the API with given arguments

    :param uri_pattern: string pattern of the full API url with keyword
        arguments (format string syntax)
    :param headers: HTTP header (dict)
    :param parameters: Query parameters. i.e. {'key1': 'value1', 'key2': 'value2'}
    :param **kwargs: URL parameters (without url_root) to fill the patterns
    :returns: REST API response ('Requests' response)
    """
    # NOTE(review): removed two leftover debug `print` statements that
    # dumped the URI pattern and headers to stdout on every request.
    return self._call_api(uri_pattern, HTTP_VERB_GET, headers=headers,
                          parameters=parameters, **kwargs)
def smart_scrubb(df, col_name, error_rate=0):
    """Scrubs from the back of an 'object' column in a DataFrame until the
    scrub would semantically alter the contents of the column. If only a
    subset of the elements in the column are scrubbed, then a boolean
    array indicating which elements have been scrubbed is appended to the
    dataframe. Returns the string that was scrubbed.

    df - DataFrame
        DataFrame to scrub
    col_name - string
        Name of column to scrub
    error_rate - number, default 0
        The maximum amount of values this function can ignore while
        scrubbing, expressed as a fraction of the total amount of rows in
        the dataframe.
    """
    scrubbed = ""
    while True:
        # Count the distinct suffixes one character longer than what has
        # been accepted so far.
        valcounts = df[col_name].str[-len(scrubbed) - 1:].value_counts()
        if not len(valcounts):
            break
        # FIX: use positional access; `valcounts[0]` is label-based in
        # modern pandas and raises KeyError for string-labelled counts.
        if not valcounts.iloc[0] >= (1 - error_rate) * _utils.rows(df):
            break
        # Accept the dominant (longer) suffix and keep growing it.
        scrubbed = valcounts.index[0]
    if scrubbed == '':
        return None
    which = df[col_name].str.endswith(scrubbed)
    _basics.col_scrubb(df, col_name, which, len(scrubbed), True)
    if not which.all():
        # Record which rows were scrubbed when only a subset matched.
        new_col_name = _basics.colname_gen(df, "{}_sb-{}".format(col_name,
                                                                 scrubbed))
        df[new_col_name] = which
    return scrubbed
def set_metadata(self, metadata_dict):
    """Set the metadata on a dataset

    **metadata_dict**: A dictionary of metadata key-vals.
    Transforms this dict into an array of metadata objects for storage
    in the DB.
    """
    if metadata_dict is None:
        return

    # Update values of entries that already exist on the dataset.
    existing_keys = []
    for entry in self.metadata:
        existing_keys.append(entry.key)
        if entry.key in metadata_dict and entry.value != metadata_dict[entry.key]:
            entry.value = metadata_dict[entry.key]

    # Add brand-new entries, stringifying both key and value.
    for key, value in metadata_dict.items():
        if key not in existing_keys:
            self.metadata.append(Metadata(key=str(key), value=str(value)))

    # Delete entries that no longer appear in the incoming dict.
    stale_keys = set(existing_keys).difference(set(metadata_dict.keys()))
    for entry in self.metadata:
        if entry.key in stale_keys:
            get_session().delete(entry)
def _crawl_attribute(this_data, this_attr):
    '''helper function to crawl an attribute specified for retrieval'''
    # A list of objects: crawl each element and collect the results.
    if isinstance(this_data, list):
        return [_crawl_attribute(item, this_attr) for item in this_data]

    if isinstance(this_attr, dict):
        # Mapping of attribute name -> nested attribute spec.
        crawled = {}
        for key in this_attr:
            if hasattr(this_data, key):
                crawled[key] = _crawl_attribute(
                    getattr(this_data, key, None), this_attr[key])
        return crawled

    if isinstance(this_attr, list):
        # Several attribute specs: crawl each one and merge the dicts.
        merged = {}
        for sub_attr in this_attr:
            merged = dictupdate.update(
                merged, _crawl_attribute(this_data, sub_attr))
        return merged

    # Plain attribute name: fetch the value and serialize it.
    return {this_attr: _recurse_config_to_dict(
        getattr(this_data, this_attr, None))}
def import_one_to_many(self, file_path, column_index, parent_table,
                       column_in_one2many_table):
    """Import a "|"-delimited one-to-many column into its own link table.

    :param file_path: path to the tab-separated source file
    :param column_index: index of the column holding the "|"-separated values
    :param parent_table: parent table object (provides ``.name``/``.model``)
    :param column_in_one2many_table: name of the value column in the child table
    """
    # Stream the file in 1M-row chunks to bound memory usage.
    chunks = pd.read_table(file_path, usecols=[column_index], header=None,
                           comment='#', index_col=False, chunksize=1000000,
                           dtype=self.get_dtypes(parent_table.model))
    for chunk in chunks:
        child_values = []
        parent_id_values = []
        chunk.dropna(inplace=True)
        # Shift the 0-based row index to 1-based ids — assumes file row
        # order matches the parent table's ids (TODO confirm).
        chunk.index += 1
        for parent_id, values in chunk.iterrows():
            entry = values[column_index]
            if not isinstance(entry, str):
                entry = str(entry)
            # Each "|"-separated token becomes one child row keyed to the
            # same parent id.
            for value in entry.split("|"):
                parent_id_values.append(parent_id)
                child_values.append(value.strip())
        parent_id_column_name = parent_table.name + '__id'
        o2m_table_name = (defaults.TABLE_PREFIX + parent_table.name + '__' +
                          column_in_one2many_table)
        # Append the (parent_id, value) pairs to the link table.
        pd.DataFrame({parent_id_column_name: parent_id_values,
                      column_in_one2many_table: child_values}).to_sql(
            name=o2m_table_name, if_exists='append', con=self.engine,
            index=False)
def delete_pre_subscriptions(self, **kwargs):  # noqa: E501
    """Remove pre-subscriptions  # noqa: E501

    Removes pre-subscriptions. **Example usage:** curl -X DELETE https://api.us-east-1.mbedcloud.com/v2/subscriptions -H 'authorization: Bearer {api-key}'  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.delete_pre_subscriptions(asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Only the payload (not the full HTTP response) is wanted.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        # Asynchronous: hand back the request thread immediately.
        return self.delete_pre_subscriptions_with_http_info(**kwargs)  # noqa: E501
    # Synchronous: block and return the response data.
    return self.delete_pre_subscriptions_with_http_info(**kwargs)  # noqa: E501
def get_header(mesh):
    """For now we only know Element types 1 (line) and 2 (triangle)"""
    node_count = len(mesh['nodes'])
    type_count = 1  # triangles are always counted as one element type

    # Bandwidth = largest node-index spread within any triangle.
    bandwidth = -1
    for tri in mesh['elements']['2']:
        spread = max(abs(tri[0] - tri[1]),
                     abs(tri[0] - tri[2]),
                     abs(tri[1] - tri[2]))
        if spread > bandwidth:
            bandwidth = spread

    element_rows = []
    for key in ('2',):
        entries = mesh['elements'][key]
        # Type code 3 for triangles, 12 for Neumann line elements.
        type_code = 3 if key == '2' else 12
        element_rows.append((type_code, len(entries), len(entries[0])))

    # Boundary elements (two nodes each); only non-empty groups count.
    for boundary_key in ('12', '11'):
        if boundary_key in mesh['boundaries']:
            count = len(mesh['boundaries'][boundary_key])
            if count > 0:
                type_count += 1
                element_rows.append((int(boundary_key), count, 2))

    rows = [(node_count, type_count, bandwidth)] + element_rows
    return ''.join('{0} {1} {2}\n'.format(a, b, c) for a, b, c in rows)
def create(self, **kwargs):
    """Create the resource on the BIG-IP®.

    Uses HTTP POST to the `collection` URI to create a resource
    associated with a new unique URI on the device.

    As proxyServerPool parameter will be required only if useProxyServer
    is set to 'enabled' we have to use conditional to capture this logic
    during create.
    """
    # A proxy-enabled resource must name a proxy pool; otherwise a DNS
    # resolver is required instead.
    if kwargs['useProxyServer'] == 'enabled':
        required = ('proxyServerPool', 'trustedCa', 'useProxyServer')
    else:
        required = ('dnsResolver', 'trustedCa', 'useProxyServer')
    self._meta_data['required_creation_parameters'].update(required)
    return self._create(**kwargs)
def dumps(self, obj):
    """Serializes obj to an avro-format byte array and returns it."""
    buffer = BytesIO()
    try:
        self.dump(obj, buffer)
        return buffer.getvalue()
    finally:
        # Always release the in-memory buffer, even if dump() raises.
        buffer.close()
def circle_circle_intersection(C_a, r_a, C_b, r_b):
    '''Finds the coordinates of the intersection points of two circles A
    and B. Circle center coordinates C_a and C_b should be given as
    tuples (or 1x2 arrays).

    Returns a 2x2 array with row 0 being the first intersection point
    (to the right of the vector C_a -> C_b) and row 1 the second. A
    single intersection point is repeated in the output. Returns None
    when there are no intersection points or infinitely many.
    '''
    center_a = np.asarray(C_a, float)
    center_b = np.asarray(C_b, float)
    delta = center_b - center_a
    dist = np.linalg.norm(delta)
    if np.abs(dist) < tol:
        # Coincident centers: no intersections or infinitely many.
        return None
    # Law of cosines: angle at center A between C_a->C_b and C_a->point.
    cos_gamma = (dist ** 2 + r_a ** 2 - r_b ** 2) / 2.0 / dist / r_a
    if abs(cos_gamma) > 1.0 + tol / 10:
        # Circles do not touch (allowing a tiny numeric tolerance).
        return None
    # Clamp residual numeric overshoot into [-1, 1].
    cos_gamma = min(1.0, max(-1.0, cos_gamma))
    sin_gamma = np.sqrt(1 - cos_gamma ** 2)
    u = delta / dist                  # unit vector along C_a -> C_b
    v = np.array([-u[1], u[0]])       # unit vector perpendicular to u
    base = center_a + r_a * cos_gamma * u
    offset = r_a * sin_gamma * v
    return np.array([base - offset, base + offset])
def guess_leb_size(path):
    """Get LEB size from superblock

    Arguments:
    Str:path    -- Path to file.

    Returns:
    Int         -- LEB size.

    Searches file for superblock and retrieves leb size.
    """
    # Use a context manager so the descriptor is released even when
    # parsing raises (the original leaked it on any exception).
    with open(path, 'rb') as f:
        f.seek(0, 2)
        # NOTE(review): +1 looks off-by-one but is kept — it only pads the
        # chunk-count range and cannot skip data.
        file_size = f.tell() + 1
        f.seek(0)
        block_size = None
        for _ in range(0, file_size, FILE_CHUNK_SZ):
            buf = f.read(FILE_CHUNK_SZ)
            # Scan the chunk for UBIFS node magics.
            for m in re.finditer(UBIFS_NODE_MAGIC, buf):
                start = m.start()
                chdr = nodes.common_hdr(buf[start:start + UBIFS_COMMON_HDR_SZ])
                if chdr and chdr.node_type == UBIFS_SB_NODE:
                    sb_start = start + UBIFS_COMMON_HDR_SZ
                    sb_end = sb_start + UBIFS_SB_NODE_SZ
                    if chdr.len != len(buf[sb_start:sb_end]):
                        # Superblock spans past this chunk; re-read whole.
                        f.seek(sb_start)
                        buf = f.read(UBIFS_SB_NODE_SZ)
                    else:
                        buf = buf[sb_start:sb_end]
                    sbn = nodes.sb_node(buf)
                    block_size = sbn.leb_size
                    return block_size
        return block_size
def printStatistics ( completion , concordance , tpedSamples , oldSamples , prefix ) : """Print the statistics in a file . : param completion : the completion of each duplicated samples . : param concordance : the concordance of each duplicated samples . : param tpedSamples : the updated position of the samples in the tped containing only duplicated samples . : param oldSamples : the original duplicated sample positions . : param prefix : the prefix of all the files . : type completion : : py : class : ` numpy . array ` : type concordance : dict : type tpedSamples : dict : type oldSamples : dict : type prefix : str : returns : the completion for each duplicated samples , as a : py : class : ` numpy . array ` . Prints the statistics ( completion of each samples and pairwise concordance between duplicated samples ) in a file ( ` ` prefix . summary ` ` ) ."""
# Compute the completion percentage on none zero values none_zero_indexes = np . where ( completion [ 1 ] != 0 ) completionPercentage = np . zeros ( len ( completion [ 0 ] ) , dtype = float ) completionPercentage [ none_zero_indexes ] = np . true_divide ( completion [ 0 , none_zero_indexes ] , completion [ 1 , none_zero_indexes ] , ) # The output file containing the summary statistics ( for each of the # duplicated samples , print the mean concordance and the completion ) . outputFile = None try : outputFile = open ( prefix + ".summary" , "w" ) except IOError : msg = "%(prefix)s.summary: can't write file" % locals ( ) raise ProgramError ( msg ) print >> outputFile , "\t" . join ( [ "origIndex" , "dupIndex" , "famID" , "indID" , "% completion" , "completion" , "mean concordance" ] ) for sampleID , indexes in tpedSamples . iteritems ( ) : for i , index in enumerate ( indexes ) : # The indexes toPrint = [ str ( oldSamples [ sampleID ] [ i ] + 1 ) , str ( index + 1 ) ] # The samples toPrint . extend ( list ( sampleID ) ) # The completion toPrint . append ( "%.8f" % completionPercentage [ index ] ) toPrint . append ( "%d/%d" % ( completion [ 0 ] [ index ] , completion [ 1 ] [ index ] ) ) # The concordance ( not on total values = 0) indexToKeep = list ( set ( range ( len ( indexes ) ) ) - set ( [ i ] ) ) values = np . ravel ( np . asarray ( concordance [ sampleID ] [ 0 ] [ i , indexToKeep ] ) ) total_values = np . ravel ( np . asarray ( concordance [ sampleID ] [ 1 ] [ i , indexToKeep ] ) ) currConcordance = np . zeros ( len ( indexToKeep ) , dtype = float ) none_zero_indexes = np . where ( total_values != 0 ) currConcordance [ none_zero_indexes ] = np . true_divide ( values [ none_zero_indexes ] , total_values [ none_zero_indexes ] , ) currConcordance = np . mean ( currConcordance ) toPrint . append ( "%.8f" % currConcordance ) print >> outputFile , "\t" . join ( toPrint ) # Closing the output file outputFile . close ( ) return completionPercentage
def make_table(self):
    """Make numpy array from timeseries data.

    :returns: a :class:`numpy.recarray` with one row per recorded contact.
    """
    # The original counted records with np.sum over a throwaway list;
    # len() gives the same answer directly.
    num_records = len(self.timeseries)
    # Ring-index lists are variable-length, so store them as generic
    # Python objects: ``np.dtype(list)`` is rejected by modern numpy.
    dtype = [
        ("frame", float), ("time", float),
        ("proteinring", object), ("ligand_ring_ids", object),
        ("distance", float), ("angle", float), ("offset", float),
        ("type", "|U4"), ("resid", int), ("resname", "|U4"),
        ("segid", "|U8")]
    out = np.empty((num_records,), dtype=dtype)
    for cursor, contact in enumerate(self.timeseries):
        out[cursor] = (contact.frame, contact.time, contact.proteinring,
                       contact.ligandring, contact.distance, contact.angle,
                       contact.offset, contact.type, contact.resid,
                       contact.resname, contact.segid)
    return out.view(np.recarray)
def get_unique_record(self, sql, parameters=None, quiet=False, locked=False):
    '''I use this pattern a lot. Return the single record corresponding
    to the query.

    :raises AssertionError: if the query does not return exactly one record.
    '''
    results = self.execute_select(sql, parameters=parameters, quiet=quiet,
                                  locked=locked)
    # A bare `assert` is stripped under `python -O`; raise explicitly
    # (same exception type callers may already handle) with a useful count.
    if len(results) != 1:
        raise AssertionError(
            'expected exactly one record, got %d' % len(results))
    return results[0]
def devices(opts=None):
    """Get list of all available devices including emulators

    :param opts: list command options (e.g. ["-r", "-a"])
    :return: result of _exec_command() execution
    """
    # Avoid the shared mutable-default-argument pitfall: normalize None
    # to a fresh empty list on every call (callers passing [] see no
    # change in behavior).
    if opts is None:
        opts = []
    adb_full_cmd = [v.ADB_COMMAND_PREFIX, v.ADB_COMMAND_DEVICES,
                    _convert_opts(opts)]
    return _exec_command(adb_full_cmd)
def _archive_tp_file_done_tasks(self, taskpaperPath):
    """*archive tp file done tasks*

    Moves every ``@done`` task found under the "Archive" project of the
    given taskpaper file into a sibling ``*-tasklog.md`` markdown table,
    then deletes the Archive project from the taskpaper document.

    **Key Arguments:**
        - ``taskpaperPath`` -- path to a taskpaper file

    **Return:**
        - None
    """
    self.log.info('starting the ``_archive_tp_file_done_tasks`` method')

    self.log.info(
        "archiving taskpaper file %(taskpaperPath)s" % locals())

    taskLog = {}
    # The markdown task-log that sits alongside the taskpaper file.
    mdArchiveFile = taskpaperPath.replace(".taskpaper", "-tasklog.md")
    exists = os.path.exists(mdArchiveFile)
    if exists:
        pathToReadFile = mdArchiveFile
        try:
            self.log.debug("attempting to open the file %s" %
                           (pathToReadFile,))
            readFile = codecs.open(
                pathToReadFile, encoding='utf-8', mode='r')
            thisData = readFile.read()
            readFile.close()
        except IOError, e:
            message = 'could not open the file %s' % (pathToReadFile,)
            self.log.critical(message)
            raise IOError(message)
        readFile.close()

        # Parse the existing markdown table back into task dicts so
        # previously archived tasks are preserved across runs.
        table = False
        for l in thisData.split("\n"):
            l = l.encode("utf-8")
            if ":---" in l:
                # The separator row marks the start of the table body.
                table = True
                continue
            if table == True and len(l) and l[0] == "|":
                dictt = collections.OrderedDict(sorted({}.items()))
                columns = l.split("|")
                dictt["task"] = columns[1].strip().decode("utf-8")
                dictt["completed"] = columns[2].strip().decode("utf-8")
                dictt["project"] = columns[3].strip().decode("utf-8")
                # Key on task+completed+project to deduplicate entries.
                taskLog[dictt["task"] + dictt["completed"] +
                        dictt["project"]] = dictt

    doc = document(taskpaperPath)
    aProject = doc.get_project("Archive")
    if not aProject:
        # Nothing to archive without an "Archive" project.
        return

    doneTasks = aProject.tagged_tasks("@done")

    for task in doneTasks:
        # Extract completion date and project name from the task's tags.
        dateCompleted = ""
        project = ""
        for t in task.tags:
            if "done" in t:
                dateCompleted = t.replace("done", "").replace(
                    "(", "").replace(")", "")
            if "project(" in t:
                project = t.replace("project", "").replace(
                    "(", "").replace(")", "")
        dictt = collections.OrderedDict(sorted({}.items()))
        # Concatenate the task's notes into one sentence-separated string.
        notes = ""
        if task.notes:
            for n in task.notes:
                if len(notes) and notes[-2:] != ". ":
                    if notes[-1] == ".":
                        notes += " "
                    else:
                        notes += ". "
                notes += n.title
        if len(notes):
            notes = "<br><br>**NOTES:**<br>" + \
                "<br>".join(textwrap.wrap(
                    notes, 120, break_long_words=True))
        # task.title[2:] strips the leading "- " taskpaper bullet.
        dictt["task"] = "<br>".join(textwrap.wrap(
            task.title[2:], 120, break_long_words=True)) + notes
        dictt["task"] = dictt["task"].encode("utf-8")
        dictt["completed"] = dateCompleted
        dictt["project"] = project
        # SET ENCODE ERROR RETURN VALUE
        # RECODE INTO ASCII
        dictt["task"] = dictt["task"].decode("utf-8")
        dictt["completed"] = dictt["completed"].decode("utf-8")
        dictt["project"] = dictt["project"].decode("utf-8")
        taskLog[dictt["task"] + dictt["completed"] +
                dictt["project"]] = dictt

    taskLog = taskLog.values()
    # Stable multi-key sort: completed date (newest first) dominates,
    # then project, then task title.
    taskLog = sorted(taskLog, key=itemgetter('task'), reverse=True)
    taskLog = sorted(taskLog, key=itemgetter('project'), reverse=True)
    taskLog = sorted(taskLog, key=itemgetter('completed'), reverse=True)

    dataSet = list_of_dictionaries(
        log=self.log,
        listOfDictionaries=taskLog
    )
    markdownData = dataSet.markdown(filepath=None)

    # Rewrite the whole markdown log (old + newly archived tasks).
    try:
        self.log.debug("attempting to open the file %s" %
                       (mdArchiveFile,))
        writeFile = codecs.open(mdArchiveFile, encoding='utf-8', mode='w')
    except IOError, e:
        message = 'could not open the file %s' % (mdArchiveFile,)
        self.log.critical(message)
        raise IOError(message)
    writeFile.write(markdownData.decode("utf-8"))
    writeFile.close()

    # Remove the archived tasks from the taskpaper document itself.
    aProject.delete()
    doc.save()

    self.log.info('completed the ``_archive_tp_file_done_tasks`` method')
    return None
def _handle_properties(self, stmt: Statement, sctx: SchemaContext) -> None:
    """Handle **enum** statements.

    Populates ``self.enum`` with a mapping of enum label to integer
    value. Enums without an explicit **value** substatement receive an
    auto-assigned value one greater than the highest value seen so far
    (YANG auto-assignment semantics, RFC 7950 sec. 9.6.4.2).
    """
    nextval = 0  # value for the next enum lacking an explicit "value"
    for est in stmt.find_all("enum"):
        # Skip enums disabled by unsatisfied if-feature conditions.
        if not sctx.schema_data.if_features(est, sctx.text_mid):
            continue
        label = est.argument
        vst = est.find1("value")
        if vst:
            val = int(vst.argument)
            self.enum[label] = val
            # Keep nextval tracking the highest explicit value.
            if val > nextval:
                nextval = val
        else:
            self.enum[label] = nextval
        nextval += 1
def _name ( iris_obj , default = 'unknown' ) : """Mimicks ` iris _ obj . name ( ) ` but with different name resolution order . Similar to iris _ obj . name ( ) method , but using iris _ obj . var _ name first to enable roundtripping ."""
return ( iris_obj . var_name or iris_obj . standard_name or iris_obj . long_name or default )
def return_dat(self, chan, begsam, endsam):
    """Return the data as 2D numpy.ndarray.

    Parameters
    ----------
    chan : int or list
        index (indices) of the channels to read
    begsam : int
        index of the first sample
    endsam : int
        index of the last sample

    Returns
    -------
    numpy.ndarray
        A 2d matrix, with dimension chan X samples
    """
    TRL = 0  # only the first trial is read
    try:
        # Old-style (< v7.3) MATLAB files are handled by scipy.
        ft_data = loadmat(self.filename, struct_as_record=True,
                          squeeze_me=True)
        ft_data = ft_data[VAR]
        data = ft_data['trial'].item(TRL)
    except NotImplementedError:
        # v7.3 MATLAB files are HDF5; scipy raises NotImplementedError
        # for them, so fall back to h5py.
        from h5py import File
        with File(self.filename, 'r') as f:
            # Dataset.value was removed in h5py 3.0; index with [()]
            # instead to read the full dataset.
            data = f[f[VAR]['trial'][TRL].item()][()].T

    return data[chan, begsam:endsam]
def main():
    '''main entry'''
    cli = docker.from_env()

    try:
        opts, args = getopt.gnu_getopt(
            sys.argv[1:], "pcv", ["pretty", "compose"])
    except getopt.GetoptError as _:
        print("Usage: docker-parse [--pretty|-p|--compose|-c] [containers]")
        sys.exit(2)

    # Without explicit names, inspect every container (running or not).
    if not args:
        containers = cli.containers.list(all=True)
    else:
        containers = [cli.containers.get(name) for name in args]

    want_compose = False
    want_pretty = False
    for opt, _ in opts:
        if opt == '-v':
            print(__version__)
            sys.exit()
        elif opt in ('-p', '--pretty'):
            want_pretty = True
            break
        elif opt in ('-c', '--compose'):
            want_compose = True
            break

    for container in containers:
        info = container.attrs
        # diff with image info to reduce information
        image_info = cli.images.get(info['Config']['Image']).attrs
        if want_compose:
            output_compose(info, image_info)
        else:
            output_command(info, image_info, want_pretty)
def expand(self, string):
    """Expand."""
    self.expanding = False
    pending_empties = []
    produced_literal = False

    if string:
        stream = iter(StringIter(string))
        for literal in self.get_literals(next(stream), stream, 0):
            # Hold back empty strings and emit them only when a
            # non-empty literal follows, so no trailing empties leak out.
            if not literal:
                pending_empties.append(literal)
                continue
            produced_literal = True
            while pending_empties:
                yield pending_empties.pop(0)
            yield literal

    # We found no literals so return an empty string
    if not produced_literal:
        yield ""
def get_class(self, name):
    """Return a specific class

    :param name: the name of the class

    :rtype: a :class:`ClassDefItem` object
    """
    # Linear scan over the class definitions; None when absent.
    return next(
        (item for item in self.classes.class_def
         if item.get_name() == name),
        None)
def command_gen(tool_installations, tool_executable, args=None,
                node_paths=None):
    """Generate a Command object with required tools installed and paths
    set up.

    :param list tool_installations: A list of functions to install
      required tools. Those functions should take no parameter and return
      an installation path to be included in the runtime path.
    :param tool_executable: Name of the tool to be executed.
    :param list args: A list of arguments to be passed to the executable
    :param list node_paths: A list of path to node_modules.
      node_modules/.bin will be appended to the run time path.
    :rtype: class:`Command`
    """
    bin_suffix = 'node_modules/.bin'
    # Calling each installer triggers installation if the tool is not yet
    # installed; its return value goes on the runtime path.
    runtime_paths = [install() for install in tool_installations]
    for node_path in (node_paths or []):
        if not node_path.endswith(bin_suffix):
            node_path = os.path.join(node_path, bin_suffix)
        runtime_paths.append(node_path)
    return Command(executable=tool_executable, args=args,
                   extra_paths=runtime_paths)
def fetch_token(self, code, state):
    """Fetch the token, using the verification code. Also, make sure the
    state received in the response matches the one in the request.
    Returns the access_token.
    """
    # CSRF protection: the returned state must echo the one we sent.
    if self.state != state:
        raise MismatchingStateError()
    token_url = '%saccess_token/' % OAUTH_URL
    self.token = self.oauth.fetch_token(
        token_url, code=code, client_secret=self.client_secret)
    return self.token['access_token']
def CreateJob ( self , cron_args = None , job_id = None , enabled = True , token = None ) : """Creates a cron job that runs given flow with a given frequency . Args : cron _ args : A protobuf of type rdf _ cronjobs . CreateCronJobArgs . job _ id : Use this job _ id instead of an autogenerated unique name ( used for system cron jobs - we want them to have well - defined persistent name ) . enabled : If False , the job object will be created , but will be disabled . token : Security token used for data store access . Unused . Returns : URN of the cron job created . Raises : ValueError : This function expects an arg protobuf that starts a CreateAndRunGenericHuntFlow flow . If the args specify something else , ValueError is raised ."""
# TODO ( amoser ) : Remove the token from this method once the aff4 # cronjobs are gone . del token if not job_id : uid = random . UInt16 ( ) job_id = "%s_%s" % ( cron_args . flow_name , uid ) args = rdf_cronjobs . CronJobAction ( action_type = rdf_cronjobs . CronJobAction . ActionType . HUNT_CRON_ACTION , hunt_cron_action = rdf_cronjobs . HuntCronAction ( flow_name = cron_args . flow_name , flow_args = cron_args . flow_args , hunt_runner_args = cron_args . hunt_runner_args ) ) job = rdf_cronjobs . CronJob ( cron_job_id = job_id , description = cron_args . description , frequency = cron_args . frequency , lifetime = cron_args . lifetime , allow_overruns = cron_args . allow_overruns , args = args , enabled = enabled ) data_store . REL_DB . WriteCronJob ( job ) return job_id
async def _incoming_from_rtm(self, url: str,
                             bot_id: str) -> AsyncIterator[events.Event]:
    """Connect and discard incoming RTM event if necessary.

    :param url: Websocket url
    :param bot_id: Bot ID
    :return: Incoming events
    """
    async for data in self._rtm(url):
        event = events.Event.from_rtm(json.loads(data))
        if sansio.need_reconnect(event):
            # Stop iterating so the caller can re-establish the
            # websocket connection.
            break
        elif sansio.discard_event(event, bot_id):
            # Skip events sansio flags as discardable for this bot.
            continue
        else:
            yield event
def request(self, response_queue, payload, timeout_s=None, poll=POLL_QUEUES):
    '''Send *payload* and wait for the response on *response_queue*.

    Parameters
    ----------
    response_queue : Queue.Queue
        Queue to wait for response on.
    payload : str or bytes
        Payload to send.
    timeout_s : float, optional
        Maximum time to wait (in seconds) for response.  By default,
        block until response is ready.
    poll : bool, optional
        If ``True``, poll response queue in a busy loop until response
        is ready (or timeout occurs).  Polling is much more processor
        intensive, but (at least on Windows) results in faster response
        processing; it is enabled by default on Windows.
    '''
    # Block until the serial connection is established (or timeout_s
    # elapses), then delegate to the module-level request() helper.
    self.connected.wait(timeout_s)
    result = request(self, response_queue, payload, timeout_s=timeout_s, poll=poll)
    return result
def start(self):
    """Initialize websockets, say hello, and start listening for events.

    Returns self so calls can be chained.
    """
    self.connect()
    already_running = self.isAlive()
    if not already_running:
        # Only start the underlying thread once.
        super(WAMPClient, self).start()
    self.hello()
    return self
def evaluate(self, verbose=False, decode=True, passes=None, num_threads=1, apply_experimental=True):
    """Evaluate the lazy data and wrap the result in a new Index.

    See `LazyResult`.

    Returns
    -------
    Index
        Index with evaluated data, keeping this Index's dtype and name.
    """
    evaluated = super(Index, self).evaluate(
        verbose, decode, passes, num_threads, apply_experimental)
    return Index(evaluated, self.dtype, self.name)
def write_file(self, blob, dest):
    '''Write the blob object's payload to the destination path *dest*.'''
    with salt.utils.files.fopen(dest, 'wb+') as out:
        out.write(blob.data)
def SetTimelineOwner(self, username):
    """Set the username of the user that should own the timeline.

    Args:
      username (str): username.
    """
    self._timeline_owner = username
    logger.info('Owner of the timeline: {0!s}'.format(username))
def removereadergroup(self, group):
    """Remove a reader group from the smart-card subsystem."""
    hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
    if hresult != 0:
        raise error('Failed to establish context: ' + SCardGetErrorMessage(hresult))
    try:
        forget_result = SCardForgetReaderGroup(hcontext, group)
        if forget_result != 0:
            raise error('Unable to forget reader group: ' + SCardGetErrorMessage(forget_result))
        # Only drop the group from the in-memory list once the system
        # call succeeded.
        innerreadergroups.removereadergroup(self, group)
    finally:
        release_result = SCardReleaseContext(hcontext)
        if release_result != 0:
            raise error('Failed to release context: ' + SCardGetErrorMessage(release_result))
def match(self, other, psd=None, low_frequency_cutoff=None, high_frequency_cutoff=None):
    """Return the match between the two TimeSeries or FrequencySeries.

    This is equivalent to the overlap maximized over time and phase.  By
    default, the other vector will be resized to match self; this may
    remove high frequency content or the end of the vector.

    Parameters
    ----------
    other : TimeSeries or FrequencySeries
        The input vector containing a waveform.
    psd : FrequencySeries
        A power spectral density to weight the overlap.
    low_frequency_cutoff : {None, float}, optional
        The frequency to begin the match.
    high_frequency_cutoff : {None, float}, optional
        The frequency to stop the match.

    Returns
    -------
    match : float
    index : int
        The number of samples to shift to get the match.
    """
    # Delegate to the frequency-domain implementation.
    htilde = self.to_frequencyseries()
    return htilde.match(other, psd=psd,
                        low_frequency_cutoff=low_frequency_cutoff,
                        high_frequency_cutoff=high_frequency_cutoff)
def check_constraint_convergence(X, L, LX, Z, U, R, S, step_f, step_g, e_rel, e_abs):
    """Calculate whether all constraints have converged.

    Uses the stopping criteria from Boyd 2011, Sec 3.3.1: the primal
    residual R and dual residual S of each constraint are compared
    against their tolerances.  When ``L`` is a list, the check recurses
    into each constraint and all of them must converge.
    """
    if not isinstance(L, list):
        # Single constraint: compare primal residual R and dual residual S
        # against the Boyd tolerances.
        e_pri, e_dual = get_variable_errors(X, L, LX, Z, U, step_g, e_rel, e_abs)
        norm_r = l2(R)
        norm_s = l2(S)
        has_converged = (norm_r <= e_pri) and (norm_s <= e_dual)
        return has_converged, (e_pri, e_dual, norm_r, norm_s)
    # List of constraints: recurse into each one.
    all_converged = True
    errors = []
    for i in range(len(L)):
        converged_i, err_i = check_constraint_convergence(
            X, L[i], LX[i], Z[i], U[i], R[i], S[i], step_f, step_g[i], e_rel, e_abs)
        all_converged &= converged_i
        errors.append(err_i)
    return all_converged, errors
def filter_grounded_only():
    """HTTP handler: filter the posted Statements to grounded ones only.

    Reads a JSON body with ``statements`` and an optional
    ``score_threshold``; answers CORS preflight requests with an empty
    dict.
    """
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    stmts_json = body.get('statements')
    score_threshold = body.get('score_threshold')
    if score_threshold is not None:
        score_threshold = float(score_threshold)
    stmts_out = ac.filter_grounded_only(
        stmts_from_json(stmts_json), score_threshold=score_threshold)
    return _return_stmts(stmts_out)
def from_Composition(composition):
    """Return the LilyPond equivalent of a Composition in a string.

    Returns False when the argument has no ``tracks`` attribute.
    """
    # warning Throw exception
    if not hasattr(composition, 'tracks'):
        return False
    lily_header = '\\header { title = "%s" composer = "%s" opus = "%s" } ' % (
        composition.title, composition.author, composition.subtitle)
    pieces = [from_Track(track) + ' ' for track in composition.tracks]
    # Strip the single trailing space.
    return (lily_header + ''.join(pieces))[:-1]
def download_source_dists(self, arguments, use_wheels=False):
    """Download missing source distributions.

    :param arguments: The command line arguments to ``pip install ...``
        (a list of strings).
    :param use_wheels: Whether pip and pip-accel are allowed to use wheels
        (:data:`False` by default for backwards compatibility with callers
        that use pip-accel as a Python API).
    :raises: Any exceptions raised by pip.
    """
    timer = Timer()
    logger.info("Downloading missing distribution(s) ..")
    requirement_set = self.get_pip_requirement_set(
        arguments, use_remote_index=True, use_wheels=use_wheels)
    logger.info("Finished downloading distribution(s) in %s.", timer)
    return requirement_set
def nla_len(self, value):
    """Length setter: encode *value* as a native uint16 into the header slice.

    A falsy *value* (e.g. None) is stored as 0.
    """
    encoded = bytearray(c_uint16(value or 0))
    self.bytearray[self._get_slicers(0)] = encoded
def database(
    state, host, name,
    # Desired database settings
    present=True,
    collate=None,
    charset=None,
    user=None,
    user_hostname='localhost',
    user_privileges='ALL',
    # Details for speaking to MySQL via `mysql` CLI
    mysql_user=None,
    mysql_password=None,
    mysql_host=None,
    mysql_port=None,
):
    '''Add/remove MySQL databases.

    + name: the name of the database
    + present: whether the database should exist or not
    + collate: the collate to use when creating the database
    + charset: the charset to use when creating the database
    + user: MySQL user to grant privileges on this database to
    + user_hostname: the hostname of the MySQL user to grant
    + user_privileges: privileges to grant to any specified user
    + mysql_*: global module arguments, see above

    Collate/charset: these will only be applied if the database does not
    exist - ie pyinfra will not attempt to alter the existing databases
    collate/character sets.
    '''
    existing = host.fact.mysql_databases(
        mysql_user, mysql_password, mysql_host, mysql_port,
    )
    found = name in existing

    if not present:
        # Drop only when it actually exists; either way we are done.
        if found:
            yield make_execute_mysql_command(
                'DROP DATABASE {0}'.format(name),
                user=mysql_user,
                password=mysql_password,
                host=mysql_host,
                port=mysql_port,
            )
        return

    # We want the database but it doesn't exist
    if present and not found:
        sql = ['CREATE DATABASE {0}'.format(name)]
        if collate:
            sql.append('COLLATE {0}'.format(collate))
        if charset:
            sql.append('CHARSET {0}'.format(charset))
        yield make_execute_mysql_command(
            ' '.join(sql),
            user=mysql_user,
            password=mysql_password,
            host=mysql_host,
            port=mysql_port,
        )

    # Ensure any user privileges for this database
    if user and user_privileges:
        yield privileges(
            state, host, user,
            user_hostname=user_hostname,
            privileges=user_privileges,
            database=name,
        )
def _get_session_timeout_seconds(cls, session_server):
    """Return the session timeout for whichever user type is present.

    :type session_server: core.SessionServer
    :rtype: int
    :raises BunqException: when no user is set on the session server.
    """
    if session_server.user_company is not None:
        return session_server.user_company.session_timeout
    if session_server.user_person is not None:
        return session_server.user_person.session_timeout
    if session_server.user_api_key is not None:
        requester = session_server.user_api_key.requested_by_user
        return requester.get_referenced_object().session_timeout
    raise BunqException()
def addObject(self, featureIndices, name=None):
    """Register a sequence built from *featureIndices* with the Machine.

    Each feature index f becomes a (0, f) pair.  When *name* is omitted,
    the current number of registered objects is used as the key.
    """
    if name is None:
        name = len(self.objects)
    self.objects[name] = [(0, f) for f in featureIndices]
def changeNickname(self, nickname, user_id, thread_id=None, thread_type=ThreadType.USER):
    """Changes the nickname of a user in a thread.

    :param nickname: New nickname
    :param user_id: User that will have their nickname changed
    :param thread_id: User/Group ID of the thread. See :ref:`intro_threads`
    :param thread_type: See :ref:`intro_threads`
    :type thread_type: models.ThreadType
    :raises: FBchatException if request failed
    """
    thread_id, thread_type = self._getThread(thread_id, thread_type)
    data = {
        "nickname": nickname,
        "participant_id": user_id,
        "thread_or_other_fbid": thread_id,
    }
    # _post raises FBchatException on failure; the parsed JSON response
    # carries no useful payload, so it is discarded (the previous unused
    # local `j` has been removed).
    self._post(self.req_url.THREAD_NICKNAME, data, fix_request=True, as_json=True)
def initialize(self, training_info, model, environment, device):
    """Initialize policy gradient from reinforcer settings.

    Builds a frozen copy of *model* as the target network and caches the
    value-histogram parameters exposed by the model.
    """
    target = self.model_factory.instantiate(
        action_space=environment.action_space).to(device)
    target.load_state_dict(model.state_dict())
    target.eval()
    self.target_model = target

    histogram_info = model.histogram_info()
    self.vmin = histogram_info['vmin']
    self.vmax = histogram_info['vmax']
    self.num_atoms = histogram_info['num_atoms']
    self.support_atoms = histogram_info['support_atoms']
    self.atom_delta = histogram_info['atom_delta']
def ltcube(self, **kwargs):
    """Return the name of a livetime cube file.

    Keyword arguments override entries of ``self.base_dict``; when
    ``fullpath`` is truthy the absolute path is returned instead of the
    local one.
    """
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    merged['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    localpath = NameFactory.ltcube_format.format(**merged)
    if kwargs.get('fullpath', False):
        return self.fullpath(localpath=localpath)
    return localpath
def _get_file_size(file_path):
    """Return the size of the file at *file_path* as a 4-byte bytearray.

    The size is packed as a native-order unsigned integer.
    """
    buf = bytearray(4)
    pack_into(b"I", buf, 0, getsize(file_path))
    return buf
def new(self, boot_system_id):
    # type: (bytes) -> None
    '''Create a new Boot Record.

    Parameters:
     boot_system_id - The system identifier to associate with this Boot
                      Record; padded with NULs to 32 bytes.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise Exception('Boot Record already initialized')
    pad = b'\x00'
    self.boot_system_identifier = boot_system_id.ljust(32, pad)
    self.boot_identifier = pad * 32
    self.boot_system_use = pad * 197  # This will be set later
    self._initialized = True
def is_sw_readable(self):
    """True when the field's 'sw' access property allows software reads."""
    readable_modes = (rdltypes.AccessType.rw,
                      rdltypes.AccessType.rw1,
                      rdltypes.AccessType.r)
    return self.get_property('sw') in readable_modes
def return_hdr(self):
    """Return the header for further use.

    Returns
    -------
    subj_id : str
        subject identification code
    start_time : datetime
        start time of the dataset
    s_freq : float
        sampling frequency
    chan_name : list of str
        list of all the channels
    n_samples : int
        number of samples in the dataset
    orig : dict
        additional information taken directly from the header
    """
    hdr = self._header
    subj_id = hdr['name'] + ' ' + hdr['surname']
    chan_name = [ch['chan_name'] for ch in hdr['chans']]
    return (subj_id, hdr['start_time'], hdr['s_freq'], chan_name,
            self._n_smp, hdr)
def connect(self):
    """Connect to the vCenter server.

    On failure the error is printed and the process exits with status 1.
    A Disconnect cleanup handler is registered via atexit.
    """
    try:
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        common = dict(
            host=self.config['server'],
            user=self.config['username'],
            pwd=self.config['password'],
            port=int(self.config['port']),
            certFile=None,
            keyFile=None,
        )
        if self.config['no_ssl_verify']:
            requests.packages.urllib3.disable_warnings()
            context.verify_mode = ssl.CERT_NONE
            self.si = SmartConnectNoSSL(**common)
        else:
            self.si = SmartConnect(sslContext=context, **common)
    except Exception as e:
        print('Unable to connect to vsphere server.')
        print(e)
        sys.exit(1)
    # add a clean up routine
    atexit.register(Disconnect, self.si)
    self.content = self.si.RetrieveContent()
def get_hidden_signups(self):
    """Return signups for this block from users *not* in the All Students list.

    This is usually a list of signups for z-Withdrawn from TJ.
    """
    all_students = User.objects.get_students()
    block_signups = EighthSignup.objects.filter(scheduled_activity__block=self)
    return block_signups.exclude(user__in=all_students)
def num_mode_groups(self):
    """The number of mode groups available on this device.

    Most devices only provide a single mode group; devices such as the
    Wacom Cintiq 22HD provide two.  Use the TabletPadModeGroup
    ``has_button``/``has_ring``/``has_strip`` methods to associate each
    control with the correct group.

    Returns:
        int: The number of mode groups available on this device.

    Raises:
        AttributeError: when the device is not a tablet pad device.
    """
    count = self._libinput.libinput_device_tablet_pad_get_num_mode_groups(self._handle)
    if count < 0:
        raise AttributeError('This device is not a tablet pad device')
    return count
def resize(self, package):
    """POST /:login/machines/:id?action=resize

    Initiate resizing of the remote machine to a new package.  *package*
    may be a package dict, in which case its 'name' entry is used.
    """
    if isinstance(package, dict):
        package = package['name']
    params = {'action': 'resize', 'package': package}
    _, response = self.datacenter.request('POST', self.path, params=params)
    response.raise_for_status()
def compare_changes(obj, **kwargs):
    '''Compare two dicts, returning only the keys that exist in *obj*
    and have a different value in *kwargs*.  The returned mapping holds
    the new (kwargs) values.'''
    return {
        key: kwargs[key]
        for key, value in obj.items()
        if key in kwargs and kwargs[key] != value
    }
def get_active_terms_ids():
    """Return a list of the IDs of all active terms and conditions.

    The list is ordered by slug and cached for TERMS_CACHE_SECONDS.
    """
    cached = cache.get('tandc.active_terms_ids')
    if cached is not None:
        return cached
    # Active terms, oldest activation first; later activations for the
    # same slug overwrite earlier ones.
    slug_to_id = {}
    active_set = TermsAndConditions.objects.filter(
        date_active__isnull=False,
        date_active__lte=timezone.now()).order_by('date_active')
    for terms in active_set:
        slug_to_id[terms.slug] = terms.id
    active_terms_ids = [slug_to_id[slug] for slug in sorted(slug_to_id)]
    cache.set('tandc.active_terms_ids', active_terms_ids, TERMS_CACHE_SECONDS)
    return active_terms_ids
def _next_ontology(self):
    """Return the ontology following the current one in the list.

    Wraps around to the first entry when the current file is the last
    one, or when it is not present in ``all_ontologies`` at all.
    """
    currentfile = self.current['file']
    try:
        idx = self.all_ontologies.index(currentfile)
        return self.all_ontologies[idx + 1]
    except (ValueError, IndexError):
        # ValueError: current file not in the list;
        # IndexError: current file is the last entry -> wrap around.
        # (Previously a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return self.all_ontologies[0]
def get(cls, parent, name):
    """Return the instance matching *parent* and *name*, or None."""
    query = cls.query.filter_by(parent=parent, name=name)
    return query.one_or_none()
def create(cls, interface_id, logical_interface_ref, second_interface_id=None, zone_ref=None, **kwargs):
    """Build an inline interface definition.

    :param str interface_id: two interfaces, i.e. '1-2', '4-5', '7-10', etc
    :param str logical_interface_ref: logical interface reference
    :param str zone_ref: reference to zone, set on second inline pair
    :rtype: dict
    """
    nicid = ('{}-{}'.format(str(interface_id), str(second_interface_id))
             if second_interface_id else str(interface_id))
    data = {
        'inspect_unspecified_vlans': True,
        'nicid': nicid,
        'logical_interface_ref': logical_interface_ref,
        'zone_ref': zone_ref,
    }
    # Merge any extra attributes in a single call instead of the previous
    # item-by-item `data.update({k: v})` loop.
    data.update(kwargs)
    return cls(data)
def sort_by_distance(self, ps: Union["Units", List["Point2"]]) -> List["Point2"]:
    """Return the target points as a list sorted by distance from self.

    You should not pass a set or dict since those are not sortable.  If
    you want to sort your units towards a point, use
    'units.sorted_by_distance_to(point)' instead.
    """
    if len(ps) == 1:
        # Fixed: previously returned the bare element (ps[0]) instead of
        # the documented list, breaking callers that iterate the result.
        return list(ps)
    return sorted(ps, key=lambda p: self._distance_squared(p.position))
def eval_objfn(self):
    """Compute the components of the regularisation function.

    Returns a (data fidelity, constraint) tuple contributing to the
    objective function.
    """
    fidelity = self.obfn_g0(self.obfn_g0var())
    constraint = self.obfn_g1(self.obfn_g1var())
    return fidelity, constraint
def assertEqual(first, second, message=None):
    """Assert that first equals second.

    :param first: First part to evaluate
    :param second: Second part to evaluate
    :param message: Failure message
    :raises: TestStepFail if not first == second
    """
    if first == second:
        return
    if message is not None:
        raise TestStepFail(format_message(message))
    raise TestStepFail("Assert: %s != %s" % (str(first), str(second)))
def timeline(self):
    """Get timeline, reloading the site if its stored revision changed."""
    stored_rev = int(self.db.get('site:rev'))
    if stored_rev != self.revision:
        self.reload_site()
    return self._timeline
def _latex_(self):
    r"""Return the LaTeX representation of this transition.

    Allowed transitions render with ``\rightarrow`` and forbidden ones
    with ``\nrightarrow`` between the two states' representations.
    """
    # The original code had an unreachable `\rightarrow^?` else-branch
    # (the `elif not self.allowed` already caught every falsy value) and
    # an unreachable trailing return; both removed.  `allowed` is
    # treated as a boolean, matching the original behavior.
    if self.allowed:
        return self.e1._latex_() + '\\ \\rightarrow \\ ' + self.e2._latex_()
    return self.e1._latex_() + '\\ \\nrightarrow \\ ' + self.e2._latex_()
def delete_mapping(self, index, doc_type):
    """Delete a typed JSON document type from a specific index.

    (See :ref:`es-guide-reference-api-admin-indices-delete-mapping`)
    """
    mapping_path = make_path(index, doc_type)
    return self.conn._send_request('DELETE', mapping_path)
def _transform_index(index, func, level=None):
    """Apply *func* to all values found in *index*.

    MultiIndex entries are transformed element-wise; when *level* is
    given, only that level of the MultiIndex is transformed.
    """
    if not isinstance(index, MultiIndex):
        values = [func(x) for x in index]
        return Index(values, name=index.name, tupleize_cols=False)
    if level is not None:
        tuples = [
            tuple(func(v) if pos == level else v for pos, v in enumerate(entry))
            for entry in index
        ]
    else:
        tuples = [tuple(func(v) for v in entry) for entry in index]
    return MultiIndex.from_tuples(tuples, names=index.names)
def _create_prefix_notification(outgoing_msg, rpc_session):
    """Construct a prefix notification from the given outgoing message.

    The given RPC session is used to create the RPC notification
    message (add/delete of a remote or local prefix).
    """
    assert outgoing_msg
    path = outgoing_msg.path
    assert path
    vpn_nlri = path.nlri
    assert path.source is not None
    if path.source != VRF_TABLE:
        # Extract relevant info for update-add/update-delete.
        params = [{ROUTE_DISTINGUISHER: outgoing_msg.route_dist,
                   PREFIX: vpn_nlri.prefix,
                   NEXT_HOP: path.nexthop,
                   VRF_RF: VrfConf.rf_2_vrf_rf(path.route_family)}]
        if path.nlri.ROUTE_FAMILY.safi not in (subaddr_family.IP_FLOWSPEC,
                                               subaddr_family.VPN_FLOWSPEC):
            # BUG FIX: `params` is a list; the label belongs in its single
            # dict element.  `params[VPN_LABEL] = ...` raised TypeError
            # (list indices must be integers).
            params[0][VPN_LABEL] = path.label_list[0]
        if not path.is_withdraw:
            # Create notification to NetworkController.
            rpc_msg = rpc_session.create_notification(
                NOTIFICATION_ADD_REMOTE_PREFIX, params)
        else:
            # Create update-delete request to NetworkController.
            rpc_msg = rpc_session.create_notification(
                NOTIFICATION_DELETE_REMOTE_PREFIX, params)
    else:
        # Extract relevant info for update-add/update-delete.
        params = [{ROUTE_DISTINGUISHER: outgoing_msg.route_dist,
                   PREFIX: vpn_nlri.prefix,
                   NEXT_HOP: path.nexthop,
                   VRF_RF: VrfConf.rf_2_vrf_rf(path.route_family),
                   ORIGIN_RD: path.origin_rd}]
        if not path.is_withdraw:
            # Create notification to NetworkController.
            rpc_msg = rpc_session.create_notification(
                NOTIFICATION_ADD_LOCAL_PREFIX, params)
        else:
            # Create update-delete request to NetworkController.
            rpc_msg = rpc_session.create_notification(
                NOTIFICATION_DELETE_LOCAL_PREFIX, params)
    return rpc_msg
def goback(self, days=1):
    """Drop the newest *days* entries from every per-day data series.

    (Originally: 刪除最新天數資料數據 — *days* is how many most-recent
    days to remove.)
    """
    # `xrange` is Python-2-only; this module otherwise uses Python-3
    # features, so use `range`.
    series = (self.raw_data, self.data_date, self.stock_range,
              self.stock_vol, self.stock_open, self.stock_h, self.stock_l)
    for _ in range(days):
        for data in series:
            data.pop()
def create_prj_browser(self):
    """Create a combobox browser for projects and add it to the UI.

    :returns: the created combo box browser
    :rtype: :class:`jukeboxcore.gui.widgets.browser.ComboBoxBrowser`
    :raises: None
    """
    browser = ComboBoxBrowser(1, headers=['Project:'])
    self.central_vbox.insertWidget(0, browser)
    return browser
def from_hsv(h, s, v, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specified HSV values.

    Parameters:
      :h: The Hue component value
      :s: The Saturation component value [0...1]
      :v: The Value component [0...1]
      :alpha: The color transparency [0...1], default is opaque
      :wref: The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> Color.from_hsv(30, 1, 1)
    Color(1.0, 0.5, 0.0, 1.0)
    >>> Color.from_hsv(30, 1, 1, 0.5)
    Color(1.0, 0.5, 0.0, 0.5)
    """
    # Round-trip through RGB to obtain saturation/lightness in HSL
    # space, but keep the caller's hue (the converted hue is discarded).
    r, g, b = hsv_to_rgb(h, s, v)
    _, hsl_s, hsl_l = rgb_to_hsl(r, g, b)
    return Color((h, hsl_s, hsl_l), 'hsl', alpha, wref)
def recordtype(typename, field_names, verbose=False, **default_kwds):
    '''Returns a new class with named fields.

    @keyword field_defaults: A mapping from (a subset of) field names to
        default values.
    @keyword default: If provided, the default value for all fields without
        an explicit default in `field_defaults`.

    >>> Point = recordtype('Point', 'x y', default=0)
    >>> Point.__doc__           # docstring for the new class
    'Point(x, y)'
    >>> Point()                 # instantiate with defaults
    Point(x=0, y=0)
    >>> p = Point(11, y=22)     # instantiate with positional args or keywords
    >>> p[0] + p.y              # accessible by name and index
    33
    >>> p.x = 100; p[1] = 200   # modifiable by name and index
    >>> p
    Point(x=100, y=200)
    >>> x, y = p                # unpack
    >>> x, y
    (100, 200)
    >>> d = p.todict()          # convert to a dictionary
    >>> d['x']
    100
    >>> Point(**d) == p         # convert from a dictionary
    True
    '''
    # Parse and validate the field names. Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, str):
        # names separated by whitespace and/or commas
        field_names = field_names.replace(',', ' ').split()
    field_names = tuple(map(str, field_names))
    if not field_names:
        raise ValueError('Records must have at least one field')
    for name in (typename,) + field_names:
        # NOTE(review): min() over the per-character booleans acts as all();
        # on an empty name it raises a bare ValueError rather than the
        # informative message below.
        if not min(c.isalnum() or c == '_' for c in name):
            raise ValueError('Type names and field names can only contain '
                             'alphanumeric characters and underscores: %r' % name)
        if iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a '
                             'number: %r' % name)
    # Reject leading underscores (would collide with internals) and duplicates.
    seen_names = set()
    for name in field_names:
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)
    # determine the func_defaults of __init__
    field_defaults = default_kwds.pop('field_defaults', {})
    if 'default' in default_kwds:
        # A blanket default fills in every field without an explicit one.
        default = default_kwds.pop('default')
        init_defaults = tuple(field_defaults.get(f, default) for f in field_names)
    elif not field_defaults:
        init_defaults = None
    else:
        # Explicit defaults must cover a contiguous suffix of the field list,
        # mirroring how positional default arguments work.
        default_fields = field_names[-len(field_defaults):]
        if set(default_fields) != set(field_defaults):
            raise ValueError('Missing default parameter values')
        init_defaults = tuple(field_defaults[f] for f in default_fields)
    if default_kwds:
        raise ValueError('Invalid keyword arguments: %s' % default_kwds)
    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = ', '.join(field_names)
    reprtxt = ', '.join('%s=%%r' % f for f in field_names)
    dicttxt = ', '.join('%r: self.%s' % (f, f) for f in field_names)
    tupletxt = repr(tuple('self.%s' % f for f in field_names)).replace("'", '')
    inittxt = '; '.join('self.%s=%s' % (f, f) for f in field_names)
    itertxt = '; '.join('yield self.%s' % f for f in field_names)
    eqtxt = ' and '.join('self.%s==other.%s' % (f, f) for f in field_names)
    template = dedent('''
        class %(typename)s(object):
            '%(typename)s(%(argtxt)s)'

            __slots__ = %(field_names)r

            def __init__(self, %(argtxt)s):
                %(inittxt)s

            def __len__(self):
                return %(numfields)d

            def __iter__(self):
                %(itertxt)s

            def __getitem__(self, index):
                return getattr(self, self.__slots__[index])

            def __setitem__(self, index, value):
                return setattr(self, self.__slots__[index], value)

            def todict(self):
                'Return a new dict which maps field names to their values'
                return {%(dicttxt)s}

            def __repr__(self):
                return '%(typename)s(%(reprtxt)s)' %% %(tupletxt)s

            def __eq__(self, other):
                return isinstance(other, self.__class__) and %(eqtxt)s

            def __ne__(self, other):
                return not self==other

            def __getstate__(self):
                return %(tupletxt)s

            def __setstate__(self, state):
                %(tupletxt)s = state
    ''') % locals()
    # Execute the template string in a temporary namespace
    namespace = {}
    try:
        exec(template, namespace)
        if verbose:
            print(template)
    except SyntaxError as e:
        # NOTE(review): `e.message` is Python-2-only; on Python 3 this line
        # would itself raise AttributeError instead of the intended SyntaxError.
        raise SyntaxError(e.message + ':\n' + template)
    cls = namespace[typename]
    # Attach the computed defaults to the generated __init__.
    if sys.version_info.major == 3:
        cls.__init__.__defaults__ = init_defaults
    elif sys.version_info.major == 2:
        cls.__init__.im_func.func_defaults = init_defaults
    else:
        # import pdb
        # pdb.set_trace()
        assert 0
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in enviroments where
    # sys._getframe is not defined (Jython for example).
    if hasattr(sys, '_getframe') and sys.platform != 'cli':
        cls.__module__ = sys._getframe(1).f_globals['__name__']
    return cls
def edge2geoff(from_node, to_node, properties, edge_relationship_name, encoder):
    """Convert a NetworkX edge into a Geoff string.

    Parameters
    ----------
    from_node : str or int
        the ID of a NetworkX source node
    to_node : str or int
        the ID of a NetworkX target node
    properties : dict
        a dictionary of edge attributes
    edge_relationship_name : str
        string that describes the relationship between the two nodes
    encoder : json.JSONEncoder
        an instance of a JSON encoder (e.g. `json.JSONEncoder`)

    Returns
    -------
    geoff : str
        a Geoff string
    """
    if properties:
        return '({0})-[:{1} {2}]->({3})'.format(
            from_node, edge_relationship_name, encoder.encode(properties), to_node)
    return '({0})-[:{1}]->({2})'.format(from_node, edge_relationship_name, to_node)
def unpickle_stats(stats):
    """Unpickle a pstats.Stats object.

    The stream attribute is set to a truthy placeholder after loading —
    presumably because the original stream is not picklable; confirm
    against the pickling side.
    """
    unpickled = cPickle.loads(stats)
    unpickled.stream = True
    return unpickled
def strip_encoding_cookie(filelike):
    """Generator yielding lines from a text-mode file, skipping the
    encoding cookie if it is found in either of the first two lines."""
    lines = iter(filelike)
    try:
        # Only the first two lines may legitimately carry the cookie.
        for _ in range(2):
            line = next(lines)
            if not cookie_comment_re.match(line):
                yield line
    except StopIteration:
        return
    for line in lines:
        yield line