signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def deprecated_function(func, warning=DEPRECATED_FUNCTION_WARNING):
    """Decorate ``func`` so that calling it emits a `DeprecationWarning`.

    Parameters
    ----------
    func : callable
        the function to decorate with a `DeprecationWarning`
    warning : str, optional
        the warning to present

    Notes
    -----
    The final warning message is formatted as ``warning.format(func)`` so you
    can use attribute references to the function itself.  See the default
    message as an example.
    """
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        # Use the caller-supplied template.  The original implementation
        # ignored ``warning`` and always formatted the module-level default,
        # so custom messages were silently discarded.
        warnings.warn(
            warning.format(func),
            category=DeprecationWarning,
            stacklevel=2,
        )
        return func(*args, **kwargs)

    return wrapped_func
def get_web_auth_session_key(self, url, token=""):
    """Retrieve the session key of a web authorization process by its URL."""
    key, _username = self.get_web_auth_session_key_username(url, token)
    return key
def is_amex(n):
    """Check whether a credit card number fits the American Express format.

    Amex numbers are 15 digits long and start with 34 or 37.
    """
    digits = str(n)
    return len(digits) == 15 and digits[:2] in ("34", "37")
def disconnect(self, code):
    """Called when the WebSocket connection is closed.

    Removes every Subscriber row belonging to this session.
    """
    stale = Subscriber.objects.filter(session_id=self.session_id)
    stale.delete()
def date_this_month(self, before_today=True, after_today=False):
    """Return a date within the current month.

    :param before_today: include days in current month before today
    :param after_today: include days in current month after today
    :return: datetime.date (today itself when both flags are False)
    """
    today = date.today()
    month_start = today.replace(day=1)
    next_month_start = month_start + relativedelta.relativedelta(months=1)

    if before_today:
        # Upper bound depends on whether days after today are allowed.
        upper = next_month_start if after_today else today
        return self.date_between_dates(month_start, upper)
    if after_today:
        return self.date_between_dates(today, next_month_start)
    return today
def format_expose(expose):
    """Convert port number(s), as used in the Dockerfile ``EXPOSE`` command, to a tuple.

    :param expose: Port numbers, can be an integer, string, or a list/tuple of those.
    :type expose: int | unicode | str | list | tuple
    :return: A tuple, to be separated by spaces before inserting in a Dockerfile.
    :rtype: tuple
    """
    if isinstance(expose, six.string_types):
        return expose,
    elif isinstance(expose, collections.Iterable):
        # Materialize into a tuple: on Python 3, ``map`` returns a lazy
        # iterator, which violated the documented tuple return type.
        return tuple(map(six.text_type, expose))
    return six.text_type(expose),
def _on_decisions_event(self, event=None, **kwargs):
    """Handle an Event received on the decisions channel.

    Saves the value in ``group_decisions``.  If ``num_subperiods`` is None
    (and no rate limit applies), immediately broadcasts the event back out
    on the group_decisions channel.
    """
    # Decisions arriving before the ready handshake are dropped.
    if not self.ran_ready_function:
        logger.warning('ignoring decision from {} before when_all_players_ready: {}'.format(
            event.participant.code, event.value))
        return
    with track('_on_decisions_event'):
        self.group_decisions[event.participant.code] = event.value
        self._group_decisions_updated = True
        self.save(update_fields=['group_decisions', '_group_decisions_updated'])
        if not self.num_subperiods() and not self.rate_limit():
            self.send('group_decisions', self.group_decisions)
def get_excitation_spectrum(self, width=0.1, npoints=2000):
    """Generate an excitation spectrum from the singlet roots of TDDFT calculations.

    Args:
        width (float): Width for Gaussian smearing.
        npoints (int): Number of energy points.  More points => smoother curve.

    Returns:
        (ExcitationSpectrum) which can be plotted using
        pymatgen.vis.plotters.SpectrumPlotter.
    """
    singlets = self.parse_tddft()["singlet"]
    en = np.array([d["energy"] for d in singlets])
    osc = np.array([d["osc_strength"] for d in singlets])

    # Pad the energy window on both sides of the root range.
    epad = 20.0 * width
    emin = en[0] - epad
    emax = en[-1] + epad
    de = (emax - emin) / npoints

    # Use a width of at least two grid points.
    if width < 2 * de:
        width = 2 * de

    energies = [emin + ie * de for ie in range(npoints)]

    cutoff = 20.0 * width
    gamma = 0.5 * width
    gamma_sqrd = gamma * gamma

    de = (energies[-1] - energies[0]) / (len(energies) - 1)
    prefac = gamma / np.pi * de

    x = []
    y = []
    for energy in energies:
        xx0 = energy - en
        stot = osc / (xx0 * xx0 + gamma_sqrd)
        # Only roots within ``cutoff`` of this energy contribute.
        t = np.sum(stot[np.abs(xx0) <= cutoff])
        x.append(energy)
        y.append(t * prefac)
    return ExcitationSpectrum(x, y)
def _on_action_triggered(self):
    """Emit ``open_requested`` when a recent-file action has been triggered."""
    action = self.sender()
    assert isinstance(action, QtWidgets.QAction)
    self.open_requested.emit(action.data())
    self.update_actions()
def get_vlan_brief_output_provisioned_vlans_count(self, **kwargs):
    """Auto Generated Code"""
    # Build <get_vlan_brief><output><provisioned-vlans-count> and hand the
    # tree to the callback.
    get_vlan_brief = ET.Element("get_vlan_brief")
    config = get_vlan_brief
    output = ET.SubElement(get_vlan_brief, "output")
    count_el = ET.SubElement(output, "provisioned-vlans-count")
    count_el.text = kwargs.pop('provisioned_vlans_count')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def main():
    """Main function of Lexicon."""
    # Dynamically determine all the providers available and gather command
    # line arguments.
    parsed_args = generate_cli_main_parser().parse_args()

    log_level = logging.getLevelName(parsed_args.log_level)
    logging.basicConfig(stream=sys.stdout, level=log_level, format='%(message)s')
    logger.debug('Arguments: %s', parsed_args)

    # In the CLI context, configuration is gathered interactively:
    #   * from the command line
    #   * from the environment variables
    #   * from lexicon configuration files found in given --config-dir
    #     (default is current dir)
    config = ConfigResolver()
    config.with_args(parsed_args).with_env().with_config_dir(parsed_args.config_dir)

    results = Client(config).execute()
    handle_output(results, parsed_args.output, config.resolve('lexicon:action'))
def merge_string_tuples(tuple1: tuple, tuple2: tuple) -> tuple:
    """Merge two string tuples element-wise by concatenation.

    >>> merge_string_tuples(('Harpreet', 'Muskan'), ('Kour', 'Sethi'))
    ('HarpreetKour', 'MuskanSethi')

    :param tuple1: First tuple of strings
    :param tuple2: Second tuple of strings
    :return: a tuple of pairwise-concatenated strings
    """
    return tuple(first + second for first, second in zip(tuple1, tuple2))
def _jobs_to_do(self, restrictions):
    """:return: the relation containing the keys to be computed (derived from self.key_source)"""
    if self.restriction:
        raise DataJointError('Cannot call populate on a restricted table. '
                             'Instead, pass conditions to populate() as arguments.')
    todo = self.key_source
    if not isinstance(todo, QueryExpression):
        raise DataJointError('Invalid key_source value')
    # Check whether the target lacks any attribute from the primary key of
    # key_source.  ``next`` raises StopIteration when nothing is missing, in
    # which case the ``raise`` statement below is never reached.
    try:
        raise DataJointError(
            'The populate target lacks attribute %s from the primary key of key_source' %
            next(name for name in todo.heading.primary_key
                 if name not in self.target.heading))
    except StopIteration:
        pass
    return (todo & AndList(restrictions)).proj()
def load(self, shapefile=None):
    """Open a shapefile from a filename or file-like object.

    Normally this method would be called by the constructor with the file
    object or file name as an argument.
    """
    if shapefile:
        shapeName, ext = os.path.splitext(shapefile)
        self.shapeName = shapeName
        # Open the three component files; each failure gets its own message.
        try:
            self.shp = open("%s.shp" % shapeName, "rb")
        except IOError:
            raise ShapefileException("Unable to open %s.shp" % shapeName)
        try:
            self.shx = open("%s.shx" % shapeName, "rb")
        except IOError:
            raise ShapefileException("Unable to open %s.shx" % shapeName)
        try:
            self.dbf = open("%s.dbf" % shapeName, "rb")
        except IOError:
            raise ShapefileException("Unable to open %s.dbf" % shapeName)
        # Parse headers of whichever files opened successfully.
        if self.shp:
            self.__shpHeader()
        if self.dbf:
            self.__dbfHeader()
def standarize(trainingset):
    """Morph the input signal to a mean of 0 and scale the signal strength by
    dividing with the standard deviation (rather than forcing a [0, 1] range).
    """
    features = np.array([instance.features for instance in trainingset])
    means = features.mean(axis=0)
    stds = features.std(axis=0)

    def encoder(dataset):
        for instance in dataset:
            if np.any(stds == 0):
                # Only normalize features with nonzero std to avoid
                # division by zero; zero-variance features are left as-is.
                nonzero = np.where(stds != 0)
                instance.features[nonzero] = (
                    (instance.features[nonzero] - means[nonzero]) / stds[nonzero])
            else:
                instance.features = (instance.features - means) / stds
        return dataset

    return encoder
def AAAA(host, nameserver=None):
    '''Return the AAAA record for ``host``.

    Always returns a list.

    CLI Example:

    .. code-block:: bash

        salt ns1 dig.AAAA www.google.com
    '''
    dig = ['dig', '+short', six.text_type(host), 'AAAA']
    if nameserver is not None:
        dig.append('@{0}'.format(nameserver))

    cmd = __salt__['cmd.run_all'](dig, python_shell=False)
    # In this case, 0 is not the same as False
    if cmd['retcode'] != 0:
        log.warning(
            'dig returned exit code \'%s\'. Returning empty list as fallback.',
            cmd['retcode'])
        return []

    # make sure all entries are IPs
    return [entry for entry in cmd['stdout'].split('\n') if check_ip(entry)]
def getArrays(self):
    """Return wavelength and flux arrays in user units.

    Returns
    -------
    wave : array_like
        Wavelength array in ``self.waveunits``.
    flux : array_like
        Flux array in ``self.fluxunits``.

    When present, ``self.primary_area`` is used for unit conversion.
    """
    area = getattr(self, 'primary_area', None)
    wave = self.GetWaveSet()
    flux = self(wave)
    flux = units.Photlam().Convert(wave, flux, self.fluxunits.name, area=area)
    wave = units.Angstrom().Convert(wave, self.waveunits.name)
    return wave, flux
def load_remote(url, **kwargs):
    """Load a mesh at a remote URL into a local trimesh object.

    This must be called explicitly rather than automatically from
    ``trimesh.load`` to ensure users don't accidentally make network requests.

    Parameters
    ----------
    url : string
        URL containing mesh file
    **kwargs : passed to ``load``
    """
    # import here to keep requirement soft
    import requests

    # download the mesh and wrap the payload as a file object
    response = requests.get(url)
    file_obj = util.wrap_as_stream(response.content)
    # so loaders can access textures/etc
    resolver = visual.resolvers.WebResolver(url)
    return load(file_obj=file_obj,
                file_type=url,
                resolver=resolver,
                **kwargs)
def competitions_data_download_file(self, id, file_name, **kwargs):  # noqa: E501
    """Download competition data file  # noqa: E501

    This method makes a synchronous HTTP request by default.  To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.competitions_data_download_file(id, file_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: Competition name (required)
    :param str file_name: Competition name (required)
    :return: Result
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: the helper returns the request thread directly.
        return self.competitions_data_download_file_with_http_info(id, file_name, **kwargs)  # noqa: E501
    data = self.competitions_data_download_file_with_http_info(id, file_name, **kwargs)  # noqa: E501
    return data
def hexedit(x):
    """Run an external hex editor on a packet or bytes.

    Set the editor in ``conf.prog.hexedit``.
    """
    payload = bytes(x)
    fname = get_temp_file()
    with open(fname, "wb") as f:
        f.write(payload)
    subprocess.call([conf.prog.hexedit, fname])
    # Read back whatever the user saved in the editor.
    with open(fname, "rb") as f:
        return f.read()
def celery_task_wrapper(f):
    """Provide a task wrapper for celery that sets up the cache and ensures
    that the local store is cleared after completion.
    """
    from celery.utils import fun_takes_kwargs

    @wraps(f, assigned=available_attrs(f))
    def newf(*args, **kwargs):
        backend = get_backend()
        was_patched = backend._patched
        get_backend().patch()
        # since this function takes all keyword arguments,
        # we will pass only the ones the function below accepts,
        # just as celery does
        supported_keys = fun_takes_kwargs(f, kwargs)
        new_kwargs = {key: val for key, val in kwargs.items()
                      if key in supported_keys}
        try:
            ret = f(*args, **new_kwargs)
        finally:
            # Always clear the local store; only unpatch if we patched here.
            local.clear()
            if not was_patched:
                get_backend().unpatch()
        return ret

    return newf
def down_by_time(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", count=1):
    """Sync the most recent file(s) by date/time attributes."""
    files = command.list_files(*filters, remote_dir=remote_dir)
    by_age = sorted(files, key=lambda f: f.datetime)
    # Take the newest ``count`` files; sync them newest-first.
    to_sync = by_age[-count:]
    _notify_sync(Direction.down, to_sync)
    down_by_files(to_sync[::-1], local_dir=local_dir)
def plot_cells(cell_1, cell_2, cell_3):
    """Plot three cells side by side and return the three axes."""
    fig, axes = plt.subplots(1, 3, figsize=(12, 5))
    for index, (ax, cell) in enumerate(zip(axes, (cell_1, cell_2, cell_3)), start=1):
        ax.grid(False)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title("Type %d" % index)
        ax.imshow(cell)
    return tuple(axes)
def extend(self, *array_list):
    """Concatenate this array with the given arrays.

    This method doesn't modify the current array.  Instead, it creates a new
    one that holds all of the arrays (see :meth:`.WBinArray.concat` method).

    :param array_list: list of WBinArray
    :return: newly created WBinArray
    """
    merged = WBinArray(int(self), len(self))
    for other in array_list:
        merged = merged.concat(other)
    return merged
def training_job_summaries(self, force_refresh=False):
    """A (paginated) list of everything from ``ListTrainingJobsForTuningJob``.

    Args:
        force_refresh (bool): Set to True to fetch the latest data from the
            SageMaker API.

    Returns:
        dict: The Amazon SageMaker response for ``ListTrainingJobsForTuningJob``.
    """
    if force_refresh:
        self.clear_cache()
    if self._training_job_summaries is not None:
        return self._training_job_summaries

    output = []
    next_args = {}
    # Hard cap of 100 pages guards against a runaway pagination loop.
    for count in range(100):
        logging.debug("Calling list_training_jobs_for_hyper_parameter_tuning_job %d" % count)
        raw_result = self._sage_client.list_training_jobs_for_hyper_parameter_tuning_job(
            HyperParameterTuningJobName=self.name, MaxResults=100, **next_args
        )
        new_output = raw_result['TrainingJobSummaries']
        output.extend(new_output)
        logging.debug("Got %d more TrainingJobs. Total so far: %d" % (
            len(new_output), len(output)))
        if ('NextToken' in raw_result) and (len(new_output) > 0):
            next_args['NextToken'] = raw_result['NextToken']
        else:
            break

    self._training_job_summaries = output
    return output
def generate_phase_1(dim=40):
    """First step in creating datapoints in the Poirazi & Mel model.

    Returns a vector of dimension ``dim`` whose last four values are 1 and
    whose remaining values are drawn from a standard normal distribution.
    """
    phase_1 = numpy.random.normal(0, 1, dim)
    phase_1[-4:] = 1.0
    return phase_1
def paint(self, iconic, painter, rect, mode, state, options):
    """Main paint method: paint the icon once per option set."""
    for option in options:
        self._paint_icon(iconic, painter, rect, mode, state, option)
def decode(data):
    """Decode multibase-encoded data.

    :param data: multibase encoded data
    :type data: str or bytes
    :return: decoded data
    :rtype: str
    :raises ValueError: if the data is not multibase encoded
    """
    raw = ensure_bytes(data, 'utf8')
    codec = get_codec(raw)
    # Strip the multibase code prefix before handing off to the codec.
    return codec.converter.decode(raw[CODE_LENGTH:])
def get_security_group_id(self, name):
    """Take a security group name and return its ID.

    Works around the VPC API requiring IDs rather than names.  Results are
    memoized; on a cache miss the cache is rebuilt once so that groups
    created after the first call can still be resolved (the original never
    refreshed and raised KeyError for any group created later).

    :param name: security group name
    :return: the security group ID
    :raises KeyError: if no group named ``name`` exists
    """
    def _refresh():
        # Memoize the entire list of groups.
        self._security_groups = {
            group.name: group.id for group in self.get_all_security_groups()
        }

    if not hasattr(self, '_security_groups'):
        _refresh()
    if name not in self._security_groups:
        # The group may have been created since the cache was built.
        _refresh()
    return self._security_groups[name]
def apply_injectables(self, targets):
    """Given an iterable of `Target` instances, apply their transitive injectables."""
    target_types = {type(t) for t in targets}
    subsystem_deps = {s for s in itertools.chain(*(t.subsystems() for t in target_types))}
    for subsystem in subsystem_deps:
        # TODO: The is_initialized() check is primarily for tests and would be nice to do away with.
        if issubclass(subsystem, InjectablesMixin) and subsystem.is_initialized():
            subsystem.global_instance().injectables(self)
def from_payload(self, payload):
    """Init frame from binary data."""
    # Session id is a big-endian 16-bit value in the first two bytes.
    self.session_id = (payload[0] << 8) | payload[1]
    self.status = CommandSendConfirmationStatus(payload[2])
def connect(self):
    """Initiate the channel we want to start streams from."""
    self.socketIO = SocketIO(
        host=self.iosocket_server,
        port=80,
        resource=self.iosocket_resource,
        proxies=self.proxies,
        headers=self.headers,
        transports=["websocket"],
        Namespace=AtlasNamespace,
    )
    # Route server-side errors to our handler.
    self.socketIO.on(self.EVENT_NAME_ERROR, self.handle_error)
def recv_match(self, condition=None, type=None, blocking=False):
    '''Receive the next message that matches the given condition.

    ``type`` can be a string or a list of strings.
    '''
    # Normalize a single type name into a list for uniform membership tests.
    wanted = [type] if (type is not None and not isinstance(type, list)) else type
    while True:
        m = self.recv_msg()
        if m is None:
            return None
        if wanted is not None and m.get_type() not in wanted:
            continue
        if not mavutil.evaluate_condition(condition, self.messages):
            continue
        return m
def missing_representative_sequence(self):
    """list: List of genes with no mapping to a representative sequence."""
    represented = self.genes_with_a_representative_sequence
    return [gene.id for gene in self.genes if not represented.has_id(gene.id)]
def center(a: Union[Set["Point2"], List["Point2"]]) -> "Point2":
    """Return the central (mean) point of the given points."""
    total = Point2((0, 0))
    for point in a:
        total += point
    return total / len(a)
def skopeo_push(self, repository=None, tag=None):
    """Push an image from the Docker daemon to Docker using skopeo.

    :param repository: repository to be pushed to
    :param tag: tag
    :return: pushed image
    """
    copied = self.copy(repository, tag,
                       SkopeoTransport.DOCKER_DAEMON,
                       SkopeoTransport.DOCKER)
    return copied.using_transport(SkopeoTransport.DOCKER)
def intersect(self, other_seg, tol=1e-12):
    """Find the intersections of two segments.

    Returns a list of tuples (t1, t2) such that
    ``self.point(t1) == other_seg.point(t2)``.

    Note: this will fail if the two segments coincide for more than a finite
    collection of points.
    """
    if isinstance(other_seg, Line):
        return bezier_by_line_intersections(self, other_seg)
    if isinstance(other_seg, (QuadraticBezier, CubicBezier)):
        assert self != other_seg
        longer_length = max(self.length(), other_seg.length())
        return bezier_intersections(self, other_seg,
                                    longer_length=longer_length,
                                    tol=tol, tol_deC=tol)
    if isinstance(other_seg, Arc):
        # Delegate to Arc.intersect and swap parameter order back.
        return [(t1, t2) for t2, t1 in other_seg.intersect(self)]
    if isinstance(other_seg, Path):
        raise TypeError("other_seg must be a path segment, not a Path object, use "
                        "Path.intersect().")
    raise TypeError("other_seg must be a path segment.")
def backwards(apps, schema_editor):
    """Delete sample events, including derivative repeat and variation events."""
    titles = [
        'Daily Event',
        'Weekday Event',
        'Weekend Event',
        'Weekly Event',
        'Monthly Event',
        'Yearly Event',
    ]
    EventBase.objects.filter(title__in=titles).delete()
def read_tcp(self, length):
    """Read a Transmission Control Protocol (TCP) header [RFC 793].

    Fields (octet offset / bits / name):
        0   0    tcp.srcport         Source Port
        2   16   tcp.dstport         Destination Port
        4   32   tcp.seq             Sequence Number
        8   64   tcp.ack             Acknowledgement Number (if ACK set)
        12  96   tcp.hdr_len         Data Offset
        12  100  -                   Reserved (must be zero)
        12  103  tcp.flags.ns        ECN Concealment Protection (NS)
        13  104  tcp.flags.cwr       Congestion Window Reduced (CWR)
        13  105  tcp.flags.ece       ECN-Echo (ECE)
        13  106  tcp.flags.urg       Urgent (URG)
        13  107  tcp.flags.ack       Acknowledgement (ACK)
        13  108  tcp.flags.psh       Push Function (PSH)
        13  109  tcp.flags.rst       Reset Connection (RST)
        13  110  tcp.flags.syn       Synchronize Sequence Numbers (SYN)
        13  111  tcp.flags.fin       Last Packet from Sender (FIN)
        14  112  tcp.window_size     Size of Receive Window
        16  128  tcp.checksum        Checksum
        18  144  tcp.urgent_pointer  Urgent Pointer (if URG set)
        20  160  tcp.opt             TCP Options (if data offset > 5)
    """
    if length is None:
        length = len(self)

    _srcp = self._read_unpack(2)
    _dstp = self._read_unpack(2)
    _seqn = self._read_unpack(4)
    _ackn = self._read_unpack(4)
    _lenf = self._read_binary(1)   # data offset (4 bits) + reserved + NS flag
    _flag = self._read_binary(1)   # CWR/ECE/URG/ACK/PSH/RST/SYN/FIN
    _wins = self._read_unpack(2)
    _csum = self._read_fileng(2)
    _urgp = self._read_unpack(2)

    tcp = dict(
        srcport=_srcp,
        dstport=_dstp,
        seq=_seqn,
        ack=_ackn,
        hdr_len=int(_lenf[:4], base=2) * 4,
        flags=dict(
            ns=bool(int(_lenf[7])),
            cwr=bool(int(_flag[0])),
            ece=bool(int(_flag[1])),
            urg=bool(int(_flag[2])),
            ack=bool(int(_flag[3])),
            psh=bool(int(_flag[4])),
            rst=bool(int(_flag[5])),
            syn=bool(int(_flag[6])),
            fin=bool(int(_flag[7])),
        ),
        window_size=_wins,
        checksum=_csum,
        urgent_pointer=_urgp,
    )

    # packet type flags
    self._syn = bool(int(_flag[6]))
    self._ack = bool(int(_flag[3]))

    _hlen = tcp['hdr_len']
    _optl = _hlen - 20
    if _optl:
        options = self._read_tcp_options(_optl)
        tcp['opt'] = options[0]   # tuple of option acronyms
        tcp.update(options[1])    # merge option info to buffer

    length -= _hlen
    tcp['packet'] = self._read_packet(header=_hlen, payload=length)

    return self._decode_next_layer(tcp, None, length)
def find_group(self, star, starlist):
    """Find the ids of stars in ``starlist`` within ``crit_separation`` of ``star``.

    Parameters
    ----------
    star : `~astropy.table.Row`
        Star which will be either the head of a cluster or an isolated one.
    starlist : `~astropy.table.Table`
        List of star positions.  Columns named ``x_0`` and ``y_0``, which
        correspond to the centroid coordinates of the sources, must be
        provided.

    Returns
    -------
    Array containing the ids of those stars which are at a distance less
    than ``crit_separation`` from ``star``.
    """
    dx = star['x_0'] - starlist['x_0']
    dy = star['y_0'] - starlist['y_0']
    close_enough = np.hypot(dx, dy) < self.crit_separation
    return np.asarray(starlist[close_enough]['id'])
def restore(self, path, configuration_type="running", restore_method="override",
            vrf_management_name=None):
    """Restore configuration on device from provided configuration file.

    Restore configuration from local file system or ftp/tftp server into
    'running-config' or 'startup-config'.

    :param path: relative path to the file on the remote host
        tftp://server/sourcefile
    :param configuration_type: the configuration type to restore
        (StartUp or Running)
    :param restore_method: override current config or not
    :param vrf_management_name: Virtual Routing and Forwarding management name
    :return: exception on crash
    """
    # Fall back to the resource-level VRF name when none was supplied.
    if hasattr(self.resource_config, "vrf_management_name"):
        vrf_management_name = vrf_management_name or self.resource_config.vrf_management_name

    self._validate_configuration_type(configuration_type)
    self.restore_flow.execute_flow(path=self.get_path(path),
                                   configuration_type=configuration_type.lower(),
                                   restore_method=restore_method.lower(),
                                   vrf_management_name=vrf_management_name)
def docx_preprocess(docx, batch=False):
    """Load docx file(s) from a local filepath if not already b64 encoded.

    :param docx: a filepath, an already-encoded payload, or (with ``batch``)
        a list of either
    :param batch: treat ``docx`` as a list and preprocess each element
    :return: base64-encoded document content (or a list thereof)
    """
    if batch:
        return [docx_preprocess(doc, batch=False) for doc in docx]
    if os.path.isfile(docx):
        # A filepath was provided: read and encode.  Use a context manager so
        # the file handle is closed promptly (the original leaked it).
        with open(docx, 'rb') as handle:
            return b64encode(handle.read())
    # assume doc is already b64 encoded
    return docx
def _define_output_buffers(self):
    """Prepare a dictionary mapping each step to the buffers that must be
    updated with that step's output.
    """
    # Buffers that receive the pipeline's input data are keyed under None.
    self.target_buffers = {
        None: [(step, self.buffers[step]) for step in self._get_input_steps()]
    }
    # For every other step, register the buffers of its child nodes.
    for step in self.steps_sorted:
        if step == self:
            continue
        children = [edge[1] for edge in self.graph.out_edges(step)]
        self.target_buffers[step] = [(child, self.buffers[child]) for child in children]
def heatmap(n_x=5, n_y=10):
    """Return a DataFrame with the required format for a heatmap plot.

    Parameters:
        n_x : int
            Number of x categories
        n_y : int
            Number of y categories
    """
    x_labels = ['x_' + str(i) for i in range(n_x)]
    y_labels = ['y_' + str(i) for i in range(n_y)]
    return pd.DataFrame(surface(n_x - 1, n_y - 1).values,
                        index=x_labels, columns=y_labels)
def _do_logon(self):
    """Log on, unconditionally.  This can be used to re-logon.

    This requires credentials to be provided.

    Raises:
        :exc:`~zhmcclient.ClientAuthError`
        :exc:`~zhmcclient.ServerAuthError`
        :exc:`~zhmcclient.ConnectionError`
        :exc:`~zhmcclient.ParseError`
        :exc:`~zhmcclient.HTTPError`
    """
    if self._userid is None:
        raise ClientAuthError("Userid is not provided.")
    if self._password is None:
        if not self._get_password:
            raise ClientAuthError("Password is not provided.")
        # Lazily obtain the password via the supplied callback.
        self._password = self._get_password(self._host, self._userid)

    logon_uri = '/api/sessions'
    logon_body = {
        'userid': self._userid,
        'password': self._password,
    }
    self._headers.pop('X-API-Session', None)  # Just in case
    self._session = self._new_session(self.retry_timeout_config)
    logon_res = self.post(logon_uri, logon_body, logon_required=False)
    self._session_id = logon_res['api-session']
    self._headers['X-API-Session'] = self._session_id
def parse_checksums(checksums_string):
    """Parse a checksums file body into a list of checksum/file dicts.

    :param checksums_string: text with one ``<checksum> <filename>`` per line
    :return: list of ``{'checksum': ..., 'file': ...}`` dicts; empty and
        malformed lines are skipped (the latter with a debug log message)
    """
    parsed = []
    for line in checksums_string.split('\n'):
        # skip empty lines
        if line == '':
            continue
        try:
            checksum, filename = line.split()
        except ValueError:
            logging.debug('Skipping over unexpected checksum line %r', line)
            continue
        # strip leading ./
        if filename.startswith('./'):
            filename = filename[2:]
        parsed.append({'checksum': checksum, 'file': filename})
    return parsed
def sync_remote_to_local(force="no"):
    """Sync your remote postgres database with local.

    Example: fabrik prod sync_remote_to_local
    """
    _check_requirements()

    # Ask for confirmation unless explicitly forced.
    if force != "yes":
        message = ("This will replace your local database '%s' with the "
                   "remote '%s', are you sure [y/n]" % (env.local_psql_db, env.psql_db))
        if prompt(message, "y") != "y":
            logger.info("Sync stopped")
            return

    # Bootstrap fabrik
    init_tasks()

    # Create database dump
    remote_file = "postgresql/sync_%s.sql.tar.gz" % int(time.time() * 1000)
    remote_path = paths.get_backup_path(remote_file)

    env.run("mkdir -p %s" % paths.get_backup_path("postgresql"))
    with context_managers.shell_env(PGPASSWORD=env.psql_password):
        env.run("pg_dump -h localhost -Fc -f %s -U %s %s -x -O" % (
            remote_path, env.psql_user, env.psql_db))

    local_path = "/tmp/%s" % remote_file

    # Download sync file
    get(remote_path, local_path)

    # Import sync file by performing the following task (drop, create, import)
    with context_managers.shell_env(PGPASSWORD=env.local_psql_password):
        elocal("pg_restore --clean -h localhost -d %s -U %s '%s'" % (
            env.local_psql_db, env.local_psql_user, local_path))

    # Cleanup
    env.run("rm %s" % remote_path)
    elocal("rm %s" % local_path)

    # Trigger hook
    run_hook("postgres.after_sync_remote_to_local")
    logger.info("Sync complete")
def shakeshake2_indiv_grad(x1, x2, dy):
    """Overriding gradient for shake-shake of 2 tensors."""
    y = shakeshake2_py(x1, x2, individual=True)
    return tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy])
def convert(self, *formats):
    """Return an Image instance with the first matching format.

    For each format in ``formats``: if the image's :attr:`format` attribute
    is the same as the format, return self, otherwise try the next format.
    If none of the formats match, return a new Image instance with the last
    format.
    """
    last = None
    for candidate in formats:
        last = Image.image_format(candidate)
        if self.format == last:
            return self
    if last is None:
        # No formats were given; preserve the historical (implicit) result.
        return None
    # No format matched: convert to the last requested format, as documented.
    # (The original returned ``_convert`` on the first *non*-matching format,
    # so later formats were never even checked.)
    return self._convert(last)
def get_root_path(self, path):
    """See :py:meth:`~stash.repository.Repository.get_root_path`."""
    # Walk up from ``path`` towards the filesystem root.  A directory that
    # contains a ``.svn`` entry is the root of a Subversion working copy
    # (Subversion 1.7.x keeps a single .svn directory at the top level only).
    while path != '/':
        if '.svn' in os.listdir(path):
            return path
        path = os.path.abspath(os.path.join(path, os.pardir))
    # No Subversion repository found.
    return None
def all(self):
    r"""Returns all content in this node, regardless of whitespace or not.

    This includes all LaTeX needed to reconstruct the original source.

    >>> from TexSoup import TexSoup
    >>> soup = TexSoup(r'''
    ... \newcommand{reverseconcat}[3]{#3#2#1}
    ... ''')
    >>> list(soup.all)
    ['\n', \newcommand{reverseconcat}[3]{#3#2#1}, '\n']
    """
    for item in self.expr.all:
        if not isinstance(item, TexExpr):
            # Plain content (e.g. whitespace strings) passes through as-is.
            yield item
            continue
        # Wrap expressions so callers get navigable nodes with a parent link.
        wrapped = TexNode(item)
        wrapped.parent = self
        yield wrapped
def update(self, byte_arr):
    """Read bytes and update the CRC computed.

    Empty/falsy input leaves the current CRC value untouched.
    """
    if not byte_arr:
        return
    self.value = self.calculate(byte_arr, self.value)
def transform_from_chomsky_normal_form(root):
    # type: (Nonterminal) -> Nonterminal
    """Transform the tree created by grammar in the Chomsky Normal Form to original rules.

    Works in two passes over the parse tree, in place:
    1. undo the terminal-wrapping rules introduced by the CNF transform;
    2. merge the binary split rules back into the original n-ary rules.

    :param root: Root of parsed tree.
    :return: Modified tree (same object as ``root``).
    """
    # --- Pass 1: transform leaves ---
    # Collect all CNF terminal-related rules in post-order.
    items = Traversing.post_order(root)
    items = filter(lambda x: isinstance(x, (ChomskyTermRule, ChomskyTerminalReplaceRule)), items)
    de = deque(items)
    while de:
        rule = de.popleft()
        if isinstance(rule, ChomskyTermRule):
            # A wrapper nonterminal that derives a single terminal: splice the
            # terminal directly into the wrapper's position.
            upper_nonterm = rule.from_symbols[0]  # type: Nonterminal
            term = rule.to_symbols[0]
            Manipulations.replaceNode(upper_nonterm, term)
        elif isinstance(rule, ChomskyTerminalReplaceRule):
            # Replace with the pre-CNF rule; the new rule may itself need
            # further processing, so push it back onto the queue.
            created_rule = rule.from_rule()  # type: Rule
            Manipulations.replaceRule(rule, created_rule)
            de.append(created_rule)
    # --- Pass 2: transform inner nodes (binary split rules) ---
    items = Traversing.post_order(root)
    items = filter(lambda x: isinstance(x, ChomskySplitRule), items)
    de = deque(items)
    while de:
        rule = de.popleft()
        if isinstance(rule, ChomskySplitRule):
            created_rule = rule.from_rule()  # type: Rule
            # Re-attach parent nonterminals to the reconstructed rule.
            for p in rule.from_symbols:  # type: Nonterminal
                p._set_to_rule(created_rule)
                created_rule._from_symbols.append(p)
            # Left child stays as the first symbol of the reconstructed rule.
            left_child = rule.to_symbols[0]  # type: Nonterminal
            left_child._set_from_rule(created_rule)
            created_rule._to_symbols.append(left_child)
            # The right child of a split rule is an auxiliary nonterminal;
            # lift its children up into the reconstructed rule.
            for ch in rule.to_symbols[1].to_rule.to_symbols:  # type: Nonterminal
                ch._set_from_rule(created_rule)
                created_rule.to_symbols.append(ch)
            # Add back if the rule is ChomskySplitRule again (the merge may
            # expose another split level that must be flattened too).
            de.appendleft(created_rule)
    return root
def log_create(self, instance, **kwargs):
    """Helper method to create a new log entry.

    This method automatically populates some fields when no explicit value
    is given.

    :param instance: The model instance to log a change for.
    :type instance: Model
    :param kwargs: Field overrides for the :py:class:`LogEntry` object.
    :return: The new log entry or `None` if there were no changes.
    :rtype: LogEntry
    """
    changes = kwargs.get('changes', None)
    pk = self._get_pk_value(instance)
    if changes is not None:
        # Fill in defaults derived from the instance; explicit kwargs win.
        kwargs.setdefault('content_type', ContentType.objects.get_for_model(instance))
        kwargs.setdefault('object_pk', pk)
        kwargs.setdefault('object_repr', smart_text(instance))
        # object_id is an integer column, so only set it for integer pks.
        if isinstance(pk, integer_types):
            kwargs.setdefault('object_id', pk)
        get_additional_data = getattr(instance, 'get_additional_data', None)
        if callable(get_additional_data):
            kwargs.setdefault('additional_data', get_additional_data())
        # Delete log entries with the same pk as a newly created model. This should only be necessary when an pk is
        # used twice.
        if kwargs.get('action', None) is LogEntry.Action.CREATE:
            if kwargs.get('object_id', None) is not None and self.filter(
                    content_type=kwargs.get('content_type'),
                    object_id=kwargs.get('object_id')).exists():
                self.filter(content_type=kwargs.get('content_type'),
                            object_id=kwargs.get('object_id')).delete()
            else:
                # Fall back to the (string) object_pk when no integer id.
                self.filter(content_type=kwargs.get('content_type'),
                            object_pk=kwargs.get('object_pk', '')).delete()
        # save LogEntry to same database instance is using
        db = instance._state.db
        return self.create(**kwargs) if db is None or db == '' else self.using(db).create(**kwargs)
    # No changes recorded: nothing to log.
    return None
def tempdeny(ip=None, ttl=None, port=None, direction=None, comment=''):
    '''
    Add a rule to the temporary ip deny list.

    See :func:`_access_rule`.

    1- Add an IP:

    CLI Example:

    .. code-block:: bash

        salt '*' csf.tempdeny 127.0.0.1 300 port=22 direction='in' comment='# Brute force attempt'
    '''
    # Thin wrapper: all validation and csf invocation happens in the helper.
    method = 'tempdeny'
    return _tmp_access_rule(method, ip, ttl, port, direction, comment)
def list_nodes(call=None, **kwargs):
    '''
    Return a list of the VMs that in this location
    '''
    # salt-cloud convention: this is a --function/-f entry point only.
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )
    ret = {}
    conn = get_conn()
    server_list = conn.server_list()
    if not server_list:
        return {}
    for server in server_list:
        # server_show returns a mapping keyed by server name; pull this one.
        server_tmp = conn.server_show(server_list[server]['id']).get(server)
        # If the server is deleted while looking it up, skip
        if server_tmp is None:
            continue
        private = []
        public = []
        if 'addresses' not in server_tmp:
            server_tmp['addresses'] = {}
        # Classify every address on every attached network.
        for network in server_tmp['addresses']:
            for address in server_tmp['addresses'][network]:
                if salt.utils.cloud.is_public_ip(address.get('addr', '')):
                    public.append(address['addr'])
                elif ':' in address['addr']:
                    # Contains a colon: treated as IPv6, listed as public.
                    public.append(address['addr'])
                elif '.' in address['addr']:
                    # Dotted-quad that is not a public IP: private IPv4.
                    private.append(address['addr'])
        # accessIPv4/accessIPv6 are provider-level access addresses; fold
        # them into the same public/private buckets.
        if server_tmp['accessIPv4']:
            if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']):
                public.append(server_tmp['accessIPv4'])
            else:
                private.append(server_tmp['accessIPv4'])
        if server_tmp['accessIPv6']:
            public.append(server_tmp['accessIPv6'])
        ret[server] = {
            'id': server_tmp['id'],
            'image': server_tmp['image']['id'],
            'size': server_tmp['flavor']['id'],
            # NOTE(review): reads 'state' from the show() payload — confirm the
            # backend uses 'state' (some OpenStack payloads use 'status').
            'state': server_tmp['state'],
            'private_ips': private,
            'public_ips': public,
        }
    return ret
def obfuscate_unique(tokens, index, replace, replacement, *args):
    """If the token string (a unique value anywhere) inside *tokens[index]*
    matches *replace*, return *replacement*.

    .. note::

        This function is only for replacing absolutely unique ocurrences of
        *replace* (where we don't have to worry about their position).
    """
    token_type, token_string = tokens[index][0], tokens[index][1]
    if token_type != tokenize.NAME:
        return None  # Skip this token: only NAME tokens carry identifiers
    if token_string != replace:
        return None
    # Record the substitution so it can be reversed/looked up later.
    UNIQUE_REPLACEMENTS[replacement] = replace
    return replacement
def list(self, order_id, **params):
    """Retrieve order's line items.

    Returns all line items associated to order.

    :calls: ``get /orders/{order_id}/line_items``
    :param int order_id: Unique identifier of a Order.
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attriubte-style access,
        which represent collection of LineItems.
    :rtype: list
    """
    url = "/orders/{order_id}/line_items".format(order_id=order_id)
    # The client returns (status, headers, body); only the body is needed.
    _status, _headers, line_items = self.http_client.get(url, params=params)
    return line_items
def zero_crossing_last(frames):
    """Finds the last zero crossing in frames.

    A crossing is any index ``i`` where ``sign(frames[i])`` differs from
    ``sign(frames[i+1])``. If no crossing exists, warns and returns the
    last valid index, ``len(frames) - 1``.
    """
    frames = N.array(frames)
    # Indices where the sign changes between consecutive samples.
    crossings = N.where(N.diff(N.sign(frames)))
    # crossings = N.where(frames[:n] * frames[1:n+1] < 0)
    if len(crossings[0]) == 0:
        # Fixed: the original used the Python 2 `print` statement, which is
        # a SyntaxError under Python 3.
        print("No zero crossing")
        return len(frames) - 1
    return crossings[0][-1]
def unpack_attribute ( att ) : """Unpack an embedded attribute into a python or numpy object ."""
if att . unsigned : log . warning ( 'Unsupported unsigned attribute!' ) # TDS 5.0 now has a dataType attribute that takes precedence if att . len == 0 : # Empty val = None elif att . dataType == stream . STRING : # Then look for new datatype string val = att . sdata elif att . dataType : # Then a non - zero new data type val = np . frombuffer ( att . data , dtype = '>' + _dtypeLookup [ att . dataType ] , count = att . len ) elif att . type : # Then non - zero old - data type0 val = np . frombuffer ( att . data , dtype = _attrConverters [ att . type ] , count = att . len ) elif att . sdata : # This leaves both 0 , try old string val = att . sdata else : # Assume new datatype is Char ( 0) val = np . array ( att . data , dtype = _dtypeLookup [ att . dataType ] ) if att . len == 1 : val = val [ 0 ] return att . name , val
def unsubscribe_user_from_discussion(recID, uid):
    """Unsubscribe users from a discussion.

    :param recID: record ID corresponding to the discussion we want to
        unsubscribe the user
    :param uid: user id
    :return: 1 if successful, 0 if not
    """
    query = """DELETE FROM "cmtSUBSCRIPTION"
               WHERE id_bibrec=%s AND id_user=%s"""
    params = (recID, uid)
    try:
        res = run_sql(query, params)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any database error yields the failure code.
        return 0
    # run_sql returns the affected-row count; success means at least one row.
    return 1 if res > 0 else 0
def cleanup_interfaces(self):
    """Removes all /sys/class/gpio/gpioN interfaces that this script
    created, and deletes callback bindings. Should be used after using
    interrupts.
    """
    debug("Cleaning up interfaces...")
    for gpio_id in self._gpio_kernel_interfaces_created:
        # Close the value-file and remove interrupt bindings first.
        self.del_interrupt_callback(gpio_id)
        # Then tell the kernel to drop the exported GPIO interface.
        debug("- unexporting GPIO %s" % gpio_id)
        with open(_SYS_GPIO_ROOT + "unexport", "w") as unexport_file:
            unexport_file.write("%s" % gpio_id)
    # Reset list of created interfaces.
    self._gpio_kernel_interfaces_created = []
def filedet(name, fobj=None, suffix=None):
    """Detect file type by filename.

    :param name: file name
    :param fobj: file object
    :param suffix: file suffix like ``py``, ``.py``
    :return: file type full name, such as ``python``, ``bash``
    """
    # First non-empty source wins: explicit name, then file object's name,
    # then the raw suffix.
    name = name or (fobj and fobj.name) or suffix
    if '.' not in name:
        raise FiledetException('file name error.')
    # Map the final extension (with leading dot) to its type name.
    extension = '.' + name.rsplit('.', 1)[1]
    return _file_type_map.get(extension)
def multi_evaluate(self, x, out=None):
    """Evaluate log of the density to propose ``x``, namely log(q(x))
    for each row in x.

    :param x: Matrix-like array; the proposed points. Expect i-th
        accessible as ``x[i]``.
    :param out: Vector-like array, length == ``len(x)``, optional;
        If provided, the output is written into this array.
    """
    n = len(x)
    if out is None:
        out = _np.empty(n)
    else:
        assert len(out) == n
    # Evaluate each proposed point independently.
    for i in range(n):
        out[i] = self.evaluate(x[i])
    return out
def handle_run_command(parser, args):
    """Implement `run` sub-command.

    Locates a `.pyftpsync.yaml` in the current folder or a parent, merges
    the selected task's options into ``args`` (command line wins where an
    explicit override is detected), and resolves the local/remote target
    paths. All fatal conditions are reported via ``parser.error``.
    """
    MAX_LEVELS = 15
    # --- Look for `pyftpsync.yaml` in current folder and parents ---
    cur_level = 0
    cur_folder = os.getcwd()
    config_path = None
    while cur_level < MAX_LEVELS:
        path = os.path.join(cur_folder, CONFIG_FILE_NAME)
        # print("Searching for {}...".format(path))
        if os.path.isfile(path):
            config_path = path
            break
        parent = os.path.dirname(cur_folder)
        if parent == cur_folder:
            # Reached the filesystem root.
            break
        cur_folder = parent
        cur_level += 1

    if not config_path:
        parser.error(
            "Could not locate `.pyftpsync.yaml` in {} or {} parent folders.".format(
                os.getcwd(), cur_level))

    # --- Parse `pyftpsync.yaml` and set `args` attributes ---
    try:
        with open(config_path, "rb") as f:
            config = yaml.safe_load(f)
    except Exception as e:
        parser.error("Error parsing {}: {}".format(config_path, e))
        # write_error("Error parsing {}: {}".format(config_path, e))
        # raise
    # print(config)
    if "tasks" not in config:
        parser.error("Missing option `tasks` in {}".format(config_path))

    common_config = config.get("common_config", {})
    default_task = config.get("default_task", "default")
    task_name = args.task or default_task
    if task_name not in config["tasks"]:
        parser.error("Missing option `tasks.{}` in {}".format(task_name, config_path))
    task = config["tasks"][task_name]
    write("Running task '{}' from {}".format(task_name, config_path))

    # Task options override the shared `common_config` section.
    common_config.update(task)
    task = common_config
    # write("task", task)

    # --- Check task syntax ---
    task_args = set(task.keys())
    missing_args = MANDATORY_TASK_ARGS.difference(task_args)
    if missing_args:
        parser.error("Missing mandatory options: tasks.{}.{}".format(
            task_name, ", ".join(missing_args)))
    allowed_args = KNOWN_TASK_ARGS.union(MANDATORY_TASK_ARGS)
    invalid_args = task_args.difference(allowed_args)
    if invalid_args:
        parser.error("Invalid options: tasks.{}.{}".format(
            task_name, ", ".join(invalid_args)))

    # write("args", args)

    # Copy yaml values onto `args`, unless the command line explicitly
    # overrides them (booleans set on CLI, --here/--root, non-default -v).
    for name in allowed_args:
        val = task.get(name, None)  # default)
        if val is None:
            continue  # option not specified in yaml
        # Override yaml entry by command line
        cmd_val = getattr(args, name, None)
        # write("check --{}: {} => {}".format(name, val, cmd_val))
        if cmd_val != val:
            override = False
            if name in OVERRIDABLE_BOOL_ARGS and cmd_val:
                override = True
            elif name in {"here", "root"} and (args.here or args.root):
                override = True
            elif name == "verbose" and cmd_val != 3:
                # NOTE(review): 3 appears to be the default verbosity — confirm.
                override = True
            if override:
                write("Yaml entry overriden by --{}: {} => {}".format(
                    name, val, cmd_val))
                continue
        setattr(args, name, val)

    # --- Figure out local target path ---
    cur_folder = os.getcwd()
    root_folder = os.path.dirname(config_path)
    path_ofs = os.path.relpath(os.getcwd(), root_folder)

    if cur_level == 0 or args.root:
        # Config in the current folder, or whole project requested.
        path_ofs = ""
        args.local = root_folder
    elif args.here:
        # Sync only the current sub-branch; shift the remote accordingly.
        write("Using sub-branch {sub} of {root}".format(
            root=root_folder, sub=path_ofs))
        args.local = cur_folder
        args.remote = os.path.join(args.remote, path_ofs)
    else:
        parser.error(
            "`.pyftpsync.yaml` configuration was found in a parent directory. "
            "Please pass an additional argument to clarify:\n"
            " --root: synchronize whole project ({root})\n"
            " --here: synchronize sub branch ({root}/{sub})".format(
                root=root_folder, sub=path_ofs))
def convertbits(data, frombits, tobits, pad=True):
    """General power-of-2 base conversion.

    Regroups the values in ``data`` (each ``frombits`` wide) into values
    ``tobits`` wide. Returns ``None`` on invalid input: an out-of-range
    value, or (when ``pad`` is False) leftover bits that cannot be
    represented losslessly.
    """
    accumulator = 0
    nbits = 0
    out = []
    out_mask = (1 << tobits) - 1
    # Cap the accumulator so it never grows beyond frombits+tobits-1 bits.
    acc_mask = (1 << (frombits + tobits - 1)) - 1
    for value in data:
        if value < 0 or (value >> frombits):
            return None  # value out of range for the source base
        accumulator = ((accumulator << frombits) | value) & acc_mask
        nbits += frombits
        # Emit complete output groups as soon as enough bits accumulate.
        while nbits >= tobits:
            nbits -= tobits
            out.append((accumulator >> nbits) & out_mask)
    if pad:
        if nbits:
            # Left-justify the remaining bits into one final group.
            out.append((accumulator << (tobits - nbits)) & out_mask)
    elif nbits >= frombits or ((accumulator << (tobits - nbits)) & out_mask):
        # Strict mode: any non-zero remainder is an encoding error.
        return None
    return out
def write(self, oprot):
    '''
    Write this object to the given output protocol and return self.

    :type oprot: thryft.protocol._output_protocol._OutputProtocol
    :rtype: pastpy.gen.database.impl.online.online_database_object_detail_image.OnlineDatabaseObjectDetailImage
    '''
    # All fields are serialized as strings (thrift type 11), in a fixed order.
    string_fields = (
        ('full_size_url', self.full_size_url),
        ('mediaid', self.mediaid),
        ('objectid', self.objectid),
        ('src', self.src),
        ('thumbnail_url', self.thumbnail_url),
        ('title', self.title),
        ('type', str(self.type)),
    )
    oprot.write_struct_begin('OnlineDatabaseObjectDetailImage')
    for field_name, field_value in string_fields:
        oprot.write_field_begin(name=field_name, type=11, id=None)
        oprot.write_string(field_value)
        oprot.write_field_end()
    oprot.write_field_stop()
    oprot.write_struct_end()
    return self
def file_saved_in_editorstack(self, editorstack_id_str, original_filename, filename):
    """A file was saved in editorstack, this notifies others"""
    # Notify every editorstack except the one that originated the save
    # (identified by the stringified id passed in).
    others = (es for es in self.editorstacks
              if str(id(es)) != editorstack_id_str)
    for editorstack in others:
        editorstack.file_saved_in_other_editorstack(original_filename, filename)
def Open(self, filename, read_only=False):
    """Opens the database file.

    Args:
      filename (str): filename of the database.
      read_only (Optional[bool]): True if the database should be opened in
          read-only mode. Since sqlite3 does not support a real read-only
          mode we fake it by only permitting SELECT queries.

    Returns:
      bool: True if successful.

    Raises:
      RuntimeError: if the database is already opened.
    """
    if self._connection:
        raise RuntimeError('Cannot open database already opened.')

    self.filename, self.read_only = filename, read_only

    try:
        self._connection = sqlite3.connect(filename)
    except sqlite3.OperationalError:
        return False

    # Defensive checks: connect()/cursor() are not expected to return
    # falsy values, but fail gracefully if they ever do.
    if not self._connection:
        return False
    self._cursor = self._connection.cursor()
    if not self._cursor:
        return False
    return True
def merge(self, other):
    """Merges two prefixes"""
    other = PrefixCell.coerce(other)
    # Equal or already-entailed: nothing to absorb, keep self.
    if self.is_equal(other):
        # pick among dependencies
        return self
    if other.is_entailed_by(self):
        return self
    if self.is_entailed_by(other):
        # The other prefix is more specific: adopt its value.
        self.value = other.value
    elif self.is_contradictory(other):
        raise Contradiction("Cannot merge prefix '%s' with '%s'" % (self, other))
    elif len(self.value) > len(other.value):
        # Neither entails the other: keep the shorter prefix (copied).
        self.value = other.value[:]
    return self
def smart_open(filename: str, mode: str = "rt", ftype: str = "auto", errors: str = 'replace'):
    """Returns a file descriptor for filename with UTF-8 encoding. If mode is "rt", file is opened read-only.
    If ftype is "auto", uses gzip iff filename endswith .gz. If ftype is {"gzip","gz"}, uses gzip.
    If ftype is "auto" and read mode requested, uses gzip iff is_gzip_file(filename).

    Note: encoding error handling defaults to "replace"

    :param filename: The filename to open.
    :param mode: Reader mode.
    :param ftype: File type. If 'auto' checks filename suffix for gz to try gzip.open.
    :param errors: Encoding error handling during reading. Defaults to 'replace'.
    :return: File descriptor.
    """
    # Decide on gzip: explicit ftype, .gz suffix, or (for reads) magic bytes.
    # Short-circuit keeps is_gzip_file() from being probed unnecessarily.
    use_gzip = (ftype in ('gzip', 'gz')
                or (ftype == 'auto' and filename.endswith(".gz"))
                or (ftype == 'auto' and 'r' in mode and is_gzip_file(filename)))
    opener = gzip.open if use_gzip else open
    return opener(filename, mode=mode, encoding='utf-8', errors=errors)
def person_update(self, people):
    """Update the status of people

    :param people: All people of this sensor
    :type people: list[paps.person.Person]
    :rtype: None
    :raises SensorUpdateException: Failed to update
    """
    # Fire-and-forget: the update message is sent without acknowledgement.
    update_msg = APPUpdateMessage(device_id=Id.NOT_SET, people=people)
    self._send_packet(
        self._server_ip, self._server_port, update_msg, acknowledge_packet=False
    )
def updateAccuracy(self, *accuracy):
    """Updates current accuracy flag"""
    for flag in accuracy:
        # Non-int values are symbolic names; translate them to their bit.
        value = flag if isinstance(flag, int) else self._ACCURACY_REVERSE_MAPPING[flag]
        self.accuracy |= value
def image_path_from_index(self, index):
    """given image index, find out full path

    Parameters:
    ----------
    index : int
        index of a specific image

    Returns:
    ----------
    full path of this image
    """
    assert self.image_set_index is not None, "Dataset not initialized"
    # Fixed: removed dead store `pos = self.image_set_index[index]`;
    # the value was never used. The lookup is delegated to the sub-imdb
    # that owns this index.
    n_db, n_index = self._locate_index(index)
    return self.imdbs[n_db].image_path_from_index(n_index)
def update_options ( cls , options , items ) : """Switch default options and backend if new backend is supplied in items ."""
# Get new backend backend_spec = items . get ( 'backend' , Store . current_backend ) split = backend_spec . split ( ':' ) backend , mode = split if len ( split ) == 2 else ( split [ 0 ] , 'default' ) if ':' not in backend_spec : backend_spec += ':default' if 'max_branches' in items : print ( 'Warning: The max_branches option is now deprecated. Ignoring.' ) del items [ 'max_branches' ] # Get previous backend prev_backend = Store . current_backend renderer = Store . renderers [ prev_backend ] prev_backend_spec = prev_backend + ':' + renderer . mode # Update allowed formats for p in [ 'fig' , 'holomap' ] : cls . allowed [ p ] = list_formats ( p , backend_spec ) # Return if backend invalid and let validation error if backend not in Store . renderers : options [ 'backend' ] = backend_spec return options # Get backend specific options backend_options = dict ( cls . _backend_options [ backend_spec ] ) cls . _backend_options [ prev_backend_spec ] = { k : v for k , v in cls . options . items ( ) if k in cls . remembered } # Fill in remembered options with defaults for opt in cls . remembered : if opt not in backend_options : backend_options [ opt ] = cls . defaults [ opt ] # Switch format if mode does not allow it for p in [ 'fig' , 'holomap' ] : if backend_options . get ( p ) not in cls . allowed [ p ] : backend_options [ p ] = cls . allowed [ p ] [ 0 ] # Ensure backend and mode are set backend_options [ 'backend' ] = backend_spec backend_options [ 'mode' ] = mode return backend_options
def delete_mount_cache(real_name):
    '''
    .. versionadded:: 2018.3.0

    Provide information if the path is mounted

    CLI Example:

    .. code-block:: bash

        salt '*' mount.delete_mount_cache /mnt/share
    '''
    cache = salt.utils.mount.read_cache(__opts__)
    # Only rewrite the cache when the entry actually exists.
    if cache and 'mounts' in cache and real_name in cache['mounts']:
        del cache['mounts'][real_name]
        if not salt.utils.mount.write_cache(cache, __opts__):
            raise CommandExecutionError('Unable to write mount cache.')
    return True
def _get_utc_sun_time_deg ( self , deg ) : """Return the times in minutes from 00:00 ( utc ) for a given sun altitude . This is done for a given sun altitude in sunrise ` deg ` degrees This function only works for altitudes sun really is . If the sun never gets to this altitude , the returned sunset and sunrise values will be negative . This can happen in low altitude when latitude is nearing the poles in winter times , the sun never goes very high in the sky there . Algorithm from http : / / www . srrb . noaa . gov / highlights / sunrise / calcdetails . html The low accuracy solar position equations are used . These routines are based on Jean Meeus ' s book Astronomical Algorithms ."""
gama = 0 # location of sun in yearly cycle in radians eqtime = 0 # difference betwen sun noon and clock noon decl = 0 # sun declanation hour_angle = 0 # solar hour angle sunrise_angle = math . pi * deg / 180.0 # sun angle at sunrise / set # get the day of year day_of_year = self . gday_of_year ( ) # get radians of sun orbit around earth = ) gama = 2.0 * math . pi * ( ( day_of_year - 1 ) / 365.0 ) # get the diff betwen suns clock and wall clock in minutes eqtime = 229.18 * ( 0.000075 + 0.001868 * math . cos ( gama ) - 0.032077 * math . sin ( gama ) - 0.014615 * math . cos ( 2.0 * gama ) - 0.040849 * math . sin ( 2.0 * gama ) ) # calculate suns declanation at the equater in radians decl = ( 0.006918 - 0.399912 * math . cos ( gama ) + 0.070257 * math . sin ( gama ) - 0.006758 * math . cos ( 2.0 * gama ) + 0.000907 * math . sin ( 2.0 * gama ) - 0.002697 * math . cos ( 3.0 * gama ) + 0.00148 * math . sin ( 3.0 * gama ) ) # we use radians , ratio is 2pi / 360 latitude = math . pi * self . location . latitude / 180.0 # the sun real time diff from noon at sunset / rise in radians try : hour_angle = ( math . acos ( math . cos ( sunrise_angle ) / ( math . cos ( latitude ) * math . cos ( decl ) ) - math . tan ( latitude ) * math . tan ( decl ) ) ) # check for too high altitudes and return negative values except ValueError : return - 720 , - 720 # we use minutes , ratio is 1440min / 2pi hour_angle = 720.0 * hour_angle / math . pi # get sunset / rise times in utc wall clock in minutes from 00:00 time # sunrise / sunset longitude = self . location . longitude return int ( 720.0 - 4.0 * longitude - hour_angle - eqtime ) , int ( 720.0 - 4.0 * longitude + hour_angle - eqtime )
def _reset ( cls ) : """If we have forked since the watch dictionaries were initialized , all that has is garbage , so clear it ."""
if os . getpid ( ) != cls . _cls_pid : cls . _cls_pid = os . getpid ( ) cls . _cls_instances_by_target . clear ( ) cls . _cls_thread_by_target . clear ( )
def get_user_codeframe(tb):
    """Modify traceback to only include the user code's execution frame.

    Always call in this fashion:

        e = sys.exc_info()
        user_tb = get_user_codeframe(e[2]) or e[2]

    so that you can get the original frame back if you need to (this is
    necessary because copying traceback objects is tricky and this is a
    good workaround).
    """
    current = tb
    while current is not None:
        filename = current.tb_frame.f_code.co_filename
        # A meta-descriptor filename (probably "<unknown>" or "<string>")
        # marks dynamically-executed user code.
        if filename[0] == '<':
            return current
        current = current.tb_next
    # We could not find the user's frame.
    return False
def _slice_area_from_bbox(self, src_area, dst_area, ll_bbox=None, xy_bbox=None):
    """Slice the provided area using the bounds provided."""
    # A bbox, when given, replaces dst_area with a synthetic crop area:
    # lon/lat bboxes use a latlong projection, x/y bboxes reuse the
    # source area's projection and grid size.
    if ll_bbox is not None:
        dst_area = AreaDefinition(
            'crop_area', 'crop_area', 'crop_latlong',
            {'proj': 'latlong'}, 100, 100, ll_bbox)
    elif xy_bbox is not None:
        dst_area = AreaDefinition(
            'crop_area', 'crop_area', 'crop_xy',
            src_area.proj_dict, src_area.x_size, src_area.y_size, xy_bbox)
    cols, rows = src_area.get_area_slices(dst_area)
    return src_area[rows, cols], rows, cols
def update_ticket(self, ticket_id=None, body=None):
    """Update a ticket.

    :param integer ticket_id: the id of the ticket to update
    :param string body: entry to update in the ticket
    """
    update = {'entry': body}
    return self.ticket.addUpdate(update, id=ticket_id)
def dumps(cls, obj, protocol=0):
    """Equivalent to pickle.dumps except that the HoloViews option
    tree is saved appropriately.

    :param obj: object to pickle.
    :param protocol: pickle protocol version, as for ``pickle.dumps``.
    :return: the pickled byte string.
    """
    cls.save_option_state = True
    try:
        return pickle.dumps(obj, protocol=protocol)
    finally:
        # Fixed: reset the flag even when pickling raises; previously an
        # exception left save_option_state stuck at True.
        cls.save_option_state = False
def set_template(self, id_environment, name, network):
    """Set template value. If id_environment = 0, set '' to all environments
    related with the template name.

    :param id_environment: Environment Identifier.
    :param name: Template Name.
    :param network: IPv4 or IPv6.

    :return: None

    :raise InvalidParameterError: Invalid param.
    :raise AmbienteNaoExisteError: Ambiente não cadastrado.
    :raise DataBaseError: Falha na networkapi ao acessar o banco de dados.
    :raise XMLError: Falha na networkapi ao ler o XML de requisição ou gerar o
        XML de resposta.
    """
    url = 'environment/set_template/' + str(id_environment) + '/'
    payload = {'environment': {'name': name, 'network': network}}
    code, xml = self.submit(payload, 'POST', url)
    return self.response(code, xml)
def stop_host(self, config_file):
    """Stops a managed host specified by `config_file`.

    Returns the decoded JSON response on success; raises
    UnexpectedResponse on any non-200 status.
    """
    res = self.send_json_request('host/stop', data={'config': config_file})
    if res.status_code == 200:
        return res.json()
    raise UnexpectedResponse(
        'Attempted to stop a JSHost. Response: {res_code}: {res_text}'.format(
            res_code=res.status_code,
            res_text=res.text,
        )
    )
def isSquare(matrix):
    """Check that ``matrix`` is square.

    Returns
    -------
    is_square : bool
        ``True`` if ``matrix`` is square, ``False`` otherwise.
    """
    try:
        # Prefer a native .shape attribute; otherwise coerce via numpy.
        shape = getattr(matrix, 'shape', None)
        if shape is None:
            shape = _np.array(matrix).shape
        # A non-2D shape makes this unpack raise ValueError -> not square.
        dim1, dim2 = shape
    except ValueError:
        return False
    return dim1 == dim2
def _memoize ( self , name , getter , * args , ** kwargs ) : """Cache a stable expensive - to - get item value for later ( optimized ) retrieval ."""
field = "custom_m_" + name cached = self . fetch ( field ) if cached : value = cached else : value = getter ( * args , ** kwargs ) self . _make_it_so ( "caching %s=%r for" % ( name , value , ) , [ "custom.set" ] , field [ 7 : ] , value ) self . _fields [ field ] = value return value
def top_right(self):
    '''
    Returns the axis instance at the top right of the page,
    where the postage stamp and aperture is displayed
    '''
    # Each call consumes the next axis factory in order.
    factory = self.body_top_right[self.tcount]
    axis = factory()
    self.tcount += 1
    return axis
def do_get(self, from_path, to_path):
    """Copy file from Ndrive to local file and print out out the metadata.

    Examples:
        Ndrive > get file.txt ~/ndrive-file.txt
    """
    # Fixed: the original opened a local file handle that was never
    # written to or closed (a leak that also truncated the expanded
    # destination), then passed the *unexpanded* to_path to downloadFile.
    # Expand once and let downloadFile write the destination itself.
    dest = os.path.expanduser(to_path)
    self.n.downloadFile(self.current_path + "/" + from_path, dest)
def peek(init, exposes, debug=False):
    """Default deserializer factory.

    Arguments:
        init (callable): type constructor.
        exposes (iterable): attributes to be peeked and passed to `init`.

    Returns:
        callable: deserializer (`peek` routine).
    """
    def _peek(store, container, _stack=None):
        # Pull each exposed attribute out of the container, in order,
        # then hand the values to the constructor positionally.
        values = [store.peek(attr, container, _stack=_stack)
                  for attr in exposes]
        if debug:
            print(values)
        return init(*values)
    return _peek
def create_pixeltypegrid ( grid_pars , grid_data ) : """Creates pixelgrid and arrays of axis values . Starting from : - grid _ pars : 2D numpy array , 1 column per parameter , unlimited number of cols - grid _ data : 2D numpy array , 1 column per variable , data corresponding to the rows in grid _ pars The grid should be rectangular and complete , i . e . every combination of the unique values in the parameter columns should exist . If not , a nan value will be inserted . @ param grid _ pars : Npar x Ngrid array of parameters @ type grid _ pars : array @ param grid _ data : Ndata x Ngrid array of data @ type grid _ data : array @ return : axis values and pixelgrid @ rtype : array , array"""
uniques = [ np . unique ( column , return_inverse = True ) for column in grid_pars ] # [0 ] are the unique values , [ 1 ] the indices for these to recreate the original array # we need to copy the values of the unique axes explicitly into new arrays # otherwise we can get issues with the interpolator axis_values = [ ] for uniques_ in uniques : this_axes = np . zeros ( len ( uniques_ [ 0 ] ) ) this_axes [ : ] = uniques_ [ 0 ] axis_values . append ( this_axes ) # axis _ values = [ uniques _ [ 0 ] for uniques _ in uniques ] # axis _ values = [ np . require ( uniques _ [ 0 ] , requirements = [ ' A ' , ' O ' , ' W ' , ' F ' ] ) for uniques _ in uniques ] unique_val_indices = [ uniques_ [ 1 ] for uniques_ in uniques ] data_dim = np . shape ( grid_data ) [ 0 ] par_dims = [ len ( uv [ 0 ] ) for uv in uniques ] par_dims . append ( data_dim ) pixelgrid = np . ones ( par_dims ) # We put np . inf as default value . If we get an inf , that means we tried to access # a region of the pixelgrid that is not populated by the data table pixelgrid [ pixelgrid == 1 ] = np . inf # now populate the multiDgrid indices = [ uv [ 1 ] for uv in uniques ] pixelgrid [ indices ] = grid_data . T return tuple ( axis_values ) , pixelgrid
def _read_mode_unpack(self, size, kind):
    """Read options request unpack process.

    Positional arguments:
        * size - int, length of option
        * kind - int, option kind value

    Returns:
        * dict -- extracted option

    Structure of IPv4 options:
        Octets      Bits        Name                    Description
          0           0     ip.opt.kind             Kind
          0           0     ip.opt.type.copy        Copied Flag
          0           1     ip.opt.type.class       Option Class
          0           3     ip.opt.type.number      Option Number
          1           8     ip.opt.length           Length
          2          16     ip.opt.data             Kind-specific Data
    """
    # An option carrying data needs at least kind + length + one data octet.
    if size < 3:
        raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')

    return {
        'kind': kind,
        'type': self._read_opt_type(kind),
        'length': size,
        'data': self._read_unpack(size),
    }
def get_linestyle(self, increment=1):
    """Return the current line style, then advance the internal index.

    Parameters
    ----------
    increment : int, optional
        How far to advance ``self.linestyles_index`` (default 1).

    Returns
    -------
    The entry of ``self.linestyles`` that was current *before* the index
    was advanced.
    """
    current = self.linestyles_index
    # Wrap with modulo so any increment lands back inside the list.  The
    # previous subtract-once-then-clamp-to-zero logic only handled a
    # single overflow and silently reset the cycle for larger increments.
    self.linestyles_index = (current + increment) % len(self.linestyles)
    return self.linestyles[current]
def auto_directory(rel_name):
    """Ensure the directory for *rel_name* exists and return its path.

    If you're using py.path you may do that as:
    ``py.path.local(full_path).ensure_dir()``

    Parameters
    ----------
    rel_name : str
        Relative name, resolved through ``rel_path`` (without existence
        checking).

    Returns
    -------
    str
        The resolved directory path.
    """
    dir_name = rel_path(rel_name, check=False)
    # makedirs(exist_ok=True) already tolerates an existing directory, so
    # the former os.path.exists() pre-check was redundant and racy
    # (TOCTOU): the directory could be created or removed by another
    # process between the check and the call.
    os.makedirs(dir_name, exist_ok=True)
    return dir_name
def try_one_generator(project, name, generator, target_type, properties, sources):
    """Checks if generator invocation can be pruned, because it's guaranteed
    to fail. If so, quickly returns an empty list. Otherwise, calls
    try_one_generator_really.
    """
    if __debug__:
        from .targets import ProjectTarget
        assert isinstance(project, ProjectTarget)
        assert isinstance(name, basestring) or name is None
        assert isinstance(generator, Generator)
        assert isinstance(target_type, basestring)
        assert isinstance(properties, property_set.PropertySet)
        assert is_iterable_typed(sources, virtual_target.VirtualTarget)

    source_types = []
    for s in sources:
        source_types.append(s.type())

    viable_source_types = viable_source_types_for_generator(generator)

    # Prune when we have sources, the generator is not a catch-all ('*'),
    # and none of the source types are viable for it.
    if source_types and viable_source_types != ['*'] and not set_.intersection(source_types, viable_source_types):
        if project.manager().logger().on():
            id = generator.id()
            project.manager().logger().log(__name__, "generator '%s' pruned" % id)
            # Bug fix: the original adjacent string literals collapsed into
            # "source_types%s" (missing space, stray quote); restore the
            # intended "source_types '<value>'" message.
            project.manager().logger().log(__name__, "source_types '%s'" % source_types)
            project.manager().logger().log(__name__, "viable_source_types '%s'" % viable_source_types)
        return []
    else:
        return try_one_generator_really(project, name, generator, target_type,
                                        properties, sources)
def grok_ttl(secret):
    """Parse the TTL information out of *secret*.

    Returns a ``(ttl_obj, lease_msg)`` tuple: ``ttl_obj`` carries the
    ``lease`` and ``lease_max`` entries found in the secret (``lease_max``
    falls back to the plain ``lease`` value when absent), while
    ``lease_msg`` is a short printable summary of both.
    """
    ttl_obj = {}
    lease_part = ''

    if 'lease' in secret:
        ttl_obj['lease'] = secret['lease']
        lease_part = "lease:%s" % (ttl_obj['lease'])

    # An explicit lease_max wins; otherwise mirror the plain lease value.
    if 'lease_max' in secret:
        ttl_obj['lease_max'] = secret['lease_max']
    elif 'lease' in ttl_obj:
        ttl_obj['lease_max'] = ttl_obj['lease']

    if 'lease_max' not in ttl_obj:
        # Neither key was present: empty object, empty message.
        return ttl_obj, lease_part

    # NOTE: when only lease_max exists the message keeps its historical
    # leading space ('' joined with the lease_max part).
    lease_msg = ' '.join([lease_part, "lease_max:%s" % (ttl_obj['lease_max'])])
    return ttl_obj, lease_msg
def log_call(call_name):
    """Decorator factory: log *call_name* and the JSON request body to the
    instance logger before delegating to the wrapped API method."""
    def decorator(func):
        @wraps(func)
        def logged(*args, **kwargs):
            # The first positional argument is the bound instance, which
            # carries the logger used for API-call auditing.
            owner = args[0]
            owner.logger.info(call_name, {"content": request.get_json()})
            return func(*args, **kwargs)
        return logged
    return decorator
def upload_entities_tsv(namespace, workspace, entities_tsv):
    """Upload entities from a tsv loadfile.

    File-based wrapper around api.upload_entities(). A loadfile is a
    tab-separated text file whose header row names the entity type and
    attribute columns, followed by one row per entity, e.g.::

        entity:participant_id   age   alive
        participant_23          25    Y
        participant_27          35    N

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        entities_tsv (file): FireCloud loadfile path or io.StringIO, see
            format above

    Raises:
        ValueError: if entities_tsv is neither a path string nor a StringIO.
    """
    # Accept either an in-memory buffer or a path to a loadfile on disk.
    if isinstance(entities_tsv, io.StringIO):
        entity_data = entities_tsv.getvalue()
    elif isinstance(entities_tsv, string_types):
        with open(entities_tsv, "r") as tsv:
            entity_data = tsv.read()
    else:
        raise ValueError('Unsupported input type.')
    return upload_entities(namespace, workspace, entity_data)
def create_profile(hostname, username, password, profile_type, name, **kwargs):
    r'''
    A function to connect to a bigip device and create a profile.

    hostname
        The host/address of the bigip device
    username
        The iControl REST username
    password
        The iControl REST password
    profile_type
        The type of profile to create
    name
        The name of the profile to create
    kwargs
        ``[arg=val]... [arg=key1:val1,key2:val2]...``

        Consult F5 BIGIP user guide for specific options for each profile
        type. Typically, tmsh arg names are used.

    Creating Complex Args
        Profiles can get pretty complicated in terms of the amount of
        possible config options. Use the following shorthand to create
        complex arguments such as lists, dictionaries, and lists of
        dictionaries. An option is also provided to pass raw json as well.

        lists ``[i,i,i]``:
            ``param='item1,item2,item3'``

        Dictionary ``[k:v,k:v,k,v]``:
            ``param='key-1:val-1,key-2:val2,key-3:va-3'``

        List of Dictionaries ``[k:v,k:v|k:v,k:v|k:v,k:v]``:
            ``param='key-1:val-1,key-2:val-2|key-1:val-1,key-2:val-2|key-1:val-1,key-2:val-2'``

        JSON: ``'j{...}j'``:
            ``cert-key-chain='j{ "default": { "cert": "default.crt", "chain": "default.crt", "key": "default.key" } }j'``

        Escaping Delimiters:
            Use ``\,`` or ``\:`` or ``\|`` to escape characters which
            shouldn't be treated as delimiters i.e.
            ``ciphers='DEFAULT\:!SSLv3'``

    CLI Examples::

        salt '*' bigip.create_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http'
        salt '*' bigip.create_profile bigip admin admin http my-http-profile defaultsFrom='/Common/http' \
            enforcement=maxHeaderCount:3200,maxRequests:10
    '''
    # build session
    bigip_session = _build_session(username, password)

    # start from the mandatory profile name; everything else accepted below
    # is forwarded straight to the BIG-IP REST API.
    payload = {'name': name}

    # there's a ton of different profiles and a ton of options for each type
    # of profile. this logic relies on the end user knowing which options are
    # meant for which profile types
    reserved = ('hostname', 'username', 'password', 'profile_type')
    for opt, raw_value in six.iteritems(kwargs):
        # skip salt-internal dunder kwargs and the connection parameters
        if opt.startswith('__') or opt in reserved:
            continue
        # tmsh/REST option names use dashes where salt kwargs use underscores
        rest_key = opt.replace('_', '-')
        try:
            payload[rest_key] = _set_value(raw_value)
        except salt.exceptions.CommandExecutionError:
            return 'Error: Unable to Parse JSON data for parameter: {key}\n{value}'.format(key=rest_key, value=raw_value)

    # post to REST
    try:
        response = bigip_session.post(
            BIG_IP_URL_BASE.format(host=hostname) + '/ltm/profile/{type}'.format(type=profile_type),
            data=salt.utils.json.dumps(payload)
        )
    except requests.exceptions.ConnectionError as e:
        return _load_connection_error(hostname, e)

    return _load_response(response)
def create_dict_subelement(root, subelement, content, **kwargs):
    """Create a XML subelement from a Python dictionary."""
    attribs = kwargs.get('attribs', None)
    namespace = kwargs.get('namespace', None)
    key = subelement

    # Prefix the tag with its namespace when one was supplied.
    tag = namespace + subelement if namespace else subelement
    # Attach attributes only when given; SubElement rejects attrib=None.
    if attribs:
        node = SubElement(root, tag, attribs)
    else:
        node = SubElement(root, tag)

    if not isinstance(content, dict):
        # Plain (non-dict) content becomes the element's text.
        node.text = content
    elif key == 'degree':
        # Special-case ordering for degree children on etd_ms: emit the
        # matching children in DEGREE_ORDER sequence.
        for ordered_key in DEGREE_ORDER:
            for descriptor, value in content.items():
                if descriptor == ordered_key:
                    child = SubElement(node, descriptor)
                    child.text = value
    else:
        # Ordinary dict: one child element per key/value pair.
        for descriptor, value in content.items():
            child = SubElement(node, descriptor)
            child.text = value
def register_instances(self, load_balancer_name, instances):
    """Add new Instances to an existing Load Balancer.

    :type load_balancer_name: string
    :param load_balancer_name: The name of the Load Balancer

    :type instances: List of strings
    :param instances: The instance ID's of the EC2 instances to add.

    :rtype: List of strings
    :return: An updated list of instances for this Load Balancer.
    """
    action = 'RegisterInstancesWithLoadBalancer'
    # Expand the instance IDs into numbered Instances.member.N.InstanceId
    # query parameters, as the ELB API expects.
    query = {'LoadBalancerName': load_balancer_name}
    self.build_list_params(query, instances,
                           'Instances.member.%d.InstanceId')
    return self.get_list(action, query, [('member', InstanceInfo)])