signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def rapidfire(self, check_status=True, max_nlaunch=-1, max_loops=1, sleep_time=5, **kwargs):
    """Submit tasks in rapidfire mode via :class:`PyLauncher`.

    Args:
        check_status: If True, refresh the flow status before launching.
        max_nlaunch: Maximum number of launches. Default -1: no limit.
        max_loops: Maximum number of rapidfire loops.
        sleep_time: Seconds to sleep between rapidfire loop iterations.
        **kwargs: Options forwarded to the launcher.

    Returns:
        Number of tasks submitted.
    """
    from .launcher import PyLauncher

    self.check_pid_file()
    self.set_spectator_mode(False)
    if check_status:
        self.check_status()

    launcher = PyLauncher(self, **kwargs)
    return launcher.rapidfire(max_nlaunch=max_nlaunch, max_loops=max_loops, sleep_time=sleep_time)
def obfn_g1(self, Y1):
    r"""Compute the :math:`g_1(\mathbf{y_1})` component of the ADMM objective.

    Returns the Euclidean norm of the difference between ``Y1`` and its
    image under the projection ``self.Pcn``.
    """
    residual = self.Pcn(Y1) - Y1
    return np.linalg.norm(residual)
def _parse_summaryRecordSysNumber(summaryRecordSysNumber):
    """Try to parse a vague, not-likely-machine-readable description.

    Splits the text into whitespace-separated tokens, strips surrounding
    punctuation from each token via :func:`remove_hairs`, and returns the
    first token containing more than three digits (an identifier-like
    number).

    Args:
        summaryRecordSysNumber (str): Free-form description.

    Returns:
        str: First sufficiently numeric token, or ``""`` when none exists.
    """
    def number_of_digits(token):
        # Count digit characters. Note: on Python 3 `filter` is lazy, so the
        # original `len(filter(...))` raised TypeError; sum a generator instead.
        return sum(1 for ch in token if ch.isdigit())

    tokens = (remove_hairs(tok, r" .,:;<>(){}[]\/") for tok in summaryRecordSysNumber.split())

    # pick only tokens that contain more than 3 digits
    # (materialize the list: a py3 filter object is always truthy and is not
    # subscriptable, which broke both the emptiness check and the [0] access)
    contains_digits = [tok for tok in tokens if number_of_digits(tok) > 3]

    if not contains_digits:
        return ""

    return contains_digits[0]
def has_edit_permission(self, request, obj=None, version=None):
    """Return whether the requesting user may edit the object.

    Checks the model-level ``<app>.edit_<model>`` permission first and falls
    back to an object-level check when ``obj`` is given. Can also be passed a
    version object: the user then only keeps permission if the version is
    unsaved (no version number) and owned by them.
    """
    perm = '{}.edit_{}'.format(self.opts.app_label, self.opts.model_name)
    user = request.user

    allowed = user.has_perm(perm)
    # Fall back to an object-level permission check when the global one failed.
    if obj is not None and allowed is False:
        allowed = user.has_perm(perm, obj=obj)

    if allowed and version is not None:
        # Version must not be saved, and must belong to this user.
        allowed = not version.version_number and version.owner == request.user

    return allowed
def health_check(self):
    """Return True if the backing data file exists on disk."""
    message = 'Health Check on file for: {namespace}'.format(namespace=self.namespace)
    logger.debug(message)
    return os.path.isfile(self.data_file)
def iqr(data, channels=None):
    """Calculate the Interquartile Range of the events in an FCSData object.

    Parameters
    ----------
    data : FCSData or numpy array
        NxD flow cytometry data, N events by D parameters (aka channels).
    channels : int or str or list of int or list of str, optional
        Channels on which to calculate the statistic. If None, use all
        channels.

    Returns
    -------
    float or numpy array
        The Interquartile Range of the events in the specified channels.

    Notes
    -----
    The IQR of a dataset is the interval between its 25% and 75%
    percentiles.
    """
    # Restrict to the requested channels, if any.
    selected = data if channels is None else data[:, channels]

    # Per-channel 75th and 25th percentiles; their difference is the IQR.
    percentiles = np.percentile(selected, [75, 25], axis=0)
    return percentiles[0] - percentiles[1]
def configure(self, statistics="max", max_ticks=5, plot_hists=True, flip=True, serif=True, sigma2d=False,
              sigmas=None, summary=None, bins=None, rainbow=None, colors=None, linestyles=None,
              linewidths=None, kde=False, smooth=None, cloud=None, shade=None, shade_alpha=None,
              shade_gradient=None, bar_shade=None, num_cloud=None, color_params=None,
              plot_color_params=False, cmaps=None, plot_contour=None, plot_point=None, global_point=True,
              marker_style=None, marker_size=None, marker_alpha=None, usetex=True,
              diagonal_tick_labels=True, label_font_size=12, tick_font_size=10, spacing=None,
              contour_labels=None, contour_label_font_size=10, legend_kwargs=None, legend_location=None,
              legend_artists=None, legend_color_text=True, watermark_text_kwargs=None,
              summary_area=0.6827):  # pragma: no cover
    r"""Configure the general plotting parameters common across the bar and contour plots.

    If you do not call this explicitly, the :func:`plot` method will invoke
    this method automatically. Call it *after* adding all the relevant data
    to the chain consumer, as the consumer adapts configuration values to
    the supplied data.

    Parameters
    ----------
    statistics : str | list[str], optional
        Summary statistic: ``"max"`` (default), ``"mean"``, ``"cumulative"``,
        ``"max_symmetric"``, ``"max_closest"`` or ``"max_central"``. A list
        applies per-chain values.
    max_ticks : int, optional
        Maximum number of ticks to use on the plots.
    plot_hists : bool, optional
        Whether to plot marginalised distributions.
    flip : bool, optional
        When plotting only two parameters, rotate the histogram so it is
        horizontal; set False to disable.
    serif : bool, optional
        Display ticks and labels with a serif font.
    sigma2d : bool, optional
        Use confidence levels for 2D Gaussians (39%/86% at 1/2 sigma)
        instead of 1D levels (68%/95%).
    sigmas : np.array, optional
        Sigma contour levels to plot.
    summary : bool, optional
        Whether parameter summaries should be set as axis titles
        (single-chain only).
    bins : int | float | list[int | float], optional
        Number of bins (int) or scaling factor on the default (float).
    rainbow : bool | list[bool], optional
        Force rainbow colours.
    colors : str(hex) | list[str(hex)], optional
        Explicit chain colours; mutually exclusive with ``rainbow``.
    linestyles : str | list[str], optional
        Line style(s) for contours and marginalised distributions.
    linewidths : float | list[float], optional
        Line width(s); default 1.0.
    kde : bool | float | list[bool | float], optional
        Gaussian-KDE smoothing of marginalised posteriors. A float scales
        the KDE bandpass. Disabled on grid chains.
    smooth : int | list[int], optional
        Gaussian-filter smoothing amount (default 3); ignored when KDE is
        active; 0 for grid data.
    cloud : bool | list[bool], optional
        Whether to plot the point cloud.
    shade : bool | list[bool], optional
        Whether to plot filled contours.
    shade_alpha : float | list[float], optional
        Filled-contour alpha override.
    shade_gradient : float | list[float], optional
        How much to vary colours in different contour levels.
    bar_shade : bool | list[bool], optional
        Shade confidence regions under the histograms.
    num_cloud : int | list[int], optional
        Number of scatter points for the cloud (default 30k per chain).
    color_params : str | list[str], optional
        Parameter name used to colour scatter points; ``"weights"``,
        ``"log_weights"`` or ``"posterior"`` use those quantities.
    plot_color_params : bool | list[bool], optional
        Also plot the colour parameter as a posterior surface.
    cmaps : str | list[str], optional
        Matplotlib colormap(s) for the colour parameter.
    plot_contour : bool | list[bool], optional
        Whether to plot the contour (default: fewer than 20 chains).
    plot_point : bool | list[bool], optional
        Whether to plot a maximum-likelihood point.
    global_point : bool, optional
        Plot the global posterior maximum (True) or the marginalised 2D
        maximum.
    marker_style : str | list[str], optional
        Marker style for points (default ``'.'``).
    marker_size : numeric | list[numeric], optional
        Marker size (default 4).
    marker_alpha : numeric | list[numeric], optional
        Marker alpha.
    usetex : bool, optional
        Parse text as LaTeX in plots.
    diagonal_tick_labels : bool, optional
        Display tick labels on a 45-degree angle.
    label_font_size : int | float, optional
        Font size for axis labels and titles.
    tick_font_size : int | float, optional
        Font size for tick labels.
    spacing : float, optional
        Spacing between plots.
    contour_labels : str, optional
        Label contours using ``"confidence"`` or ``"sigma"``.
    contour_label_font_size : int | float, optional
        Font size for contour labels.
    legend_kwargs : dict, optional
        Extra arguments passed to the legend API.
    legend_location : tuple(int, int), optional
        Subplot in which to place the legend.
    legend_artists : bool, optional
        Include line artists in the legend.
    legend_color_text : bool, optional
        Colour the legend text.
    watermark_text_kwargs : dict, optional
        Fontdict options for watermark text.
    summary_area : float, optional
        Confidence interval used when generating parameter summaries.
        Defaults to 1 sigma, aka 0.6827.

    Returns
    -------
    ChainConsumer
        Itself, to allow chaining calls.
    """
    # Dirty way of ensuring overrides happen when requested: record which
    # keyword arguments were explicitly supplied (non-None), in both plural
    # and singular forms.
    l = locals()
    explicit = []
    for k in l.keys():
        if l[k] is not None:
            explicit.append(k)
            if k.endswith("s"):
                explicit.append(k[:-1])
    self._init_params()
    num_chains = len(self.chains)
    assert rainbow is None or colors is None, "You cannot both ask for rainbow colours and then give explicit colours"

    # Determine statistics
    assert statistics is not None, "statistics should be a string or list of strings!"
    if isinstance(statistics, str):
        assert statistics in list(Analysis.summaries), "statistics %s not recognised. Should be in %s" % (statistics, Analysis.summaries)
        statistics = [statistics.lower()] * len(self.chains)
    elif isinstance(statistics, list):
        for i, stat in enumerate(statistics):
            statistics[i] = stat.lower()
    else:
        raise ValueError("statistics is not a string or a list!")

    # Determine KDEs (per-chain kde settings on a chain override the global value)
    if isinstance(kde, bool) or isinstance(kde, float):
        kde = [False if c.grid else kde for c in self.chains]
    kde_override = [c.kde for c in self.chains]
    kde = [c2 if c2 is not None else c1 for c1, c2 in zip(kde, kde_override)]

    # Determine bins: ints are absolute counts, floats scale the default
    if bins is None:
        bins = get_bins(self.chains)
    elif isinstance(bins, list):
        bins = [b2 if isinstance(b2, int) else np.floor(b2 * b1) for b1, b2 in zip(get_bins(self.chains), bins)]
    elif isinstance(bins, float):
        bins = [np.floor(b * bins) for b in get_bins(self.chains)]
    elif isinstance(bins, int):
        bins = [bins] * len(self.chains)
    else:
        raise ValueError("bins value is not a recognised class (float or int)")

    # Determine smoothing (disabled for grid chains and KDE chains)
    if smooth is None:
        smooth = [0 if c.grid or k else 3 for c, k in zip(self.chains, kde)]
    else:
        if smooth is not None and not smooth:
            smooth = 0
        if isinstance(smooth, list):
            smooth = [0 if k else s for s, k in zip(smooth, kde)]
        else:
            smooth = [0 if k else smooth for k in kde]

    # Determine color parameters
    if color_params is None:
        color_params = [None] * num_chains
    else:
        if isinstance(color_params, str):
            color_params = [color_params if color_params in cs.parameters + ["log_weights", "weights", "posterior"] else None for cs in self.chains]
            color_params = [None if c == "posterior" and self.chains[i].posterior is None else c for i, c in enumerate(color_params)]
        elif isinstance(color_params, list) or isinstance(color_params, tuple):
            for c, chain in zip(color_params, self.chains):
                p = chain.parameters
                if c is not None:
                    assert c in p, "Color parameter %s not in parameters %s" % (c, p)

    # Determine if we should plot color parameters
    if isinstance(plot_color_params, bool):
        plot_color_params = [plot_color_params] * len(color_params)

    # Determine cmaps, reusing a cmap for repeated colour parameters
    if cmaps is None:
        param_cmaps = {}
        cmaps = []
        i = 0
        for cp in color_params:
            if cp is None:
                cmaps.append(None)
            elif cp in param_cmaps:
                cmaps.append(param_cmaps[cp])
            else:
                param_cmaps[cp] = self._cmaps[i]
                cmaps.append(self._cmaps[i])
                i = (i + 1) % len(self._cmaps)

    # Determine colours; chains with colour scatter are drawn in black
    if colors is None:
        if rainbow:
            colors = self.color_finder.get_colormap(num_chains)
        else:
            if num_chains > len(self._all_colours):
                num_needed_colours = np.sum([c is None for c in color_params])
                colour_list = self.color_finder.get_colormap(num_needed_colours)
            else:
                colour_list = self._all_colours
            colors = []
            ci = 0
            for c in color_params:
                if c:
                    colors.append('#000000')
                else:
                    colors.append(colour_list[ci])
                    ci += 1
    elif isinstance(colors, str):
        colors = [colors] * len(self.chains)
    colors = self.color_finder.get_formatted(colors)

    # Determine linestyles
    if linestyles is None:
        i = 0
        linestyles = []
        for c in color_params:
            if c is None:
                linestyles.append(self._linestyles[0])
            else:
                linestyles.append(self._linestyles[i])
                i = (i + 1) % len(self._linestyles)
    elif isinstance(linestyles, str):
        linestyles = [linestyles] * len(self.chains)

    # Determine linewidths
    if linewidths is None:
        linewidths = [1.0] * len(self.chains)
    elif isinstance(linewidths, float) or isinstance(linewidths, int):
        linewidths = [linewidths] * len(self.chains)

    # Determine clouds (forced on for chains with colour scatter)
    if cloud is None:
        cloud = False
    cloud = [cloud or c is not None for c in color_params]

    # Determine cloud points
    if num_cloud is None:
        num_cloud = 30000
    if isinstance(num_cloud, int) or isinstance(num_cloud, float):
        num_cloud = [int(num_cloud)] * num_chains

    # Should we shade the contours
    if shade is None:
        if shade_alpha is None:
            shade = num_chains <= 3
        else:
            shade = True
    if isinstance(shade, bool):
        # If not overridden, do not shade chains with colour scatter points
        shade = [shade and c is None for c in color_params]

    # Modify shade alpha based on how many chains we have
    if shade_alpha is None:
        if num_chains == 1:
            if contour_labels is not None:
                shade_alpha = 0.75
            else:
                shade_alpha = 1.0
        else:
            shade_alpha = 1.0 / num_chains
    # Decrease the shading amount if there are colour scatter points
    if isinstance(shade_alpha, float) or isinstance(shade_alpha, int):
        shade_alpha = [shade_alpha if c is None else 0.25 * shade_alpha for c in color_params]

    if shade_gradient is None:
        shade_gradient = 1.0
    if isinstance(shade_gradient, float):
        shade_gradient = [shade_gradient] * num_chains
    elif isinstance(shade_gradient, list):
        # BUGFIX: format string was "but % chains", which raises ValueError
        # when the assertion message is rendered.
        assert len(shade_gradient) == num_chains, "Have %d shade_gradient but %d chains" % (len(shade_gradient), num_chains)

    contour_over_points = num_chains < 20

    if plot_contour is None:
        plot_contour = [contour_over_points if chain.posterior is not None else True for chain in self.chains]
    elif isinstance(plot_contour, bool):
        plot_contour = [plot_contour] * num_chains

    if plot_point is None:
        plot_point = [not contour_over_points] * num_chains
    elif isinstance(plot_point, bool):
        plot_point = [plot_point] * num_chains

    if marker_style is None:
        marker_style = ['.'] * num_chains
    elif isinstance(marker_style, str):
        marker_style = [marker_style] * num_chains

    if marker_size is None:
        marker_size = [4] * num_chains
    elif isinstance(marker_size, (int, float)):
        # BUGFIX: original tested isinstance(marker_style, ...) here, so a
        # numeric marker_size was never broadcast per chain.
        marker_size = [marker_size] * num_chains

    if marker_alpha is None:
        marker_alpha = [1.0] * num_chains
    elif isinstance(marker_alpha, (int, float)):
        marker_alpha = [marker_alpha] * num_chains

    # Figure out if we should display parameter summaries
    if summary is not None:
        summary = summary and num_chains == 1

    # Figure out bar shading
    if bar_shade is None:
        bar_shade = num_chains <= 3
    if isinstance(bar_shade, bool):
        bar_shade = [bar_shade] * num_chains

    # Figure out how many sigmas to plot
    if sigmas is None:
        if num_chains == 1:
            sigmas = np.array([0, 1, 2])
        else:
            sigmas = np.array([0, 1, 2])
    if sigmas[0] != 0:
        sigmas = np.concatenate(([0], sigmas))
    sigmas = np.sort(sigmas)

    if contour_labels is not None:
        assert isinstance(contour_labels, str), "contour_labels parameter should be a string"
        contour_labels = contour_labels.lower()
        assert contour_labels in ["sigma", "confidence"], "contour_labels should be either sigma or confidence"
    assert isinstance(contour_label_font_size, int) or isinstance(contour_label_font_size, float), "contour_label_font_size needs to be numeric"

    if legend_artists is None:
        legend_artists = len(set(linestyles)) > 1 or len(set(linewidths)) > 1

    if legend_kwargs is not None:
        assert isinstance(legend_kwargs, dict), "legend_kwargs should be a dict"
    else:
        legend_kwargs = {}

    if num_chains < 3:
        labelspacing = 0.5
    elif num_chains == 3:
        labelspacing = 0.2
    else:
        labelspacing = 0.15
    legend_kwargs_default = {
        "labelspacing": labelspacing,
        "loc": "upper right",
        "frameon": False,
        "fontsize": label_font_size,
        "handlelength": 1,
        "handletextpad": 0.2,
        "borderaxespad": 0.0,
    }
    legend_kwargs_default.update(legend_kwargs)

    watermark_text_kwargs_default = {
        "color": "#333333",
        "alpha": 0.7,
        "verticalalignment": "center",
        "horizontalalignment": "center",
    }
    if watermark_text_kwargs is None:
        watermark_text_kwargs = {}
    watermark_text_kwargs_default.update(watermark_text_kwargs)

    assert isinstance(summary_area, float), "summary_area needs to be a float, not %s!" % type(summary_area)
    assert summary_area > 0, "summary_area should be a positive number, instead is %s!" % summary_area
    assert summary_area < 1, "summary_area must be less than unity, instead is %s!" % summary_area
    assert isinstance(global_point, bool), "global_point should be a bool"

    # List options: push per-chain values onto each chain's config
    for i, c in enumerate(self.chains):
        try:
            c.update_unset_config("statistics", statistics[i], override=explicit)
            c.update_unset_config("color", colors[i], override=explicit)
            c.update_unset_config("linestyle", linestyles[i], override=explicit)
            c.update_unset_config("linewidth", linewidths[i], override=explicit)
            c.update_unset_config("cloud", cloud[i], override=explicit)
            c.update_unset_config("shade", shade[i], override=explicit)
            c.update_unset_config("shade_alpha", shade_alpha[i], override=explicit)
            c.update_unset_config("shade_gradient", shade_gradient[i], override=explicit)
            c.update_unset_config("bar_shade", bar_shade[i], override=explicit)
            c.update_unset_config("bins", bins[i], override=explicit)
            c.update_unset_config("kde", kde[i], override=explicit)
            c.update_unset_config("smooth", smooth[i], override=explicit)
            c.update_unset_config("color_params", color_params[i], override=explicit)
            c.update_unset_config("plot_color_params", plot_color_params[i], override=explicit)
            c.update_unset_config("cmap", cmaps[i], override=explicit)
            c.update_unset_config("num_cloud", num_cloud[i], override=explicit)
            c.update_unset_config("marker_style", marker_style[i], override=explicit)
            c.update_unset_config("marker_size", marker_size[i], override=explicit)
            c.update_unset_config("marker_alpha", marker_alpha[i], override=explicit)
            c.update_unset_config("plot_contour", plot_contour[i], override=explicit)
            c.update_unset_config("plot_point", plot_point[i], override=explicit)
            c.config["summary_area"] = summary_area
        except IndexError as e:
            # BUGFIX: original caught IndentationError, which can never be
            # raised here; the intended exception (per the message) is
            # IndexError from a too-short per-chain list.
            print("Index error when assigning chain properties, make sure you "
                  "have enough properties set for the number of chains you have loaded! "
                  "See the stack trace for which config item has the wrong number of entries.")
            raise e

    # Non list options
    self.config["sigma2d"] = sigma2d
    self.config["sigmas"] = sigmas
    self.config["summary"] = summary
    self.config["flip"] = flip
    self.config["serif"] = serif
    self.config["plot_hists"] = plot_hists
    self.config["max_ticks"] = max_ticks
    self.config["usetex"] = usetex
    self.config["diagonal_tick_labels"] = diagonal_tick_labels
    self.config["label_font_size"] = label_font_size
    self.config["tick_font_size"] = tick_font_size
    self.config["spacing"] = spacing
    self.config["contour_labels"] = contour_labels
    self.config["contour_label_font_size"] = contour_label_font_size
    self.config["legend_location"] = legend_location
    self.config["legend_kwargs"] = legend_kwargs_default
    self.config["legend_artists"] = legend_artists
    self.config["legend_color_text"] = legend_color_text
    self.config["watermark_text_kwargs"] = watermark_text_kwargs_default
    self.config["global_point"] = global_point

    self._configured = True
    return self
def get_quantiles(acquisition_par, fmin, m, s):
    '''Quantiles of the Gaussian distribution useful to determine the acquisition function values.

    :param acquisition_par: parameter of the acquisition function
    :param fmin: current minimum.
    :param m: vector of means.
    :param s: vector of standard deviations.
    '''
    # Guard against division by (near-)zero standard deviations. Note the
    # ndarray branch clamps in place, matching the original behaviour.
    if isinstance(s, np.ndarray):
        s[s < 1e-10] = 1e-10
    elif s < 1e-10:
        s = 1e-10

    u = (fmin - m - acquisition_par) / s
    root_two_pi = np.sqrt(2 * np.pi)
    phi = np.exp(-0.5 * u ** 2) / root_two_pi
    Phi = 0.5 * erfc(-u / np.sqrt(2))
    return (phi, Phi, u)
def pip_get_installed():
    """Code extracted from the middle of the pip freeze command.

    FIXME: does not list anything installed via -e
    """
    from pip._internal.utils.misc import dist_is_local

    local_dists = []
    for dist in fresh_working_set():
        if not dist_is_local(dist):
            continue
        if dist.key == 'python':
            # See #220
            continue
        local_dists.append(dist_to_req(dist))
    return tuple(local_dists)
async def disable_digital_reporting(self, command):
    """Disable Firmata reporting for a digital pin.

    :param command: {"method": "disable_digital_reporting", "params": [PIN]}
    :returns: No return message.
    """
    target_pin = int(command[0])
    await self.core.disable_digital_reporting(target_pin)
def from_app_role(cls, url, path, role_id, secret_id):
    """Constructor: use AppRole authentication to read secrets from a Vault path.

    See https://www.vaultproject.io/docs/auth/approle.html

    Args:
        url: Vault url
        path: Vault path where secrets are stored
        role_id: Vault RoleID
        secret_id: Vault SecretID
    """
    vault_token = cls._fetch_app_role_token(url, role_id, secret_id)
    secrets = cls._fetch_secrets(url, path, vault_token)
    return cls(secrets, url, path, vault_token)
def shell_command(class_path):
    """Drop into a debugging shell."""
    loader = ClassLoader(*class_path)
    namespace = {
        'ClassFile': ClassFile,
        'loader': loader,
        'constants': importlib.import_module('jawa.constants'),
    }
    shell.start_shell(local_ns=namespace)
def query(self, *args, **kwargs):
    """Generic query method.

    In reality, your storage class would have its own query methods.
    Performs a Mongo find on the Marketdata index metadata collection.
    See: http://api.mongodb.org/python/current/api/pymongo/collection.html
    """
    for doc in self._collection.find(*args, **kwargs):
        doc['stuff'] = cPickle.loads(doc['stuff'])
        # Remove default unique '_id' field from doc
        del doc['_id']
        yield Stuff(**doc)
def _inject_closure_values_fix_closures(c, injected, **kwargs):
    """Recursively fix closures.

    Python bytecode for a closure looks like::

        LOAD_CLOSURE var1
        BUILD_TUPLE <n_of_vars_closed_over>
        LOAD_CONST <code_object_containing_closure>
        MAKE_CLOSURE

    or this in 3.6 (MAKE_CLOSURE is no longer an opcode)::

        LOAD_CLOSURE var1
        BUILD_TUPLE <n_of_vars_closed_over>
        LOAD_CONST <code_object_containing_closure>
        LOAD_CONST <locals>
        MAKE_FUNCTION

    This function finds closures and adds the injected closed variables in
    the right place.
    """
    code = c.code
    orig_len = len(code)
    # Walk backwards so in-place insertions below a match do not shift the
    # indices of instructions we have not visited yet.
    for iback, (opcode, value) in enumerate(reversed(code)):
        i = orig_len - iback - 1  # forward index of this instruction
        if opcode != MAKE_CLOSURE:
            continue
        # Instruction immediately before MAKE_CLOSURE (modulo OPCODE_OFFSET,
        # presumably accounting for the extra LOAD_CONST on 3.6 — confirm)
        # must load the closure's code object.
        codeobj = code[i - 1 - OPCODE_OFFSET]
        assert codeobj[0] == byteplay.LOAD_CONST
        # Before that, the tuple of closed-over cells is built.
        build_tuple = code[i - 2 - OPCODE_OFFSET]
        assert build_tuple[0] == byteplay.BUILD_TUPLE
        n_closed = build_tuple[1]
        # The n_closed instructions preceding BUILD_TUPLE load each cell.
        load_closures = code[i - 2 - OPCODE_OFFSET - n_closed : i - 2 - OPCODE_OFFSET]
        assert all(o == byteplay.LOAD_CLOSURE for o, _ in load_closures)
        # Build LOAD_CLOSURE instructions for the injected variables.
        newlcs = [(byteplay.LOAD_CLOSURE, inj) for inj in injected]
        # Grow the tuple size to account for the injected cells...
        code[i - 2 - OPCODE_OFFSET] = byteplay.BUILD_TUPLE, n_closed + len(injected)
        # ...and splice the new LOAD_CLOSUREs in just before BUILD_TUPLE.
        code[i - 2 - OPCODE_OFFSET : i - 2 - OPCODE_OFFSET] = newlcs
        # Recurse into the closure's own code object.
        _inject_closure_values_fix_code(codeobj[1], injected, **kwargs)
def update_created_pools(self):
    """Reconcile connection pools with the current set of live nodes.

    When the set of live nodes changes, the loadbalancer will change its
    mind on host distances. It might change it on the node that came/left
    but also on other nodes (for instance, if a node dies, another
    previously ignored node may now be considered).

    This method ensures that all hosts for which a pool should exist have
    one, and hosts that shouldn't don't.

    For internal use only.
    """
    pending = set()
    for host in self.cluster.metadata.all_hosts():
        dist = self._profile_manager.distance(host)
        current_pool = self._pools.get(host)
        fut = None
        if not current_pool or current_pool.is_shutdown:
            # We don't eagerly set is_up on previously ignored hosts. None is
            # included here to allow us to attempt connections to hosts that
            # have gone from ignored to something else.
            if dist != HostDistance.IGNORED and host.is_up in (True, None):
                fut = self.add_or_renew_pool(host, False)
        elif dist != current_pool.host_distance:
            # The distance has changed.
            if dist == HostDistance.IGNORED:
                fut = self.remove_pool(host)
            else:
                current_pool.host_distance = dist
        if fut:
            pending.add(fut)
    return pending
def p_common_scalar_magic_line(p):
    'common_scalar : LINE'
    # PLY parses this docstring as the grammar production; it must read
    # exactly "common_scalar : LINE" (the flattened source had spaces
    # inserted around the underscore, which breaks the rule).
    # Build the __LINE__ magic constant from the token's line number.
    p[0] = ast.MagicConstant(p[1].upper(), p.lineno(1), lineno=p.lineno(1))
def get_chat_administrators(self, chat_id):
    """Use this method to get a list of administrators in a chat.

    On success, returns an Array of ChatMember objects that contains
    information about all chat administrators except other bots.

    :param chat_id:
    :return:
    """
    raw = apihelper.get_chat_administrators(self.token, chat_id)
    return [types.ChatMember.de_json(entry) for entry in raw]
def get_mouse_location2(self):
    """Get all mouse location-related data.

    :return: a namedtuple with ``x``, ``y``, ``screen_num`` and ``window`` fields
    """
    # Out-parameters filled by the C call.
    x = ctypes.c_int(0)
    y = ctypes.c_int(0)
    screen_num = ctypes.c_ulong(0)
    window = ctypes.c_ulong(0)
    _libxdo.xdo_get_mouse_location2(
        self._xdo,
        ctypes.byref(x),
        ctypes.byref(y),
        ctypes.byref(screen_num),
        ctypes.byref(window),
    )
    return mouse_location2(x.value, y.value, screen_num.value, window.value)
def parse_args ( bels : list , char_locs : CharLocs , parsed : Parsed , errors : Errors ) -> Tuple [ Parsed , Errors ] : """Parse arguments from functions Args : bels : BEL string as list of chars char _ locs : char locations for parens , commas and quotes parsed : function locations errors : error messages Returns : ( functions , errors ) : function and arg locations plus error messages"""
commas = char_locs [ "commas" ] # Process each span key in parsed from beginning for span in parsed : if parsed [ span ] [ "type" ] != "Function" or "parens_span" not in parsed [ span ] : continue # Skip if not argument - less sp , ep = parsed [ span ] [ "parens_span" ] # calculate args _ end position if ep == - 1 : # supports bel completion args_end = len ( bels ) - 1 else : args_end = ep - 1 # Parse arguments args = [ ] arg_start = sp + 1 each_arg_end_list = sorted ( [ end - 1 for end in commas . get ( sp , [ ] ) ] + [ args_end ] ) for arg_end in each_arg_end_list : # log . debug ( f ' Arg _ start : { arg _ start } Arg _ end : { arg _ end } ' ) # Skip blanks at beginning of argument while arg_start < args_end and bels [ arg_start ] == " " : arg_start += 1 # Trim arg _ end ( e . g . HGNC : AKT1 , HGNC : EGF ) - if there are spaces before comma trimmed_arg_end = arg_end while trimmed_arg_end > arg_start and bels [ trimmed_arg_end ] == " " : trimmed_arg_end -= 1 if trimmed_arg_end < arg_start : trimmed_arg_end = arg_start arg = "" . join ( bels [ arg_start : trimmed_arg_end + 1 ] ) # log . debug ( f ' Adding arg to args : { arg _ start } { trimmed _ arg _ end } ' ) args . append ( { "arg" : arg , "span" : ( arg_start , trimmed_arg_end ) } ) arg_start = arg_end + 2 parsed [ span ] [ "args" ] = args return parsed , errors
def _parse_postmeta(element):
    """Retrive post metadata as a dictionary.

    Scans the ``postmeta`` children of *element* and collects the attachment
    metadata (a PHP-serialized blob, decoded with phpserialize) and the
    attached file path, when present.
    """
    import phpserialize

    meta = {}
    for entry in element.findall("./{%s}postmeta" % WP_NAMESPACE):
        key = entry.find("./{%s}meta_key" % WP_NAMESPACE).text
        value = entry.find("./{%s}meta_value" % WP_NAMESPACE).text
        if key == "_wp_attachment_metadata":
            # Malformed serialized payloads are silently skipped.
            stream = StringIO(value.encode())
            try:
                meta["attachment_metadata"] = phpserialize.load(stream)
            except ValueError:
                pass
        if key == "_wp_attached_file":
            meta["attached_file"] = value
    return meta
def after_init_apps(sender):
    """Check that the running redis server version satisfies settings.

    Reads ``REDIS/check_version`` from the uliweb settings.  The value is
    either a bare version string (prefix equality check) or an operator
    followed by a version, e.g. ``">= 2.6"``.  Failures are only logged,
    never raised.
    """
    from uliweb import settings
    from uliweb.utils.common import log

    check = settings.get_var('REDIS/check_version')
    if not check:
        return
    client = get_redis()
    try:
        info = client.info()
    except Exception as e:
        log.exception(e)
        log.error('Redis is not started!')
        return
    redis_version = info['redis_version']
    version = tuple(map(int, redis_version.split('.')))
    # Split the check string into an operator and a version spec
    op = re_compare_op.search(check)
    if op:
        _op = op.group()
        _v = check[op.end() + 1:].strip()
    else:
        _op = '='
        _v = check
    nv = tuple(map(int, _v.split('.')))
    if _op == '=':
        # Equality compares only as many components as the spec provides
        flag = version[:len(nv)] == nv
    elif _op == '>=':
        flag = version >= nv
    elif _op == '>':
        flag = version > nv
    elif _op == '<=':
        flag = version <= nv
    elif _op == '<':
        flag = version < nv
    else:
        # BUG FIX: previously this branch fell through with ``flag`` unbound,
        # raising NameError on the check below; log and bail out instead.
        log.error("Can't support operator %s when check redis version" % _op)
        return
    if not flag:
        log.error("Redis version %s is not matched what you want %s" % (redis_version, _v))
def get_param(self, param, default=None):
    """Get a parameter in config (handle default value).

    :param param: name of the parameter to recover
    :type param: string
    :param default: the default value; if ``None`` (the default) and the
        parameter is missing from the configuration, ``MissingParameter``
        is raised
    :type default: string or None
    :rtype: the configured value, or *default* when the parameter is absent
    """
    try:
        return self.config[param]
    except KeyError:
        if default is None:
            raise MissingParameter('ppolicy', param)
        return default
def run_field_scan(ModelClass, model_kwargs, t_output_every, t_upto, field, vals,
                   force_resume=True, parallel=False):
    """Run many models with a range of parameter sets.

    Parameters
    ----------
    ModelClass: callable
        A class or factory function that returns a model object by calling
        ``ModelClass(model_kwargs)``.
    model_kwargs: dict
        See ``ModelClass`` explanation.
    t_output_every: float
        see :class:`Runner`.
    t_upto: float
        Run each model until the time is equal to this.
    field: str
        The name of the field to be varied, whose values are in ``vals``.
    vals: array_like
        Iterable of values to use to instantiate each Model object.
    force_resume: bool
        Passed through to the underlying kwarg scan.
    parallel: bool
        Whether or not to run the models in parallel, using the
        Multiprocessing library.  If ``True``, the number of concurrent
        tasks will be equal to one less than the number of available cores
        detected.
    """
    # BUG FIX: ``dict(model_kwargs, field=val)`` created a literal key named
    # "field" instead of using the *value* of the ``field`` argument as the
    # key, so the scanned parameter was never actually varied.
    model_kwarg_sets = [dict(model_kwargs, **{field: val}) for val in vals]
    run_kwarg_scan(ModelClass, model_kwarg_sets, t_output_every, t_upto,
                   force_resume, parallel)
def wcs(self):
    """World coordinate system (`~astropy.wcs.WCS`).

    Built fresh on every call from the ``crval``/``crpix``/``cdelt``/
    ``ctype`` entries of ``self.config``.
    """
    coord_sys = WCS(naxis=2)
    for key in ('crval', 'crpix', 'cdelt', 'ctype'):
        setattr(coord_sys.wcs, key, self.config[key])
    return coord_sys
def evaluate_extracted_tokens(gold_content, extr_content):
    """Evaluate the similarity between gold-standard and extracted content,
    typically for a single HTML document, as another way of evaluating the
    performance of an extractor model.

    Args:
        gold_content (str or Sequence[str]): Gold-standard content, either
            as a string or as an already-tokenized list of tokens.
        extr_content (str or Sequence[str]): Extracted content, either as a
            string or as an already-tokenized list of tokens.

    Returns:
        Dict[str, float]: ``jaccard`` set similarity over token sets and
        the Damerau-Levenshtein ``levenshtein`` distance over token
        sequences.
    """
    if isinstance(gold_content, string_):
        gold_content = simple_tokenizer(gold_content)
    if isinstance(extr_content, string_):
        extr_content = simple_tokenizer(extr_content)
    gold_set = set(gold_content)
    extr_set = set(extr_content)
    union = gold_set | extr_set
    # BUG FIX: two empty token sequences used to raise ZeroDivisionError;
    # they are trivially identical, so report full similarity.
    jaccard = len(gold_set & extr_set) / len(union) if union else 1.0
    levenshtein = dameraulevenshtein(gold_content, extr_content)
    return {'jaccard': jaccard, 'levenshtein': levenshtein}
def update_line(self, t, x, y, **kw):
    """Overwrite the data for trace ``t``.

    All arguments are forwarded untouched to the owning panel.
    """
    panel = self.panel
    panel.update_line(t, x, y, **kw)
def absent(name, auth=None):
    '''
    Ensure service does not exist

    name
        Name of the service
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    # Configure the OpenStack connection from the passed auth dict (or defaults)
    __salt__['keystoneng.setup_clouds'](auth)

    service = __salt__['keystoneng.service_get'](name=name)
    if service:
        # Test mode: report the pending deletion without touching anything
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = {'id': service.id}
            ret['comment'] = 'Service will be deleted.'
            return ret

        # NOTE(review): the whole service object (not its name/id) is passed
        # as ``name`` here -- keystoneng.service_delete appears to accept
        # either; confirm against the execution module.
        __salt__['keystoneng.service_delete'](name=service)
        ret['changes']['id'] = service.id
        ret['comment'] = 'Deleted service'

    # Service already absent: result True with no changes
    return ret
def remove_trailing_stars ( self , new_path = None , in_place = True , check = False ) : """Remove the bad character that can be inserted by some programs at the end of sequences ."""
# Optional check # if check and int ( sh . grep ( '-c' , '\*' , self . path , _ok_code = [ 0 , 1 ] ) ) == 0 : return self # Faster with bash utilities # if in_place is True : sh . sed ( '-i' , 's/\*$//g' , self . path ) return self # Standard way # if new_path is None : new_fasta = self . __class__ ( new_temp_path ( ) ) else : new_fasta = self . __class__ ( new_path ) new_fasta . create ( ) for seq in self : new_fasta . add_str ( str ( seq . seq ) . rstrip ( '*' ) , seq . id ) new_fasta . close ( ) return new_fasta
def subset_sum(x, R):
    """Subset-sum decided by meet-in-the-middle (splitting).

    :param x: table of values
    :param R: target value
    :returns bool: True iff some subsequence of x has total sum R
    :complexity: exponential in n/2 -- each half enumerates its partial sums
    """
    mid = len(x) // 2  # divide input
    left_sums = set(part_sum(x[:mid]))
    right_complements = {R - s for s in part_sum(x[mid:])}
    # A subset summing to R exists iff some left partial sum equals
    # R minus some right partial sum.
    return not left_sums.isdisjoint(right_complements)
def id_pools_vmac_ranges(self):
    """Gets the IdPoolsRanges API Client for VMAC Ranges.

    The client is built lazily on first access and cached afterwards.

    Returns:
        IdPoolsRanges:
    """
    client = self.__id_pools_vmac_ranges
    if not client:
        client = IdPoolsRanges('vmac', self.__connection)
        self.__id_pools_vmac_ranges = client
    return client
def files(self):
    '''Listing files related to workflows related to current directory

    Returns a list of ``(id, item)`` tuples from the ``workflows`` table,
    or an empty list when the signature database cannot be read.
    '''
    try:
        cur = self.conn.cursor()
        cur.execute('SELECT id, item FROM workflows WHERE entry_type = "tracked_files"')
        # NOTE(review): ``item`` is stored as the repr of a Python object and
        # rehydrated with eval(); this is unsafe if the signature database
        # can be written by untrusted parties -- consider ast.literal_eval.
        return [(x[0], eval(x[1])) for x in cur.fetchall()]
    except sqlite3.DatabaseError as e:
        env.logger.warning(f'Failed to get files from signature database: {e}')
        return []
def crosslisting_feature(catalog, soup):
    """Parses all the crosslistings. These refer to the similar CRNs, such
    as a grad & undergrad level course.

    Stores the result on ``catalog.crosslistings`` as a FrozenDict mapping
    each CRN to its shared CrossListing object.
    """
    listing = {}
    for elem in soup.coursedb.findAll('crosslisting'):
        seats = int(elem['seats'])
        crns = [safeInt(crn.string) for crn in elem.findAll('crn')]
        # we want to refer to the same object to save space
        cl = CrossListing(crns, seats)
        for crn in crns:
            listing[crn] = cl
    catalog.crosslistings = FrozenDict(listing)
    logger.info('Catalog has %d course crosslistings' % len(catalog.crosslistings))
def operations(nsteps):
    '''Returns the number of operations needed for nsteps of GMRES.

    Counts applications of A, the preconditioners M/Ml/Mr, inner products
    and axpy operations.
    '''
    # Triangular term from the Arnoldi / Gram-Schmidt recurrence
    triangular = nsteps * (nsteps + 1) / 2
    return {
        'A': 1 + nsteps,
        'M': 2 + nsteps,
        'Ml': 2 + nsteps,
        'Mr': 1 + nsteps,
        'ip_B': 2 + nsteps + triangular,
        'axpy': 4 + 2 * nsteps + triangular,
    }
def seek(self, offset, whence=0):
    """Move to a new offset either relative or absolute.

    whence=0 is absolute, whence=1 is relative to the current position,
    whence=2 is relative to the end.  Seeking before position zero raises
    IOError; seeking past the end succeeds (tell() reports that position
    and read() returns an empty bytes object).  Returns the new position.
    """
    if self.closed:
        raise ValueError("I/O operation on closed file")
    if whence == 0:
        base = 0
    elif whence == 1:
        base = self.pos
    elif whence == 2:
        base = self.size
    else:
        # Unknown whence: position is left untouched
        return self.pos
    target = base + offset
    if target < 0:
        raise IOError('seek would move position outside the file')
    self.pos = target
    return self.pos
def locate_numbers(input_string: str) -> int:
    """This function will find the numbers in a string and return their
    starting position.

    Args:
        input_string: An input string that may contain numbers.

    Returns:
        The starting position of the first run of digits, or ``None`` when
        the string contains no digits.

    Examples:
        >>> locate_numbers('there are 70 flats in this apartment')
        10
        >>> locate_numbers('every adult have 32 teeth')
        17
    """
    import re
    match = re.search(r'\d+', input_string)
    return match.start() if match else None
def join(*args, trailing_slash=False):
    """Return a url path joined from the arguments.

    It correctly handles blank/None arguments, and removes back-to-back
    slashes, eg::

        assert join('/', 'foo', None, 'bar', '', 'baz') == '/foo/bar/baz'
        assert join('/', '/foo', '/', '/bar/') == '/foo/bar'

    Note that it removes trailing slashes by default; pass
    ``trailing_slash=True`` to keep one::

        assert join('/foo', 'baz', None, trailing_slash=True) == '/foo/baz/'
    """
    # Blank/None segments become empty strings before joining
    pieces = ['' if not part else part for part in args]
    collapsed = re.sub(r'/+', '/', '/'.join(pieces))
    if collapsed in ('', '/'):
        return '/'
    trimmed = collapsed.rstrip('/')
    if trailing_slash:
        return trimmed + '/'
    return trimmed
def find_family(self, pattern=r".*", flags=0, node=None):
    """Returns the Nodes from given family.

    :param pattern: Matching pattern.
    :type pattern: unicode
    :param flags: Matching regex flags.
    :type flags: int
    :param node: Node to start walking from; the root node when not given.
    :type node: AbstractNode or AbstractCompositeNode or GraphModelNode
    :return: Family nodes.
    :rtype: list
    """
    root = self.__root_node
    start_node = node or root
    return root.find_family(pattern, flags, start_node)
def noun_phrases(self):
    """Returns a list of noun phrases for this blob.

    Only multi-word phrases are kept; each is whitespace-stripped.
    """
    candidates = (phrase.strip() for phrase in self.np_extractor.extract(self.raw))
    return WordList([phrase for phrase in candidates if len(phrase.split()) > 1])
def resolve_ssl_version(candidate):
    """like resolve_cert_reqs

    Maps ``None`` to ``PROTOCOL_SSLv23``, a string to the matching ``ssl``
    module constant (with or without the ``PROTOCOL_`` prefix), and passes
    any other value through unchanged.
    """
    if candidate is None:
        return PROTOCOL_SSLv23
    if not isinstance(candidate, str):
        return candidate
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        # Allow the short spelling, e.g. "TLS" -> ssl.PROTOCOL_TLS
        resolved = getattr(ssl, 'PROTOCOL_' + candidate)
    return resolved
def AppMoVCopeland ( profile , alpha = 0.5 ) : """Returns an integer that is equal to the margin of victory of the election profile , that is , the smallest number k such that changing k votes can change the winners . : ivar Profile profile : A Profile object that represents an election profile ."""
# Currently , we expect the profile to contain complete ordering over candidates . elecType = profile . getElecType ( ) if elecType != "soc" and elecType != "toc" : print ( "ERROR: unsupported profile type" ) exit ( ) # Initialization n = profile . numVoters m = profile . numCands # Compute the original winner d # Initialize each Copeland score as 0.0. copelandscores = { } for cand in profile . candMap . keys ( ) : copelandscores [ cand ] = 0.0 # For each pair of candidates , calculate the number of votes in which one beat the other . wmgMap = profile . getWmg ( ) for cand1 , cand2 in itertools . combinations ( wmgMap . keys ( ) , 2 ) : if cand2 in wmgMap [ cand1 ] . keys ( ) : if wmgMap [ cand1 ] [ cand2 ] > 0 : copelandscores [ cand1 ] += 1.0 elif wmgMap [ cand1 ] [ cand2 ] < 0 : copelandscores [ cand2 ] += 1.0 # If a pair of candidates is tied , we add alpha to their score for each vote . else : copelandscores [ cand1 ] += alpha copelandscores [ cand2 ] += alpha d = max ( copelandscores . items ( ) , key = lambda x : x [ 1 ] ) [ 0 ] # Compute c * = argmin _ c RM ( d , c ) relative_margin = { } alter_without_d = delete ( range ( 1 , m + 1 ) , d - 1 ) for c in alter_without_d : relative_margin [ c ] = RM ( wmgMap , n , m , d , c , alpha ) c_star = min ( relative_margin . items ( ) , key = lambda x : x [ 1 ] ) [ 0 ] return relative_margin [ c_star ] * ( math . ceil ( log ( m ) ) + 1 )
def create_pipeline_run(pipeline, context_by_op):
    """Create a pipeline run/instance.

    :param pipeline: Pipeline model instance to materialize.
    :param context_by_op: mapping of operation id -> celery task context;
        missing entries yield ``None`` contexts.
    """
    pipeline_run = PipelineRun.objects.create(pipeline=pipeline)
    dag, ops = pipeline.dag
    # Go trough the operation and create operation runs and the upstreams
    op_runs = {}
    runs_by_ops = {}
    for op_id in dag.keys():
        op_run = OperationRun.objects.create(
            pipeline_run=pipeline_run,
            operation_id=op_id,
            celery_task_context=context_by_op.get(op_id))
        op_run_id = op_run.id
        op_runs[op_run_id] = op_run
        runs_by_ops[op_id] = op_run_id

    # Create operations upstreams
    # We set the upstream for the topologically sorted dag
    set_topological_dag_upstreams(dag=dag, ops=ops, op_runs=op_runs, runs_by_ops=runs_by_ops)
def is_ssl_error(error=None):
    """Checks if the given error (or the current one) is an SSL error.

    Also recognizes pyOpenSSL errors when that package is installed.
    """
    if error is None:
        error = sys.exc_info()[1]
    ssl_error_types = [ssl.SSLError]
    try:
        from OpenSSL.SSL import Error
        ssl_error_types.append(Error)
    except ImportError:
        pass
    return isinstance(error, tuple(ssl_error_types))
def _detect_database_platform(self):
    """Detects and sets the database platform.

    When a server version can be determined, a version-specific platform is
    created; otherwise the connection's default DBAL platform is used.

    :raises InvalidPlatformSpecified: if an invalid platform was specified
        for this connection.
    """
    version = self._get_database_platform_version()
    if version is None:
        self._platform = self.get_dbal_platform()
    else:
        self._platform = self._create_database_platform_for_version(version)
def draw_plot(self):
    """Redraws the plot: rescales the y axis to the buffered data with a 5%
    margin, pushes the data into each matplotlib line, and redraws the
    canvas."""
    import numpy, pylab
    state = self.state  # NOTE(review): unused in this method -- confirm before removing
    if len(self.data[0]) == 0:
        print("no data to plot")
        return
    # Global y-range across every trace
    vhigh = max(self.data[0])
    vlow = min(self.data[0])
    for i in range(1, len(self.plot_data)):
        vhigh = max(vhigh, max(self.data[i]))
        vlow = min(vlow, min(self.data[i]))
    # Pad by 5% so points do not sit on the frame
    ymin = vlow - 0.05 * (vhigh - vlow)
    ymax = vhigh + 0.05 * (vhigh - vlow)
    if ymin == ymax:
        # Flat data: open a small arbitrary window
        ymax = ymin + 0.1
        ymin = ymin - 0.1
    self.axes.set_ybound(lower=ymin, upper=ymax)
    self.axes.grid(True, color='gray')
    pylab.setp(self.axes.get_xticklabels(), visible=True)
    pylab.setp(self.axes.get_legend().get_texts(), fontsize='small')
    for i in range(len(self.plot_data)):
        ydata = numpy.array(self.data[i])
        xdata = self.xdata
        # Align x with a possibly shorter y buffer by keeping the tail
        if len(ydata) < len(self.xdata):
            xdata = xdata[-len(ydata):]
        self.plot_data[i].set_xdata(xdata)
        self.plot_data[i].set_ydata(ydata)
    self.canvas.draw()
async def answer_callback_query(self,
                                callback_query_id: base.String,
                                text: typing.Union[base.String, None] = None,
                                show_alert: typing.Union[base.Boolean, None] = None,
                                url: typing.Union[base.String, None] = None,
                                cache_time: typing.Union[base.Integer, None] = None) -> base.Boolean:
    """
    Use this method to send answers to callback queries sent from inline keyboards.
    The answer will be displayed to the user as a notification at the top of the chat
    screen or as an alert.

    Alternatively, the user can be redirected to the specified Game URL. For this
    option to work, you must first create a game for your bot via @Botfather and
    accept the terms. Otherwise, you may use links like t.me/your_bot?start=XXXX
    that open your bot with a parameter.

    Source: https://core.telegram.org/bots/api#answercallbackquery

    :param callback_query_id: Unique identifier for the query to be answered
    :param text: Text of the notification. If not specified, nothing will be
        shown to the user, 0-1024 characters
    :param show_alert: If true, an alert will be shown by the client instead of
        a notification at the top of the chat screen. Defaults to false.
    :param url: URL that will be opened by the user's client
    :param cache_time: The maximum amount of time in seconds that the result of
        the callback query may be cached client-side.
    :return: On success, True is returned
    :rtype: :obj:`base.Boolean`
    """
    # generate_payload() harvests the method arguments from locals() by name:
    # do not rename parameters or introduce locals above this call.
    payload = generate_payload(**locals())
    result = await self.request(api.Methods.ANSWER_CALLBACK_QUERY, payload)
    return result
def unregister_plotter(identifier, sorter=True, plot_func=True):
    """Unregister a :class:`psyplot.plotter.Plotter` for the projects

    Parameters
    ----------
    identifier: str
        Name of the attribute that is used to filter for the instances
        belonging to this plotter or to create plots with this plotter
    sorter: bool
        If True, the identifier will be unregistered from the
        :class:`Project` class
    plot_func: bool
        If True, the identifier will be unregistered from the
        :class:`ProjectPlotter` class
    """
    # ``d`` aliases the live registry entry (or a throwaway dict when the
    # identifier is unknown); flipping its flags records a *partial*
    # unregistration when only one of sorter/plot_func is requested.
    d = registered_plotters.get(identifier, {})
    if sorter and hasattr(Project, identifier):
        delattr(Project, identifier)
        d['sorter'] = False
    if plot_func and hasattr(ProjectPlotter, identifier):
        for cls in [ProjectPlotter, DatasetPlotter, DataArrayPlotter]:
            delattr(cls, identifier)
        # The module-level ``plot`` object caches the bound method under a
        # leading underscore; remove it too when present.
        try:
            delattr(plot, '_' + identifier)
        except AttributeError:
            pass
        d['plot_func'] = False
    # Drop the registry entry entirely only when both sides were removed
    if sorter and plot_func:
        registered_plotters.pop(identifier, None)
def get_configure(self, repo=None, name=None, groups=None, main_cfg=False):
    """Get the vent.template settings for a given tool by looking at the
    plugin_manifest.

    :param repo: repository constraint for the manifest lookup
    :param name: tool-name constraint for the manifest lookup
    :param groups: group constraint for the manifest lookup
    :param main_cfg: when True, render the main vent config instead of a
        tool template
    :return: tuple ``(bool status, str config text or None/error message)``
    """
    # locals() snapshots the keyword arguments as manifest constraints;
    # main_cfg is an option, not a constraint, so it is removed.  Keep this
    # as the first statement of the function.
    constraints = locals()
    del constraints['main_cfg']
    status = (True, None)
    template_dict = {}
    return_str = ''
    if main_cfg:
        vent_cfg = Template(self.vent_config)
        for section in vent_cfg.sections()[1]:
            template_dict[section] = {}
            for vals in vent_cfg.section(section)[1]:
                template_dict[section][vals[0]] = vals[1]
    else:
        # all possible vent.template options stored in plugin_manifest
        options = ['info', 'service', 'settings', 'docker', 'gpu']
        tools = Template(System().manifest).constrain_opts(constraints, options)[0]
        if tools:
            # should only be one tool
            tool = list(tools.keys())[0]
            # load all vent.template options into dict
            for section in tools[tool]:
                template_dict[section] = json.loads(tools[tool][section])
        else:
            status = (False, "Couldn't get vent.template information")
    if status[0]:
        # display all those options as they would appear in the file
        for section in template_dict:
            return_str += '[' + section + ']\n'
            # ensure instances shows up in configuration
            for option in template_dict[section]:
                if option.startswith('#'):
                    return_str += option + '\n'
                else:
                    return_str += option + ' = '
                    return_str += template_dict[section][option] + '\n'
            return_str += '\n'
        # only one newline at end of file
        status = (True, return_str[:-1])
    return status
def can_create_asset_content_with_record_types(self, asset_id=None, asset_content_record_types=None):
    """Tests if this user can create an ``AssetContent`` using the desired
    record types.

    While ``RepositoryManager.getAssetContentRecordTypes()`` can be used to
    test which records are supported, this method tests which records are
    required for creating a specific ``AssetContent``.  Providing an empty
    array tests if an ``AssetContent`` can be created with no records.

    :param asset_id: the ``Id`` of an ``Asset``
    :type asset_id: ``osid.id.Id``
    :param asset_content_record_types: array of asset content record types
    :type asset_content_record_types: ``osid.type.Type[]``
    :return: ``true`` if ``AssetContent`` creation using the specified
        ``Types`` is supported, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``asset_id`` or
        ``asset_content_record_types`` is ``null``

    *compliance: mandatory -- This method must be implemented.*
    """
    # NOTE(review): both arguments are currently ignored -- the
    # authorization hint service answers for the whole catalog.
    url_path = construct_url('authorization', bank_id=self._catalog_idstr)
    return self._get_request(url_path)['assetHints']['canCreate']
def _find_unused_static_warnings(filename, lines, ast_list):
    """Warn about unused static variables.

    Prints one ``file:line: unused variable 'name'`` warning per static
    declaration that is never referenced from a function body and returns
    the number of warnings emitted.

    NOTE(review): ``ast`` here is the project's own C++ AST module (it has
    ``Function``/``Class`` node types), not the stdlib ``ast``.
    """
    static_declarations = dict(_get_static_declarations(ast_list))

    def find_variables_use(body):
        # Count only direct children; nested function bodies are walked by
        # the caller below.
        for child in body:
            if child.name in static_declarations:
                static_use_counts[child.name] += 1

    static_use_counts = collections.Counter()
    for node in ast_list:
        if isinstance(node, ast.Function) and node.body:
            find_variables_use(node.body)
        elif isinstance(node, ast.Class) and node.body:
            for child in node.body:
                if isinstance(child, ast.Function) and child.body:
                    find_variables_use(child.body)

    count = 0
    # Report in source order (sorted by declaration start offset)
    for (name, _) in sorted(static_declarations.items(), key=lambda x: x[1].start):
        if not static_use_counts[name]:
            print("{}:{}: unused variable '{}'".format(
                filename,
                lines.get_line_number(static_declarations[name].start),
                name))
            count += 1
    return count
def print_row(*argv):
    """Print one row of data.

    Expects at least three positional values: id, name and allocation.
    Any further values (e.g. level) are currently ignored.
    """
    row_id, name, allocation = argv[0], argv[1], argv[2]
    # columns: id left-aligned (3), name left-aligned (13), allocation right-aligned (5)
    print(f"{row_id:<3} {name:<13} {allocation:>5}")
def main():
    """Shows basic usage of the Google Calendar API.

    Creates a Google Calendar API service object and outputs a list of the
    next 10 events on the user's calendar.
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('calendar', 'v3', http=http)

    now = datetime.datetime.utcnow().isoformat() + 'Z'  # 'Z' indicates UTC time
    print('Getting the upcoming 10 events')
    eventsResult = service.events().list(
        calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,
        orderBy='startTime').execute()
    events = eventsResult.get('items', [])

    if not events:
        print('No upcoming events found.')
    for event in events:
        # All-day events carry 'date' instead of 'dateTime'
        start = event['start'].get('dateTime', event['start'].get('date'))
        print(start, event['summary'])
def _is_duplicate_record ( self , rtype , name , content ) : """Check if DNS entry already exists ."""
records = self . _list_records ( rtype , name , content ) is_duplicate = len ( records ) >= 1 if is_duplicate : LOGGER . info ( 'Duplicate record %s %s %s, NOOP' , rtype , name , content ) return is_duplicate
def generate_menu(self, ass, text, path=None, level=0):
    """Function generates menu from based on ass parameter

    :param ass: (assistant, subassistants) pair to build the menu from
    :param text: accumulated "|"-separated label string, extended here
    :param path: list of assistant names leading to this level.
        NOTE(review): the default ``None`` raises TypeError at
        ``list(path)`` -- callers apparently always pass a list; confirm.
    :param level: recursion depth; at level 0 the root assistant name is
        also appended to the path
    :return: (menu widget, updated text)
    """
    menu = self.create_menu()
    # Sub-assistants sorted case-insensitively by full name
    for index, sub in enumerate(sorted(ass[1], key=lambda y: y[0].fullname.lower())):
        if index != 0:
            text += "|"
        text += "- " + sub[0].fullname
        new_path = list(path)
        if level == 0:
            new_path.append(ass[0].name)
        new_path.append(sub[0].name)
        menu_item = self.menu_item(sub, new_path)
        if sub[1]:
            # If assistant has subassistants, recurse and attach as submenu
            (sub_menu, txt) = self.generate_menu(sub, text, new_path, level=level + 1)
            menu_item.set_submenu(sub_menu)
        menu.append(menu_item)
    return menu, text
def match_column_labels ( self , match_value_or_fct , levels = None , max_matches = 0 , empty_res = 1 ) : """Check the original DataFrame ' s column labels to find a subset of the current region : param match _ value _ or _ fct : value or function ( hdr _ value ) which returns True for match : param levels : [ None , scalar , indexer ] : param max _ matches : maximum number of columns to return : return :"""
allmatches = self . parent . _find_column_label_positions ( match_value_or_fct , levels ) # only keep matches which are within this region matches = [ m for m in allmatches if m in self . col_ilocs ] if max_matches and len ( matches ) > max_matches : matches = matches [ : max_matches ] if matches : return RegionFormatter ( self . parent , self . row_ilocs , pd . Int64Index ( matches ) ) elif empty_res : return self . empty_frame ( )
def interval_overlap(a, b, x, y):
    """Return by how much the intervals [a, b] and [x, y] overlap.

    Assumes a <= b and x <= y.  Disjoint intervals overlap by 0.

    BUG FIX: the previous branch analysis reached ``assert False`` whenever
    [x, y] was strictly contained in [a, b] (a < x and y < b); the overlap
    in that case is y - x.  The closed-form below covers every case.
    """
    return max(0, min(b, y) - max(a, x))
def assert_greater(first, second, msg_fmt="{msg}"):
    """Fail if first is not greater than second.

    >>> assert_greater('foo', 'bar')
    >>> assert_greater(5, 5)
    Traceback (most recent call last):
        ...
    AssertionError: 5 is not greater than 5

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * first - the first argument
    * second - the second argument
    """
    if first > second:
        return
    msg = "{!r} is not greater than {!r}".format(first, second)
    fail(msg_fmt.format(msg=msg, first=first, second=second))
def log(self, lvl, msg, *args, **kwargs):
    """Both prints to stdout/stderr and the django.currencies logger.

    Messages below ``self.verbosity`` only go to the logger; WARNING and
    above go to stderr, everything else to stdout.
    """
    logger.log(lvl, msg, *args, **kwargs)
    if lvl < self.verbosity:
        return
    # %-format with positional args when given, otherwise with keywords
    formatted = msg % args if args else msg % kwargs
    stream = self.stderr if lvl >= logging.WARNING else self.stdout
    stream.write(formatted)
def set_bucket_props(self, transport, bucket, props):
    """set_bucket_props(bucket, props)

    Sets bucket properties for the given bucket, validating them first.

    .. note:: This request is automatically retried :attr:`retries` times
       if it fails due to network error.

    :param bucket: the bucket whose properties will be set
    :type bucket: RiakBucket
    :param props: the properties to set
    :type props: dict
    """
    # Validate locally before touching the wire
    _validate_bucket_props(props)
    return transport.set_bucket_props(bucket, props)
def close(self):
    """Closes associated resources of this request object.

    This closes all file handles explicitly.  You can also use the request
    object in a with statement which will automatically close it.

    .. versionadded:: 0.9
    """
    uploaded = self.__dict__.get('files')
    if uploaded:
        for _key, filehandle in iter_multi_items(uploaded):
            filehandle.close()
def summary(self):
    """Summary by packages and dependencies.

    Prints a banner followed by the dependency/package counts, wrapped in
    the instance's terminal color codes.
    """
    print("\nStatus summary")
    print("=" * 79)
    template = "{0}found {1} dependencies in {2} packages.{3}\n"
    print(template.format(self.grey, self.count_dep, self.count_pkg, self.endc))
def nodes(self):
    """Return the list of nodes, in ``_nodeids`` order."""
    return [self._nodes[node_id] for node_id in self._nodeids]
def last_day(self):
    """Return the last day of Yom Tov or Shabbat.

    This is useful for three-day holidays, for example: it will return the
    last in a string of Yom Tov + Shabbat.
    If this HDate is Shabbat followed by no Yom Tov, returns the Saturday.
    If this HDate is neither Yom Tov, nor Shabbat, this just returns
    itself.
    """
    # Walk forward while the *next* day still belongs to the holiday /
    # Shabbat run; stop on the last such day (possibly self).
    day_iter = self
    while day_iter.next_day.is_yom_tov or day_iter.next_day.is_shabbat:
        day_iter = day_iter.next_day
    return day_iter
def parse_data_type(self, index, **kwargs):
    """Parse a type to an other type.

    Converts the cell at *index* in place according to ``kwargs['atype']``
    ("date", "perc", "account", "unicode", "int" or "float") and emits
    ``dataChanged``.  Conversion errors are printed and swallowed.

    :return: False when *index* is invalid, otherwise None
    """
    if not index.isValid():
        return False
    try:
        if kwargs['atype'] == "date":
            self._data[index.row()][index.column()] = datestr_to_datetime(
                self._data[index.row()][index.column()], kwargs['dayfirst']).date()
        elif kwargs['atype'] == "perc":
            # NOTE(review): eval() on cell text is unsafe with untrusted
            # input -- consider float()/ast.literal_eval instead.
            _tmp = self._data[index.row()][index.column()].replace("%", "")
            self._data[index.row()][index.column()] = eval(_tmp) / 100.
        elif kwargs['atype'] == "account":
            _tmp = self._data[index.row()][index.column()].replace(",", "")
            self._data[index.row()][index.column()] = eval(_tmp)
        elif kwargs['atype'] == "unicode":
            self._data[index.row()][index.column()] = to_text_string(
                self._data[index.row()][index.column()])
        elif kwargs['atype'] == "int":
            self._data[index.row()][index.column()] = int(
                self._data[index.row()][index.column()])
        elif kwargs['atype'] == "float":
            self._data[index.row()][index.column()] = float(
                self._data[index.row()][index.column()])
        self.dataChanged.emit(index, index)
    except Exception as instance:
        print(instance)
def unzip(iterable):
    """The inverse of :func:`zip`, this function disaggregates the elements
    of the zipped *iterable*.

    The ``i``-th iterable contains the ``i``-th element from each element
    of the zipped iterable.  The first element is used to determine the
    length of the remaining elements.

        >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
        >>> letters, numbers = unzip(iterable)
        >>> list(letters)
        ['a', 'b', 'c', 'd']
        >>> list(numbers)
        [1, 2, 3, 4]

    This is similar to using ``zip(*iterable)``, but it avoids reading
    *iterable* into memory.  Note, however, that this function uses
    :func:`itertools.tee` and thus may require significant storage.
    """
    # ``spy`` peeks at the first element without consuming the iterator.
    head, iterable = spy(iter(iterable))
    if not head:
        # empty iterable, e.g. zip([], [], [])
        return ()
    # spy returns a one-length iterable as head
    head = head[0]
    iterables = tee(iterable, len(head))

    def itemgetter(i):
        def getter(obj):
            try:
                return obj[i]
            except IndexError:
                # basically if we have an iterable like
                # iter([(1, 2, 3), (4, 5), (6,)])
                # the second unzipped iterable would fail at the third tuple
                # since it would try to access tup[1]
                # same with the third unzipped iterable and the second tuple
                # to support these "improperly zipped" iterables,
                # we create a custom itemgetter
                # which just stops the unzipped iterables
                # at first length mismatch
                # NOTE(review): raising StopIteration from a callable passed
                # to map() ends that map iterator; this is deliberate and is
                # unaffected by PEP 479 (which only covers generators).
                raise StopIteration
        return getter

    return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
def _calc_damping ( mod_reducs , x_2 , x_2_mean , x_3 , x_3_mean ) : """Compute the damping ratio using Equation ( 16 ) ."""
# Mean values of the predictors x_1_mean = - 1.0 x_1 = np . log ( np . log ( 1 / mod_reducs ) + 0.103 ) ones = np . ones_like ( mod_reducs ) x = np . c_ [ ones , x_1 , x_2 , x_3 , ( x_1 - x_1_mean ) * ( x_2 - x_2_mean ) , ( x_2 - x_2_mean ) * ( x_3 - x_3_mean ) ] c = np . c_ [ 2.86 , 0.571 , - 0.103 , - 0.141 , 0.0419 , - 0.240 ] ln_damping = ( c * x ) . sum ( axis = 1 ) return np . exp ( ln_damping ) / 100.
def crypto_hash_sha512(message):
    """Hash ``message`` with SHA-512 and return the raw digest.

    :param message: bytes
    :rtype: bytes (``crypto_hash_sha512_BYTES`` long)
    """
    # Allocate a C output buffer of the size the library expects.
    digest = ffi.new("unsigned char[]", crypto_hash_sha512_BYTES)
    rc = lib.crypto_hash_sha512(digest, message, len(message))
    # The C call returns 0 on success; anything else is unexpected.
    ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError)
    # Copy the C buffer out into an immutable Python bytes object.
    return ffi.buffer(digest, crypto_hash_sha512_BYTES)[:]
def rmv_normal_cov(mu, C, size=1):
    """Draw random multivariate normal variates with mean ``mu`` and
    covariance matrix ``C``.

    For ``size == 1`` the result has the same shape as ``mu``; otherwise an
    extra leading axis of length ``size`` is prepended.
    """
    shape_mu = np.shape(mu)
    draws = np.random.multivariate_normal(mu, C, size)
    if size == 1:
        return draws.reshape(shape_mu)
    return draws.reshape((size,) + shape_mu)
def connect(self, autospawn=False, wait=False):
    '''Connect to pulseaudio server.
    "autospawn" option will start new pulse daemon, if necessary.
    Specifying "wait" option will make function block until pulseaudio
    server appears.
    '''
    if self._loop_closed:
        raise PulseError('Eventloop object was already'
                         ' destroyed and cannot be reused from this instance.')
    # A non-None connected value means a previous (dis)connection happened;
    # re-initialize the context before connecting again.
    if self.connected is not None:
        self._ctx_init()
    # self.connected is tri-state: None = in progress, True/False = outcome.
    flags, self.connected = 0, None
    if not autospawn:
        flags |= c.PA_CONTEXT_NOAUTOSPAWN
    if wait:
        flags |= c.PA_CONTEXT_NOFAIL
    try:
        c.pa.context_connect(self._ctx, self.server, flags, None)
    except c.pa.CallError:
        # Immediate failure -- skip the event loop below.
        self.connected = False
    # Pump the event loop until a callback resolves the tri-state.
    while self.connected is None:
        self._pulse_iterate()
    if self.connected is False:
        raise PulseError('Failed to connect to pulseaudio server')
def dumpcache(self):
    '''Usage: dumpcache - display file hash cache'''
    # Docstring kept verbatim: it doubles as the command's usage/help text.
    if not cached.cacheloaded:
        perr("Cache not loaded.")
        return const.ECacheNotLoaded
    # pprint.pprint(cached.cache)
    MyPrettyPrinter().pprint(cached.cache)
    return const.ENoError
def avail_sizes(call=None):
    '''Return a dict of all available VM sizes on the cloud provider with
    relevant data. The data is grouped into three dicts: block devices,
    memory and processors.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )
    conn = get_conn()
    options = conn.getCreateObjectOptions()

    def label(entry):
        # Human-readable description, used both as dict key and as name.
        return entry['itemPrice']['item']['description']

    return {
        'block devices': {
            label(dev): {
                'name': label(dev),
                'capacity':
                    dev['template']['blockDevices'][0]['diskImage']['capacity'],
            }
            for dev in options['blockDevices']
        },
        'memory': {
            label(mem): {
                'name': label(mem),
                'maxMemory': mem['template']['maxMemory'],
            }
            for mem in options['memory']
        },
        'processors': {
            label(cpu): {
                'name': label(cpu),
                'start cpus': cpu['template']['startCpus'],
            }
            for cpu in options['processors']
        },
    }
def _set_widths(self, row, proc_group):
    """Update auto-width Fields based on `row`.

    Parameters
    ----------
    row : dict
    proc_group : {'default', 'override'}
        Whether to consider 'default' or 'override' key for pre- and
        post-format processors.

    Returns
    -------
    bool
        True if any widths required adjustment.
    """
    # Width still unclaimed after all current column widths and separators.
    # NOTE(review): "width_separtor" (sic) matches the attribute name
    # defined elsewhere in this class.
    width_free = self.style["width_"] - sum(
        [sum(self.fields[c].width for c in self.columns),
         self.width_separtor])
    if width_free < 0:
        # Overcommitted: only fixed (non-auto) columns can cause this.
        width_fixed = sum(
            [sum(self.fields[c].width for c in self.columns
                 if c not in self.autowidth_columns),
             self.width_separtor])
        assert width_fixed > self.style["width_"], "bug in width logic"
        raise elements.StyleError(
            "Fixed widths specified in style exceed total width")
    elif width_free == 0:
        lgr.debug("Not checking widths; no free width left")
        return False
    lgr.debug("Checking width for row %r", row)
    adjusted = False
    for column in sorted(self.columns, key=lambda c: self.fields[c].width):
        # ^ Sorting the columns by increasing widths isn't necessary; we do
        # it so that columns that already take up more of the screen don't
        # continue to grow and use up free width before smaller columns
        # have a chance to claim some.
        if width_free < 1:
            lgr.debug("Giving up on checking widths; no free width left")
            break
        if column in self.autowidth_columns:
            field = self.fields[column]
            lgr.debug("Checking width of column %r "
                      "(field width: %d, free width: %d)",
                      column, field.width, width_free)
            # If we've added any style transform functions as
            # pre-format processors, we want to measure the width
            # of their result rather than the raw value.
            if field.pre[proc_group]:
                value = field(row[column], keys=[proc_group],
                              exclude_post=True)
            else:
                value = row[column]
            value = six.text_type(value)
            value_width = len(value)
            wmax = self.autowidth_columns[column]["max"]
            if value_width > field.width:
                width_old = field.width
                width_available = width_free + field.width
                # Grow to fit the value, capped by the column's own max
                # (if any) and by what is actually available.
                width_new = min(value_width,
                                wmax or width_available,
                                width_available)
                if width_new > width_old:
                    adjusted = True
                    field.width = width_new
                    lgr.debug("Adjusting width of %r column from %d to %d "
                              "to accommodate value %r",
                              column, width_old, field.width, value)
                    # Keep the truncater in sync with the new width.
                    self._truncaters[column].length = field.width
                    width_free -= field.width - width_old
                    lgr.debug("Free width is %d after processing column %r",
                              width_free, column)
    return adjusted
def getScale(self, zoom):
    """Return the scale at a given zoom level.

    When the grid unit is degrees, the resolution is first converted to
    meters before dividing by the standard pixel size.
    """
    resolution = self.getResolution(zoom)
    if self.unit == 'degrees':
        resolution *= EPSG4326_METERS_PER_UNIT
    return resolution / STANDARD_PIXEL_SIZE
def set_model ( self , model ) : """Set all levels \' model to the given one : param m : the model that the levels should use : type m : QtCore . QAbstractItemModel : returns : None : rtype : None : raises : None"""
# do the set model in reverse ! # set model might trigger an update for the lower levels # but the lower ones have a different model , so it will fail anyways # this way the initial state after set _ model is correct . self . model = model self . _levels [ 0 ] . set_model ( model )
def sum(self):
    """Sum all elements of this array via a lazy Weld "+" aggregation.

    Returns:
        NumpyArrayWeld: wraps the aggregation expression (fold with "+"
        starting from 0) with the same element type and dimension 0,
        i.e. a scalar result.
    """
    return NumpyArrayWeld(
        numpy_weld_impl.aggr(
            self.expr,
            "+",
            0,
            self.weld_type
        ),
        self.weld_type,
        0
    )
def to_date(value, default=None):
    """Try to convert the passed-in value to Zope's DateTime.

    :param value: The value to be converted to a valid DateTime
    :type value: str, DateTime or datetime
    :return: The DateTime representation of the value passed in, or the
        converted *default* when the value is falsy or unparseable
    """
    # Already the target type -- nothing to do.
    if isinstance(value, DateTime):
        return value
    # Falsy input: fall back to the default (which may itself need
    # conversion), or None when no default was given.
    if not value:
        return None if default is None else to_date(default)
    try:
        if isinstance(value, str) and '.' in value:
            # A dot suggests a day.month.year string; force international
            # parsing to avoid US-style month/day ambiguity. See
            # https://docs.plone.org/develop/plone/misc/datetime.html#datetime-problems-and-pitfalls
            return DateTime(value, datefmt='international')
        return DateTime(value)
    except (TypeError, ValueError, DateTimeError):
        return to_date(default)
def physicalMemory():
    """Return the total physical memory of the machine, in bytes.

    >>> n = physicalMemory()
    >>> n > 0
    True
    >>> n == physicalMemory()
    True
    """
    try:
        page_size = os.sysconf('SC_PAGE_SIZE')
        page_count = os.sysconf('SC_PHYS_PAGES')
    except ValueError:
        # Platforms without SC_PHYS_PAGES (e.g. macOS): ask sysctl instead.
        out = subprocess.check_output(['sysctl', '-n', 'hw.memsize'])
        return int(out.decode('utf-8').strip())
    return page_size * page_count
def edit_message_reply_markup(self, *args, **kwargs):
    """See :func:`edit_message_reply_markup`.

    Thin convenience wrapper: inside this method the bare name resolves to
    the module-level function of the same name. This instance's default
    overrides are merged into ``kwargs`` and the resulting request is
    executed immediately via ``.run()``.
    """
    return edit_message_reply_markup(
        *args, **self._merge_overrides(**kwargs)).run()
def _gen_4spec(op, path, value, create_path=False, xattr=False,
               _expand_macros=False):
    """Like `_gen_3spec`, but also accepts a mandatory value as its third
    argument.

    :param bool _expand_macros: Whether macros in the value should be
        expanded. The macros themselves are defined at the server side.
    """
    # Translate the boolean options into their protocol flag bits.
    flag_bits = (
        (create_path, _P.SDSPEC_F_MKDIR_P),
        (xattr, _P.SDSPEC_F_XATTR),
        (_expand_macros, _P.SDSPEC_F_EXPANDMACROS),
    )
    flags = 0
    for enabled, bit in flag_bits:
        if enabled:
            flags |= bit
    return Spec(op, path, flags, value)
def distinct(self, numPartitions=None):
    """Return a new RDD containing the distinct elements in this RDD.

    >>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
    [1, 2, 3]
    """
    # Key every element by itself, collapse duplicate keys keeping the
    # first value seen, then strip the dummy values back off.
    keyed = self.map(lambda element: (element, None))
    deduped = keyed.reduceByKey(lambda first, _: first, numPartitions)
    return deduped.map(lambda pair: pair[0])
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.

    Implements equation 14 of Hong & Goda (2007).
    """
    C = self.COEFFS[imt]
    C_PGA = self.COEFFS[PGA()]
    C_AMP = self.AMP_COEFFS[imt]
    # Gets the PGA on rock - need to convert from g to cm/s/s
    # (980.665 cm/s^2 per g).
    pga_rock = self._compute_pga_rock(C_PGA, rup.mag, dists.rjb) * 980.665
    # Get the mean ground motion value: nonlinear magnitude scaling plus
    # magnitude-distance attenuation plus Vs30-dependent site amplification
    # (which itself depends on the rock PGA).
    mean = (self._compute_nonlinear_magnitude_term(C, rup.mag) +
            self._compute_magnitude_distance_term(C, dists.rjb, rup.mag) +
            self._get_site_amplification(C_AMP, sites.vs30, pga_rock))
    # Get standard deviations, broadcast to the distance array's shape.
    stddevs = self._get_stddevs(C, stddev_types, dists.rjb.shape)
    return mean, stddevs
def _manage_used_ips ( self , current_ip ) : """Handle registering and releasing used Tor IPs . : argument current _ ip : current Tor IP : type current _ ip : str"""
# Register current IP . self . used_ips . append ( current_ip ) # Release the oldest registred IP . if self . reuse_threshold : if len ( self . used_ips ) > self . reuse_threshold : del self . used_ips [ 0 ]
def query(self, sql: str, args: tuple = None):
    """Execute a SQL query with a return value.

    :param sql: SQL statement, with placeholders bound from ``args``.
    :param args: parameters for the statement's placeholders, or None.
    :returns: all rows fetched from the cursor (``cursor.fetchall()``).
    """
    with self._cursor() as cursor:
        # Lazy %-style logging args: the repr of (sql, args) is only built
        # when DEBUG is actually enabled (the old code concatenated it
        # eagerly on every call).
        log.debug('Running SQL: %s', (sql, args))
        cursor.execute(sql, args)
        return cursor.fetchall()
def _matrix_grad ( q , h , h_dx , t , t_prime ) : '''Returns the gradient with respect to a single variable'''
N = len ( q ) W = np . zeros ( [ N , N ] ) Wprime = np . zeros ( [ N , N ] ) for i in range ( N ) : W [ i , i ] = 0.5 * ( h [ min ( i + 1 , N - 1 ) ] - h [ max ( i - 1 , 0 ) ] ) Wprime [ i , i ] = 0.5 * ( h_dx [ min ( i + 1 , N - 1 ) ] - h_dx [ max ( i - 1 , 0 ) ] ) tgrad = np . array ( [ t_prime [ i ] * h_dx [ i ] for i in np . arange ( N ) ] ) grad = 2.0 * ( q - t ) . T . dot ( W ) . dot ( - 1.0 * tgrad ) + ( q - t ) . T . dot ( Wprime ) . dot ( q - t ) return grad
def process_status(self, helper, session, check):
    """Process a single status: query its OID over SNMP and record the
    decoded result with ``helper``. Unknown check names are ignored."""
    reading = helper.get_snmp_value(
        session, helper, DEVICE_GLOBAL_OIDS['oid_' + check])
    # Checks that share the generic normal_check() decoding, keyed by the
    # human-readable label used in the status message.
    simple_labels = {
        "system_lcd": "LCD status",
        "global_storage": "Storage status",
        "global_system": "Device status",
    }
    if check in simple_labels:
        helper.update_status(
            helper, normal_check("global", reading, simple_labels[check]))
    elif check == "system_power":
        # Power supplies need their own multi-state decoding.
        helper.update_status(helper, self.check_system_power_status(reading))
def get_sensor_reading(self, sensorname):
    """Get a sensor reading by name.

    Returns a single decoded sensor reading per the name passed in.

    :param sensorname: Name of the desired sensor
    :returns: sdr.SensorReading object
    :raises exc.IpmiException: when the BMC returns an error for the
        reading request
    """
    self.init_sdr()
    # Search the SDR for a sensor with the requested name.
    for sensor in self._sdr.get_sensor_numbers():
        if self._sdr.sensors[sensor].name == sensorname:
            # IPMI "Get Sensor Reading" (NetFn 0x04, cmd 0x2d).
            rsp = self.raw_command(command=0x2d, netfn=4, data=(sensor,))
            if 'error' in rsp:
                raise exc.IpmiException(rsp['error'], rsp['code'])
            return self._sdr.sensors[sensor].decode_sensor_reading(
                rsp['data'])
    # Not in the SDR: fall back to the OEM-specific sensor handler.
    self.oem_init()
    return self._oem.get_sensor_reading(sensorname)
def noisy_layer(self, prefix, action_in, out_size, sigma0, non_linear=True):
    r"""Factorized-Gaussian noisy dense layer.

    a common dense layer: y = w^{T}x + b
    a noisy layer: y = (w + \epsilon_w * \sigma_w)^{T}x +
        (b + \epsilon_b * \sigma_b)
    where \epsilon are random variables sampled from factorized normal
    distributions and \sigma are trainable variables which are expected to
    vanish along the training procedure.
    """
    in_size = int(action_in.shape[1])
    # Factorized noise: one vector per side; their outer product forms the
    # full weight-noise matrix, so only in_size + out_size samples are
    # drawn instead of in_size * out_size.
    epsilon_in = tf.random_normal(shape=[in_size])
    epsilon_out = tf.random_normal(shape=[out_size])
    # f_epsilon is the per-element noise transform (assumed to be the
    # sign(x)*sqrt(|x|) scaling from the NoisyNet paper -- TODO confirm).
    epsilon_in = self.f_epsilon(epsilon_in)
    epsilon_out = self.f_epsilon(epsilon_out)
    epsilon_w = tf.matmul(
        a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0))
    epsilon_b = epsilon_out
    # Trainable noise scales, initialized uniformly in +-1/sqrt(in_size).
    sigma_w = tf.get_variable(
        name=prefix + "_sigma_w",
        shape=[in_size, out_size],
        dtype=tf.float32,
        initializer=tf.random_uniform_initializer(
            minval=-1.0 / np.sqrt(float(in_size)),
            maxval=1.0 / np.sqrt(float(in_size))))
    # TF noise generation can be unreliable on GPU
    # If generating the noise on the CPU,
    # lowering sigma0 to 0.1 may be helpful
    sigma_b = tf.get_variable(
        name=prefix + "_sigma_b",
        shape=[out_size],
        dtype=tf.float32,
        # 0.5~GPU, 0.1~CPU
        initializer=tf.constant_initializer(sigma0 / np.sqrt(float(in_size))))
    # Deterministic (mean) weights and bias of the underlying dense layer.
    w = tf.get_variable(
        name=prefix + "_fc_w",
        shape=[in_size, out_size],
        dtype=tf.float32,
        initializer=layers.xavier_initializer())
    b = tf.get_variable(
        name=prefix + "_fc_b",
        shape=[out_size],
        dtype=tf.float32,
        initializer=tf.zeros_initializer())
    # Perturbed affine transform: (w + sigma_w*eps_w)^T x + (b + sigma_b*eps_b).
    action_activation = tf.nn.xw_plus_b(
        action_in, w + sigma_w * epsilon_w, b + sigma_b * epsilon_b)
    if not non_linear:
        return action_activation
    return tf.nn.relu(action_activation)
def render_tree(tree, list_all=True, show_only=None, frozen=False,
                exclude=None):
    """Convert tree to string representation.

    :param dict tree: the package tree
    :param bool list_all: whether to list all the pkgs at the root level or
        only those that are the sub-dependencies
    :param set show_only: set of select packages to be shown in the output.
        This is optional arg, default: None.
    :param bool frozen: whether or not show the names of the pkgs in the
        output that's favourable to pip --freeze
    :param set exclude: set of select packages to be excluded from the
        output. This is optional arg, default: None.
    :returns: string representation of the tree
    :rtype: str
    """
    tree = sorted_tree(tree)
    branch_keys = {r.key for r in flatten(tree.values())}
    use_bullets = not frozen
    key_tree = {pkg.key: deps for pkg, deps in tree.items()}

    def children_of(pkg):
        return key_tree.get(pkg.key, [])

    nodes = tree.keys()
    if show_only:
        nodes = [p for p in nodes
                 if p.key in show_only or p.project_name in show_only]
    elif not list_all:
        # Root level shows only packages nothing else depends on.
        nodes = [p for p in nodes if p.key not in branch_keys]

    def render_node(node, parent=None, indent=0, chain=None):
        if exclude and (node.key in exclude or node.project_name in exclude):
            return []
        if chain is None:
            chain = [node.project_name]
        text = node.render(parent, frozen)
        if parent:
            text = ' ' * indent + ('- ' if use_bullets else '') + text
        out = [text]
        for child in children_of(node):
            # `chain` tracks the current path to break dependency cycles.
            if child.project_name in chain:
                continue
            out.extend(render_node(child, node, indent + 2,
                                   chain + [child.project_name]))
        return out

    return '\n'.join(flatten([render_node(p) for p in nodes]))
def inset_sizes(cls, original_width, original_height, target_width,
                target_height):
    """Calculate new image sizes for inset (fit-inside) mode.

    The aspect ratio is preserved and the image is never upscaled; the
    result fits inside both the original size and the target box.

    :param original_width: int
    :param original_height: int
    :param target_width: int
    :param target_height: int
    :return: tuple(int, int) -- (width, height)
    """
    if target_width >= original_width and target_height >= original_height:
        # Target box fully contains the image: keep original size.
        # (Bug fix: previously the width was returned as a float because
        # of a stray float() conversion, breaking the documented
        # tuple(int, int) contract.)
        target_width = original_width
        target_height = original_height
    elif target_width <= original_width and target_height >= original_height:
        # Width-bound: shrink height by the width reduction factor.
        k = original_width / float(target_width)
        target_height = int(original_height / k)
    elif target_width >= original_width and target_height <= original_height:
        # Height-bound: shrink width by the height reduction factor.
        k = original_height / float(target_height)
        target_width = int(original_width / k)
    else:
        # Both dimensions shrink: the more constrained axis wins and the
        # other follows the original aspect ratio k = w/h.
        k = original_width / float(original_height)
        k_w = original_width / float(target_width)
        k_h = original_height / float(target_height)
        if k_w >= k_h:
            target_height = int(target_width / k)
        else:
            target_width = int(target_height * k)
    return target_width, target_height
def _jmomentsurfaceIntegrand ( vz , vR , vT , R , z , df , sigmaR1 , gamma , sigmaz1 , n , m , o ) : # pragma : no cover because this is too slow ; a warning is shown """Internal function that is the integrand for the vmomentsurface mass integration"""
return df ( R , vR * sigmaR1 , vT * sigmaR1 * gamma , z , vz * sigmaz1 , use_physical = False , func = ( lambda x , y , z : x ** n * y ** m * z ** o ) )
def _bits_to_geohash(value):
    """Convert a list of GeoHash bits to a GeoHash string.

    :param value: sequence of 0/1 bits, MSB first, whose length is a
        multiple of 5 (each 5-bit group selects one base32 character).
    :return: the GeoHash string.
    """
    ret = []
    # Get 5 bits at a time.
    # (Fix: use range() instead of the Python-2-only xrange(); range()
    # behaves identically here on both Python 2 and 3.)
    for chunk in (value[i:i + 5] for i in range(0, len(value), 5)):
        # Convert binary to integer.
        # Note: reverse here, so enumerate() weights the bits LSB-first.
        total = sum(bit * 2 ** count
                    for count, bit in enumerate(chunk[::-1]))
        ret.append(BASE32MAPR[total])
    # Join the string and return.
    return "".join(ret)
def _encode_params(data):
    """Encode parameters in a piece of data.

    If the data supplied is a dictionary (or an iterable of key/value
    pairs), encodes each parameter in it, and returns a list of tuples
    containing the encoded parameters, and a urlencoded version of that.
    Otherwise, assumes the data is already encoded appropriately, and
    returns it twice.
    """
    if hasattr(data, '__iter__'):
        data = dict(data)
    if hasattr(data, 'items'):
        result = []
        for k, vs in data.items():
            # Bug fix: the old ``isinstance(vs, list) and vs or [vs]``
            # idiom wrapped an *empty* list as [vs] (i.e. [[]]), emitting
            # the empty list itself as a parameter value. A conditional
            # expression handles [] correctly (no pairs emitted).
            values = vs if isinstance(vs, list) else [vs]
            for v in values:
                # Encode text (Python 2 `unicode`) as UTF-8 bytes; leave
                # already-encoded values untouched.
                result.append((
                    k.encode('utf-8') if isinstance(k, unicode) else k,
                    v.encode('utf-8') if isinstance(v, unicode) else v))
        return result, urllib.urlencode(result, doseq=True)
    else:
        return data, data
def add_command(self, command, *args, **kwargs):
    """Add a command. This is basically a wrapper for ``add_parser()``.

    :returns: the sub-parser created for *command*, so callers can attach
        arguments to it (previously the parser was created but discarded,
        making the wrapper's result unusable).
    """
    return self.add_parser(command, *args, **kwargs)
def showBindingsForActionSet(self, unSizeOfVRSelectedActionSet_t,
                             unSetCount, originToHighlight):
    """Shows the current binding of all the actions in the specified
    action sets.

    Auto-generated FFI wrapper around the underlying function table entry.
    Returns a (result, pSets) tuple.
    """
    fn = self.function_table.showBindingsForActionSet
    # NOTE(review): a fresh, empty VRActiveActionSet_t is passed by
    # reference rather than caller-supplied action sets -- presumably the
    # generator expects the API to fill it in, but for a "show bindings"
    # call the sets would normally be inputs. Confirm against the
    # IVRInput.ShowBindingsForActionSet API.
    pSets = VRActiveActionSet_t()
    result = fn(byref(pSets), unSizeOfVRSelectedActionSet_t, unSetCount,
                originToHighlight)
    return result, pSets
def default_setup():
    """The default API setup for lxc4u.

    Builds the object graph behind the API that is accessed globally from
    lxc4u: a loader that knows every LXC type, a manager on top of it, and
    the facade tying them together.
    """
    service = LXCService
    loader = LXCLoader(
        {
            'LXC': LXC,
            'LXCWithOverlays': LXCWithOverlays,
            '__default__': UnmanagedLXC,
        },
        service,
    )
    manager = LXCManager(loader, service)
    return LXCAPI(manager=manager, service=service)
def __get_charset(self):
    '''Return the character encoding (charset) used internally by MeCab.

    Charset is that of the system dictionary used by MeCab. Will defer to
    the user-specified MECAB_CHARSET environment variable, if set.

    Defaults to shift-jis on Windows.
    Defaults to utf-8 on Mac OS.
    Defaults to euc-jp, as per MeCab documentation, when all else fails.

    Returns:
        Character encoding (charset) used by MeCab.
    '''
    # 1) Explicit user override via the environment wins outright.
    cset = os.getenv(self.MECAB_CHARSET)
    if cset:
        logger.debug(self._DEBUG_CSET_DEFAULT.format(cset))
        return cset
    else:
        try:
            # 2) Ask the mecab binary to dump its dictionary info and
            #    parse the "charset" line out of it.
            res = Popen(['mecab', '-D'], stdout=PIPE).communicate()
            lines = res[0].decode()
            if not lines.startswith('unrecognized'):
                dicinfo = lines.split(os.linesep)
                t = [t for t in dicinfo if t.startswith('charset')]
                if len(t) > 0:
                    cset = t[0].split()[1].lower()
                    logger.debug(self._DEBUG_CSET_DEFAULT.format(cset))
                    return cset
                else:
                    # Dictionary info had no charset line.
                    # NOTE(review): on Python 3, EnvironmentError is an
                    # alias of OSError, so this raise is swallowed by the
                    # `except OSError` below and silently falls back to a
                    # platform default -- confirm this is intended.
                    logger.error('{}\n'.format(self._ERROR_NODIC))
                    raise EnvironmentError(self._ERROR_NODIC)
            else:
                # mecab itself rejected the -D invocation.
                logger.error('{}\n'.format(self._ERROR_NOCMD))
                raise EnvironmentError(self._ERROR_NOCMD)
        except OSError:
            # 3) mecab not runnable: fall back to per-platform defaults.
            cset = 'euc-jp'
            if sys.platform == 'win32':
                cset = 'shift-jis'
            elif sys.platform == 'darwin':
                cset = 'utf8'
            logger.debug(self._DEBUG_CSET_DEFAULT.format(cset))
            return cset
def popall(self, key):
    """Remove specified key and return all corresponding values.

    If the key is not found, an empty list is returned.

    (Documentation fix: the doctest examples previously had no expected
    output lines, so they could never pass under doctest.)

    >>> m = MutableMultiMap([('a', 1), ('b', 2), ('b', 3), ('c', 4)])
    >>> m.popall('a')
    [1]
    >>> m.popall('b')
    [2, 3]
    >>> m.popall('x')
    []
    """
    # Snapshot the values first, then delete; a missing key simply means
    # there is nothing to delete (getall already returned []).
    values = self.getall(key)
    try:
        del self[key]
    except KeyError:
        pass
    return values
def load_plugins ( self , plugin_class_name ) : """load all available plugins : param plugin _ class _ name : str , name of plugin class ( e . g . ' PreBuildPlugin ' ) : return : dict , bindings for plugins of the plugin _ class _ name class"""
# imp . findmodule ( ' atomic _ reactor ' ) doesn ' t work plugins_dir = os . path . join ( os . path . dirname ( __file__ ) , 'plugins' ) logger . debug ( "loading plugins from dir '%s'" , plugins_dir ) files = [ os . path . join ( plugins_dir , f ) for f in os . listdir ( plugins_dir ) if f . endswith ( ".py" ) ] if self . plugin_files : logger . debug ( "loading additional plugins from files '%s'" , self . plugin_files ) files += self . plugin_files plugin_class = globals ( ) [ plugin_class_name ] plugin_classes = { } for f in files : module_name = os . path . basename ( f ) . rsplit ( '.' , 1 ) [ 0 ] # Do not reload plugins if module_name in sys . modules : f_module = sys . modules [ module_name ] else : try : logger . debug ( "load file '%s'" , f ) f_module = imp . load_source ( module_name , f ) except ( IOError , OSError , ImportError , SyntaxError ) as ex : logger . warning ( "can't load module '%s': %r" , f , ex ) continue for name in dir ( f_module ) : binding = getattr ( f_module , name , None ) try : # if you try to compare binding and PostBuildPlugin , python won ' t match them # if you call this script directly b / c : # ! < class ' plugins . plugin _ rpmqa . PostBuildRPMqaPlugin ' > < = < class # ' _ _ main _ _ . PostBuildPlugin ' > # but # < class ' plugins . plugin _ rpmqa . PostBuildRPMqaPlugin ' > < = < class # ' atomic _ reactor . plugin . PostBuildPlugin ' > is_sub = issubclass ( binding , plugin_class ) except TypeError : is_sub = False if binding and is_sub and plugin_class . __name__ != binding . __name__ : plugin_classes [ binding . key ] = binding return plugin_classes
def _finalize_upload(self, ud):
    # type: (Uploader, blobxfer.models.upload.Descriptor) -> None
    """Finalize file upload, dispatching on the descriptor's target type.

    :param Uploader self: this
    :param blobxfer.models.upload.Descriptor ud: upload descriptor
    """
    metadata = ud.generate_metadata()
    if ud.requires_put_block_list:
        # put block list for non one-shot block blobs
        self._finalize_block_blob(ud, metadata)
    elif ud.remote_is_page_blob or ud.remote_is_append_blob:
        # append and page blob finalization
        self._finalize_nonblock_blob(ud, metadata)
    elif ud.remote_is_file:
        # azure file finalization
        self._finalize_azure_file(ud, metadata)
    # set access tier (done last, after the blob exists remotely)
    if ud.requires_access_tier_set:
        blobxfer.operations.azure.blob.block.set_blob_access_tier(ud.entity)
def download_package(self, feed_id, group_id, artifact_id, version,
                     file_name):
    """DownloadPackage.

    [Preview API] Fulfills maven package file download requests by
    returning the url of the package file requested.

    :param str feed_id: Name or ID of the feed.
    :param str group_id: GroupId of the maven package
    :param str artifact_id: ArtifactId of the maven package
    :param str version: Version of the package
    :param str file_name: File name to download
    :rtype: object
    """
    # (route key, serializer parameter name, value) -- None values are
    # simply omitted from the route.
    param_specs = (
        ('feedId', 'feed_id', feed_id),
        ('groupId', 'group_id', group_id),
        ('artifactId', 'artifact_id', artifact_id),
        ('version', 'version', version),
        ('fileName', 'file_name', file_name),
    )
    route_values = {}
    for route_key, param_name, param_value in param_specs:
        if param_value is not None:
            route_values[route_key] = self._serialize.url(
                param_name, param_value, 'str')
    response = self._send(http_method='GET',
                          location_id='c338d4b5-d30a-47e2-95b7-f157ef558833',
                          version='5.1-preview.1',
                          route_values=route_values)
    return self._deserialize('object', response)
def find_identifier(self):
    """Find a unique identifier for each feature, create it if needed.

    Returns a dotted accessor path ('feature.id' or
    'feature.properties.<key>'). When nothing unique exists and
    ``self.embed`` is true, sequential string ids are written onto the
    features and 'feature.id' is returned.

    :raises ValueError: when no unique identifier exists and
        ``embed=False`` prevents adding one.
    """
    features = self.data['features']
    n = len(features)
    # Robustness fixes: tolerate an empty feature list (was IndexError),
    # features missing the probed key (was KeyError), and unhashable
    # property values such as lists (was TypeError).
    if features:
        feature = features[0]
        # A feature-level 'id' wins if every feature has a distinct one;
        # .get() treats a missing id as None so uniqueness simply fails.
        if 'id' in feature and len({feat.get('id') for feat in features}) == n:
            return 'feature.id'
        for key in feature.get('properties', []):
            try:
                values = {feat['properties'][key] for feat in features}
            except (KeyError, TypeError):
                # Key absent on some feature, or value unhashable:
                # this property cannot serve as an identifier.
                continue
            if len(values) == n:
                return 'feature.properties.{}'.format(key)
    if self.embed:
        # No usable identifier: synthesize sequential string ids in place.
        for i, feature in enumerate(features):
            feature['id'] = str(i)
        return 'feature.id'
    raise ValueError(
        'There is no unique identifier for each feature and because '
        '`embed=False` it cannot be added. Consider adding an `id` '
        'field to your geojson data or set `embed=True`. '
    )