signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def merge(self, merge_area, tab):
    """Merge the top-left cell with all cells until the bottom-right cell.

    Aborts with a status-bar message if any cell in the target area already
    belongs to another merge area.

    Parameters
    ----------
    merge_area : tuple of int
        (top, left, bottom, right) bounds of the area to merge.
    tab : int
        Table/sheet index the merge applies to.
    """
    top, left, bottom, right = merge_area
    cursor = self.grid.actions.cursor
    # Preserve the top-left cell's code; the rest of the area gets cleared.
    top_left_code = self.code_array((top, left, cursor[2]))
    selection = Selection([(top, left)], [(bottom, right)], [], [], [])
    # Check if the merge area overlaps another merge area
    error_msg = _("Overlapping merge area at {} prevents merge.")
    for row in xrange(top, bottom + 1):
        for col in xrange(left, right + 1):
            key = row, col, tab
            if self.code_array.cell_attributes[key]["merge_area"]:
                post_command_event(self.main_window, self.StatusBarMsg,
                                   text=error_msg.format(str(key)))
                return
    # Clear the area, then restore the preserved code in the top-left cell.
    self.delete_selection(selection)
    self.set_code((top, left, cursor[2]), top_left_code)
    # Merged cells are locked, except the top-left cell which stays editable.
    attr = {"merge_area": merge_area, "locked": True}
    self._set_cell_attr(selection, tab, attr)
    tl_selection = Selection([], [], [], [], [(top, left)])
    attr = {"locked": False}
    self._set_cell_attr(tl_selection, tab, attr)
def _get_input_data(self, var, start_date, end_date):
    """Get the data for a single variable over the desired date range.

    Numeric constants are passed through unchanged; real variables are
    loaded via the data loader, given grid attributes, optionally moved
    from half to full pressure levels, and restricted to the date range.
    """
    logging.info(self._print_verbose("Getting input data:", var))
    # Scalar "variables" (constants) need no loading at all.
    if isinstance(var, (float, int)):
        return var
    else:
        # Remember whether the pfull coordinate still needs capturing
        # (only relevant for eta-coordinate vertical input data).
        cond_pfull = ((not hasattr(self, internal_names.PFULL_STR))
                      and var.def_vert
                      and self.dtype_in_vert == internal_names.ETA_STR)
        data = self.data_loader.recursively_compute_variable(
            var, start_date, end_date, self.time_offset, self.model,
            **self.data_loader_attrs)
        name = data.name
        # Round-trip through a Dataset so grid attributes can be attached.
        data = self._add_grid_attributes(data.to_dataset(name=data.name))
        data = data[name]
        if cond_pfull:
            try:
                self.pfull_coord = data[internal_names.PFULL_STR]
            except KeyError:
                pass
        # Force all data to be at full pressure levels, not half levels.
        bool_to_pfull = (self.dtype_in_vert == internal_names.ETA_STR
                         and var.def_vert == internal_names.PHALF_STR)
        if bool_to_pfull:
            data = utils.vertcoord.to_pfull_from_phalf(data,
                                                       self.pfull_coord)
        if var.def_time:
            # Restrict to the desired dates within each year.
            if self.dtype_in_time != 'av':
                return self._to_desired_dates(data)
            else:
                return data
        # NOTE(review): when var.def_time is False, control falls through
        # and the method returns None — presumably unintended; confirm
        # against callers before relying on this path.
def orthonormal_initializer(output_size, input_size, debug=False):
    """Build an orthonormal weight matrix via gradient descent on
    ||Q^T Q - I||^2 / 2.

    Adopted from Timothy Dozat:
    https://github.com/tdozat/Parser/blob/master/lib/linalg.py

    Parameters
    ----------
    output_size : int
    input_size : int
    debug : bool
        If True, skip orthogonalization and return a plain random matrix.

    Returns
    -------
    Q : np.ndarray
        Weight matrix of shape (output_size, input_size), dtype float32.
    """
    print((output_size, input_size))

    def _random_matrix():
        return np.random.randn(input_size, output_size) / np.sqrt(output_size)

    if debug:
        return np.transpose(_random_matrix().astype(np.float32))

    identity = np.eye(output_size)
    lr = .1
    eps = .05 / (output_size + input_size)
    success, tries = False, 0
    while not success and tries < 10:
        Q = _random_matrix()
        for _ in range(100):
            QTQmI = Q.T.dot(Q) - identity
            loss = np.sum(QTQmI ** 2 / 2)
            Q2 = Q ** 2
            denom = np.abs(Q2 + Q2.sum(axis=0, keepdims=True)
                           + Q2.sum(axis=1, keepdims=True) - 1) + eps
            Q -= lr * Q.dot(QTQmI) / denom
            if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):
                # Diverged: halve the learning rate and try a fresh draw.
                tries += 1
                lr /= 2
                break
        # NOTE: set unconditionally after the inner loop, matching the
        # upstream implementation (even when the loop broke early).
        success = True
    if success:
        print(('Orthogonal pretrainer loss: %.2e' % loss))
    else:
        print('Orthogonal pretrainer failed, using non-orthogonal random matrix')
        Q = _random_matrix()
    return np.transpose(Q.astype(np.float32))
def push_intent(self, intent):
    """Register a new intent or update an existing one.

    Returns the intent, which carries an ID after registration.
    """
    if not intent.id:
        # No ID yet: register and return the server-assigned intent.
        print('Registering {} intent'.format(intent.name))
        return self.register(intent)
    print('Updating {} intent'.format(intent.name))
    self.update(intent)
    return intent
def parse_extension_item(header: str, pos: int, header_name: str) -> Tuple[ExtensionHeader, int]:
    """Parse one extension definition from ``header`` starting at ``pos``.

    Return an ``(extension name, parameters)`` pair — ``parameters`` being a
    list of ``(name, value)`` pairs — together with the new position.

    Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
    """
    # The extension name comes first, optionally followed by whitespace.
    name, pos = parse_token(header, pos, header_name)
    pos = parse_OWS(header, pos)
    # Zero or more ";"-separated parameters follow.
    params = []
    while peek_ahead(header, pos) == ";":
        pos = parse_OWS(header, pos + 1)
        param, pos = parse_extension_item_param(header, pos, header_name)
        params.append(param)
    return (name, params), pos
def setTimeout(self, time):
    """Set the global timeout, in seconds, for all DDE calls.

    The value is rounded to the nearest integer before being applied;
    returns the timeout the DDE conversation reports back.
    """
    conversation = self.conversation
    conversation.SetDDETimeout(round(time))
    return conversation.GetDDETimeout()
def get_decoder_component(name, input_layer, encoded_layer, head_num, hidden_dim,
                          attention_activation=None, feed_forward_activation='relu',
                          dropout_rate=0.0, trainable=True):
    """Build one decoder block: masked multi-head self-attention, multi-head
    query attention over the encoder output, and a feed-forward layer.

    :param name: Prefix of names for internal layers.
    :param input_layer: Input layer.
    :param encoded_layer: Encoded layer from encoder.
    :param head_num: Number of heads in multi-head attention.
    :param hidden_dim: Hidden dimension of feed forward layer.
    :param attention_activation: Activation for multi-head attention.
    :param feed_forward_activation: Activation for feed-forward layer.
    :param dropout_rate: Dropout rate.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    self_attention_name = '%s-MultiHeadSelfAttention' % name
    query_attention_name = '%s-MultiHeadQueryAttention' % name
    feed_forward_name = '%s-FeedForward' % name
    # Masked (history-only) self-attention over the decoder input.
    self_attention_layer = _wrap_layer(
        name=self_attention_name,
        input_layer=input_layer,
        build_func=attention_builder(
            name=self_attention_name,
            head_num=head_num,
            activation=attention_activation,
            history_only=True,
            trainable=trainable,
        ),
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
    # Unmasked attention querying the encoder output (keys/values).
    query_attention_layer = _wrap_layer(
        name=query_attention_name,
        input_layer=[self_attention_layer, encoded_layer, encoded_layer],
        build_func=attention_builder(
            name=query_attention_name,
            head_num=head_num,
            activation=attention_activation,
            history_only=False,
            trainable=trainable,
        ),
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
    # Position-wise feed-forward layer on top.
    return _wrap_layer(
        name=feed_forward_name,
        input_layer=query_attention_layer,
        build_func=feed_forward_builder(
            name=feed_forward_name,
            hidden_dim=hidden_dim,
            activation=feed_forward_activation,
            trainable=trainable,
        ),
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
def attach(cls, tuning_job_name, sagemaker_session=None, job_details=None, estimator_cls=None):
    """Attach to an existing hyperparameter tuning job.

    Creates a HyperparameterTuner bound to an existing hyperparameter tuning
    job. After attaching, a completed training job can be deployed to create
    an Amazon SageMaker Endpoint and return a ``Predictor``.

    Args:
        tuning_job_name (str): Name of the tuning job to attach to.
        sagemaker_session (sagemaker.session.Session): Session managing
            interactions with SageMaker APIs; a default one is created when
            not supplied.
        job_details (dict): Response to a ``DescribeHyperParameterTuningJob``
            call; fetched automatically when not supplied.
        estimator_cls (str): Estimator class name associated with the
            training jobs (e.g. 'sagemaker.estimator.Estimator'); derived
            from job metadata when not supplied.

    Returns:
        sagemaker.tuner.HyperparameterTuner: Instance bound to the job.
    """
    sagemaker_session = sagemaker_session or Session()
    if job_details is None:
        job_details = sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(
            HyperParameterTuningJobName=tuning_job_name)
    training_def = job_details['TrainingJobDefinition']
    estimator_cls = cls._prepare_estimator_cls(estimator_cls, training_def)
    estimator = cls._prepare_estimator_from_job_description(
        estimator_cls, training_def, sagemaker_session)
    init_params = cls._prepare_init_params_from_job_description(job_details)
    tuner = cls(estimator=estimator, **init_params)
    tuner.latest_tuning_job = _TuningJob(
        sagemaker_session=sagemaker_session, job_name=tuning_job_name)
    return tuner
def license_fallback(vendor_dir, sdist_name):
    """Download a license from a hardcoded URL for libraries whose sdist
    ships no license file.

    Check when updating whether the hardcoded URLs are still needed.

    Raises:
        ValueError: when no hardcoded URL is known for the library.
    """
    libname = libname_from_dir(sdist_name)
    if libname not in HARDCODED_LICENSE_URLS:
        raise ValueError('No hardcoded URL for {} license'.format(libname))
    url = HARDCODED_LICENSE_URLS[libname]
    _, _, name = url.rpartition('/')
    dest = license_destination(vendor_dir, libname, name)
    # Log before issuing the request — the original logged after
    # requests.get() returned, i.e. after the download had finished.
    log('Downloading {}'.format(url))
    r = requests.get(url, allow_redirects=True)
    r.raise_for_status()
    dest.write_bytes(r.content)
def help_center_article_subscription_show(self, article_id, id, locale=None, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#show-article-subscription"
    # The locale-specific endpoint takes precedence when a locale is given.
    if locale:
        api_path = (
            "/api/v2/help_center/{locale}/articles/{article_id}"
            "/subscriptions/{id}.json"
        ).format(article_id=article_id, id=id, locale=locale)
    else:
        api_path = (
            "/api/v2/help_center/articles/{article_id}"
            "/subscriptions/{id}.json"
        ).format(article_id=article_id, id=id)
    return self.call(api_path, **kwargs)
def decorate(self, pos, widget, is_first=True):
    """Build a list element for the given position in the tree.

    The element consists of the original widget taken from the tree and
    decoration columns that depend on the existence of parent and sibling
    positions. The result is a urwid.Columns widget, or None if ``pos``
    is None.
    """
    line = None
    if pos is not None:
        original_widget = widget
        # Spacer columns for all ancestor levels.
        cols = self._construct_spacer(pos, [])
        # Construct arrow leading from parent here,
        # if we have a parent and indentation is turned on
        if self._indent > 0:
            if is_first:
                indent = self._construct_first_indent(pos)
                if indent is not None:
                    cols = cols + indent
            else:
                parent = self._tree.parent_position(pos)
                if self._indent > 0 and parent is not None:
                    # Draw a vertical bar when this node has a next sibling.
                    parent_sib = self._tree.next_sibling_position(pos)
                    draw_vbar = parent_sib is not None
                    void = urwid.AttrMap(urwid.SolidFill(' '),
                                         self._arrow_att)
                    if self._childbar_offset > 0:
                        cols.append((self._childbar_offset, void))
                    if draw_vbar:
                        barw = urwid.SolidFill(self._arrow_vbar_char)
                        bar = urwid.AttrMap(barw,
                                            self._arrow_vbar_att
                                            or self._arrow_att)
                        rspace_width = (self._indent - 1
                                        - self._childbar_offset)
                        cols.append((1, bar))
                        cols.append((rspace_width, void))
                    else:
                        cols.append((self._indent, void))
        # add the original widget for this line
        cols.append(original_widget)
        # construct a Columns, defining all spacer as Box widgets
        line = urwid.Columns(cols, box_columns=range(len(cols))[:-1])
    return line
def shape_based_slice_insertation(sl1, sl2, dim, nslices, order=3):
    """Insert ``nslices`` new slices between ``sl1`` and ``sl2`` along
    dimension ``dim`` using shape-based binary interpolation.

    Extrapolation (when one slice is empty) adds ``nslices``/2 step-wise
    eroded copies of the non-empty slice in each direction.

    Parameters
    ----------
    sl1 : array_like
        First slice. Treated as binary data.
    sl2 : array_like
        Second slice. Treated as binary data.
    dim : int
        The new dimension along which to add the new slices.
    nslices : int
        The number of slices to add.
    order : int
        The b-spline interpolation order for re-sampling the distance maps.

    Returns
    -------
    out : ndarray
        A binary image of ``sl1.shape`` extended by ``nslices`` + 2 along the
        new dimension ``dim``. The border slices are the originals.
    """
    # numpy.bool was removed from modern numpy; builtin bool is equivalent.
    sl1 = sl1.astype(bool)
    sl2 = sl2.astype(bool)
    # Extrapolation through erosion when one side is empty. Integer floor
    # division (//) replaces the Python 2 "/" so range() receives ints on
    # Python 3 as well.
    if 0 == numpy.count_nonzero(sl1):
        slices = [sl1]
        for _ in range(nslices // 2):
            slices.append(numpy.zeros_like(sl1))
        for i in reversed(range(1, nslices // 2 + nslices % 2 + 1)):
            slices.append(binary_erosion(sl2, iterations=i))
        slices.append(sl2)
        return numpy.rollaxis(numpy.asarray(slices), 0, dim + 1)
    elif 0 == numpy.count_nonzero(sl2):
        slices = [sl1]
        for i in range(1, nslices // 2 + 1):
            slices.append(binary_erosion(sl1, iterations=i))
        for _ in range(0, nslices // 2 + nslices % 2):
            slices.append(numpy.zeros_like(sl2))
        slices.append(sl2)
        return numpy.rollaxis(numpy.asarray(slices), 0, dim + 1)
    # Shape-based interpolation: signed distance maps (negative inside the
    # object), interpolated along the new axis, thresholded at zero.
    # note: distance_transform_edt shows strange behaviour for ones-arrays
    dt1 = distance_transform_edt(~sl1) - distance_transform_edt(sl1)
    dt2 = distance_transform_edt(~sl2) - distance_transform_edt(sl2)
    slicer = [slice(None)] * dt1.ndim
    # Modern numpy requires a tuple (not a list) for multi-axis indexing.
    slicer = tuple(slicer[:dim] + [numpy.newaxis] + slicer[dim:])
    out = numpy.concatenate((dt1[slicer], dt2[slicer]), axis=dim)
    zoom_factors = [1] * dt1.ndim
    # Grow the 2-slice axis to nslices + 2 samples.
    zoom_factors = zoom_factors[:dim] + [(nslices + 2) / 2.] + zoom_factors[dim:]
    out = zoom(out, zoom_factors, order=order)
    return out <= 0
def gep(self, i):
    """Resolve the type of the i-th element (for getelementptr lookups).

    The index operand must be integer-typed; a pointer always resolves to
    its pointee type regardless of the index value.
    """
    if isinstance(i.type, IntType):
        return self.pointee
    raise TypeError(i.type)
def _proxy(self):
    """Lazily build and cache the context for this instance.

    All instance actions are proxied to the context.

    :returns: TaskQueueContext for this TaskQueueInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueContext
    """
    if self._context is None:
        solution = self._solution
        self._context = TaskQueueContext(
            self._version,
            workspace_sid=solution['workspace_sid'],
            sid=solution['sid'],
        )
    return self._context
def _check_min_max_indices ( self , min_index = None , max_index = None ) : """Ensure the given start / end fragment indices make sense : if one of them is ` ` None ` ` ( i . e . , not specified ) , then set it to ` ` 0 ` ` or ` ` len ( self ) ` ` ."""
min_index = min_index or 0 max_index = max_index or len ( self ) if min_index < 0 : self . log_exc ( u"min_index is negative" , None , True , ValueError ) if max_index > len ( self ) : self . log_exc ( u"max_index is bigger than the number of intervals in the list" , None , True , ValueError ) return min_index , max_index
def point(self, t):
    """Return the coordinates of the Bezier curve evaluated at t.

    For this (linear) segment that is a straight interpolation from
    ``start`` (t=0) to ``end`` (t=1).
    """
    return self.start + (self.end - self.start) * t
def supply_and_demand(lcm, choosers, alternatives, alt_segmenter, price_col,
                      base_multiplier=None, clip_change_low=0.75,
                      clip_change_high=1.25, iterations=5,
                      multiplier_func=None):
    """Adjust real estate prices to compensate for supply and demand effects.

    Parameters
    ----------
    lcm : LocationChoiceModel
        Fully configured and fitted; used to compute choice probabilities.
    choosers : pandas.DataFrame
    alternatives : pandas.DataFrame
    alt_segmenter : str, array, or pandas.Series
        Segments alternatives/probabilities for submarket supply/demand
        comparisons. A str names a column in `alternatives`; a Series must
        share `alternatives`' index.
    price_col : str
        Name of the price column in `alternatives` adjusted by this model.
    base_multiplier : pandas.Series, optional
        Starting multiplier for submarket prices, indexed by submarket ID.
    clip_change_low, clip_change_high : float, optional
        Min/max amount by which to multiply prices each iteration.
    iterations : int, optional
        Number of supply/demand price-update rounds.
    multiplier_func : function (returns Series, boolean), optional
        Maps demand and supply Series to (price ratio Series, converged flag);
        by default the demand/supply ratio is used as the price ratio.

    Returns
    -------
    new_prices : pandas.Series
        Equivalent of the `price_col` in `alternatives`.
    submarkets_ratios : pandas.Series
        Price adjustment ratio per submarket, cumulative with
        `base_multiplier` when one was given.
    """
    logger.debug('start: calculating supply and demand price adjustment')
    # copy alternatives so we don't modify the user's original
    alternatives = alternatives.copy()
    # if alt_segmenter is a string, get the actual column for segmenting demand
    if isinstance(alt_segmenter, str):
        alt_segmenter = alternatives[alt_segmenter]
    elif isinstance(alt_segmenter, np.ndarray):
        # BUG FIX: the original checked isinstance(..., np.array); np.array
        # is a factory function, not a type, so that check raised TypeError.
        alt_segmenter = pd.Series(alt_segmenter, index=alternatives.index)

    choosers, alternatives = lcm.apply_predict_filters(choosers, alternatives)
    alt_segmenter = alt_segmenter.loc[alternatives.index]

    # check base ratio and apply it to prices if given
    if base_multiplier is not None:
        bm = base_multiplier.loc[alt_segmenter]
        bm.index = alt_segmenter.index
        alternatives[price_col] = alternatives[price_col] * bm
        base_multiplier = base_multiplier.copy()

    for _ in range(iterations):
        alts_multiplier, submarkets_multiplier, finished = _calculate_adjustment(
            lcm, choosers, alternatives, alt_segmenter,
            clip_change_low, clip_change_high, multiplier_func=multiplier_func)
        alternatives[price_col] = alternatives[price_col] * alts_multiplier

        # might need to initialize this for holding the cumulative multiplier
        if base_multiplier is None:
            base_multiplier = pd.Series(
                np.ones(len(submarkets_multiplier)),
                index=submarkets_multiplier.index)

        base_multiplier *= submarkets_multiplier

        if finished:
            break

    logger.debug('finish: calculating supply and demand price adjustment')
    return alternatives[price_col], base_multiplier
def emit(self, span_datas):
    """Export the given spans to the agent over gRPC.

    :type span_datas: list of :class:`~opencensus.trace.span_data.SpanData`
    :param span_datas: SpanData tuples to emit
    """
    # TODO: keep the stream alive.
    # The stream is terminated after iteration completes. To keep it
    # alive, we can enqueue proto spans here and asynchronously read
    # them and send to the agent.
    try:
        requests = self.generate_span_requests(span_datas)
        responses = self.client.Export(requests)
        # Drain the response stream.
        for _ in responses:
            pass
    except grpc.RpcError:
        # Best effort: an unreachable agent must not crash the caller.
        pass
def _get_dataruns ( self ) : '''Returns a list of dataruns , in order .'''
if self . _data_runs is None : raise DataStreamError ( "Resident datastream don't have dataruns" ) if not self . _data_runs_sorted : self . _data_runs . sort ( key = _itemgetter ( 0 ) ) self . _data_runs_sorted = True return [ data [ 1 ] for data in self . _data_runs ]
def updateidf(idf, dct):
    """Update `idf` in place from a flat key -> value mapping.

    Only keys starting with 'idf.' are applied. Each key encodes
    (tag, object key, object name, field) via `key2elements`; objects
    missing from the IDF are created on demand.
    """
    for key, value in dct.items():
        if not key.startswith('idf.'):
            continue
        idftag, objkey, objname, field = key2elements(key)
        if objname == '':
            # Anonymous object: use the first of its kind, creating one
            # when none exists yet.
            try:
                idfobj = idf.idfobjects[objkey.upper()][0]
            except IndexError:
                idfobj = idf.newidfobject(objkey.upper())
        else:
            idfobj = idf.getobject(objkey.upper(), objname)
            # `is None` replaces the original `== None` identity-by-equality.
            if idfobj is None:
                idfobj = idf.newidfobject(objkey.upper(), Name=objname)
        idfobj[field] = value
def _freecpu(conn):
    '''Internal variant of freecpu taking a libvirt connection as parameter'''
    # Total host CPUs minus vCPUs consumed by running domains (ID > 0).
    total = conn.getInfo()[2]
    used = sum(dom.info()[3]
               for dom in _get_domain(conn, iterable=True)
               if dom.ID() > 0)
    return total - used
def value_for_keypath(obj, path):
    """Get value from walking key path with start object obj.

    Dotted segments are attribute/key lookups; segments matching
    ``list_index_re`` additionally index into a list or tuple.
    Returns None as soon as any intermediate value is None.
    """
    current = obj
    for segment in path.split('.'):
        match = re.match(list_index_re, segment)
        if match is None:
            current = _extract(current, segment)
        else:
            current = _extract(current, match.group(1))
            if not isinstance(current, (list, tuple)):
                raise TypeError('expected list/tuple')
            current = current[int(match.group(2))]
        if current is None:
            return None
    return current
def add_to(self, parent, additions):
    "Modify parent to include all elements in additions"
    for x in additions:
        if x not in parent:
            parent.append(x)
            # NOTE(review): changed() is notified once per appended element
            # here; the flattened source is ambiguous about whether this
            # call sits inside the if or after the loop — confirm against
            # callers before relying on the notification count.
            self.changed()
def set_ylim(self, xlims, dx, xscale, reverse=False):
    """Set y limits for plot.

    This will set the limits for the y axis for the specific plot.

    Args:
        xlims (len-2 list of floats): The limits for the axis.
        dx (float): Amount to increment by between the limits.
        xscale (str): Scale of the axis. Either `log` or `lin`.
        reverse (bool, optional): If True, reverse the axis tick marks.
            Default is False.

    NOTE(review): the parameter names say "x" (xlims, dx, xscale) but this
    configures the y axis — likely copied from a set_xlim sibling. Renaming
    would break keyword callers, so the names are left untouched.
    """
    self._set_axis_limits('y', xlims, dx, xscale, reverse)
    return
def _read_modeling_results(self, directory, silent=False):
    """Read modeling results from a given mod/ directory.

    Possible values to read in are: voltages, potentials, sensitivities.

    Parameters
    ----------
    directory : str
        Directory containing 'volt.dat' plus optional 'sens/' and 'pot/'
        subdirectories.
    silent : bool
        If True, suppress the 'reading voltages' message. NOTE(review):
        the sensitivity/potential messages print regardless of `silent` —
        possibly an oversight; confirm intended behavior.
    """
    voltage_file = directory + os.sep + 'volt.dat'
    if os.path.isfile(voltage_file):
        if not silent:
            print('reading voltages')
        self.read_voltages(voltage_file)
    # check if there are sensitivity files, and that the nr corresponds to
    # the nr of configs
    sens_files = sorted(glob(directory + os.sep + 'sens' + os.sep + 'sens*.dat'))
    if (len(sens_files) > 0 and
            len(sens_files) == self.configs.nr_of_configs):
        print('reading sensitivities')
        self._read_sensitivities(directory + os.sep + 'sens')
    # same for potentials
    pot_files = sorted(glob(directory + os.sep + 'pot' + os.sep + 'pot*.dat'))
    if (len(pot_files) > 0 and
            len(pot_files) == self.configs.nr_of_configs):
        print('reading potentials')
        self._read_potentials(directory + os.sep + 'pot')
def extract_source_geom(dstore, srcidxs):
    """Yield (source_id, geometry) for each comma-separated source index.

    Example: http://127.0.0.1:8800/v1/calc/30/extract/source_geom/1,2,3
    """
    for idx in srcidxs.split(','):
        info = dstore['source_info'][int(idx)]
        # The geometry rows for a source live in the [gidx1, gidx2) slab.
        geometry = dstore['source_geom'][info['gidx1']:info['gidx2']]
        yield info['source_id'], geometry
def tg90p(tas, t90, freq='YS'):
    r"""Number of days with daily mean temperature over the 90th percentile.

    Parameters
    ----------
    tas : xarray.DataArray
        Mean daily temperature [°C] or [K].
    t90 : xarray.DataArray
        90th percentile of daily mean temperature [°C] or [K].
    freq : str, optional
        Resampling frequency.

    Returns
    -------
    xarray.DataArray
        Count of days with daily mean temperature over the 90th
        percentile [days].

    Notes
    -----
    The 90th percentile should be computed for a 5 day window centered on
    each calendar day for a reference period.

    Example
    -------
    >>> t90 = percentile_doy(historical_tas, per=0.9)
    >>> hot_days = tg90p(tas, t90)
    """
    if 'dayofyear' not in t90.coords.keys():
        # BUG FIX: the original message referred to "t10" although the
        # argument being validated is t90.
        raise AttributeError("t90 should have dayofyear coordinates.")

    t90 = utils.convert_units_to(t90, tas)

    # adjustment of t90 to the tas day-of-year calendar range
    t90 = utils.adjust_doy_calendar(t90, tas)

    # create an array of percentile thresholds with tas shape and coords
    thresh = xr.full_like(tas, np.nan)
    doy = thresh.time.dt.dayofyear.values
    thresh.data = t90.sel(dayofyear=doy)

    # count the days exceeding the threshold (hot days)
    over = (tas > thresh)
    return over.resample(time=freq).sum(dim='time')
def do_preprocess(defn):
    """Run a string through the C preprocessor that ships with pycparser
    but is weirdly inaccessible?"""
    from pycparser.ply import lex, cpp
    lexer = lex.lex(cpp)
    preprocessor = cpp.Preprocessor(lexer)
    # preprocessor.add_path(dir) would add dir to the include search path
    preprocessor.parse(defn)
    # Reassemble everything the preprocessor emits, minus ignored tokens.
    kept = (tok.value for tok in preprocessor.parser
            if tok.type not in preprocessor.ignore)
    return ''.join(kept)
def _goto(self, pose, duration, wait, accurate):
    """Goes to a given cartesian pose.

    :param matrix pose: homogeneous matrix representing the target position
    :param float duration: move duration
    :param bool wait: whether to wait for the end of the move
    :param bool accurate: trade-off between accurate solution and
        computation time. By default, use the not so accurate but fast
        version.
    """
    # The fast (inaccurate) variant caps the IK solver iterations.
    ik_kwargs = {} if accurate else {'max_iter': 3}
    q0 = self.convert_to_ik_angles(self.joints_position)
    q = self.inverse_kinematics(pose, initial_position=q0, **ik_kwargs)
    joint_targets = self.convert_from_ik_angles(q)
    last = self.motors[-1]
    for motor, target in zip(self.motors, joint_targets):
        # Only block on the last motor so all moves run simultaneously.
        motor.goto_position(target, duration,
                            wait=False if motor != last else wait)
def set_cookie(cookies, key, value='', max_age=None, expires=None, path='/',
               domain=None, secure=False, httponly=False):
    '''Set a cookie key into the cookies dictionary *cookies*.'''
    cookies[key] = value
    morsel = cookies[key]
    if expires is not None:
        if isinstance(expires, datetime):
            # Convert an absolute datetime into a relative max-age,
            # respecting timezone-aware values.
            now = (expires.now(expires.tzinfo) if expires.tzinfo
                   else expires.utcnow())
            delta = expires - now
            # Add one second so the date matches exactly (a fraction of
            # time gets lost between converting to a timedelta and
            # then the date string).
            delta = delta + timedelta(seconds=1)
            # Just set max_age - the max_age logic will set expires.
            expires = None
            max_age = max(0, delta.days * 86400 + delta.seconds)
        else:
            morsel['expires'] = expires
    if max_age is not None:
        morsel['max-age'] = max_age
        # IE requires expires, so set it if it hasn't been already.
        if not expires:
            morsel['expires'] = http_date(time.time() + max_age)
    if path is not None:
        morsel['path'] = path
    if domain is not None:
        morsel['domain'] = domain
    if secure:
        morsel['secure'] = True
    if httponly:
        morsel['httponly'] = True
def get_type_data(name):
    """Return dictionary representation of type.

    Can be used to initialize primordium.type.primitives.Type.
    Raises NotFound when the name is not a known unit system.
    """
    name = name.upper()
    try:
        label = JEFFS_UNIT_SYSTEM_TYPES[name]
    except KeyError:
        raise NotFound('Unit System Type: ' + name)
    return {
        'authority': 'birdland.mit.edu',
        'namespace': 'unit system',
        'identifier': name,
        'domain': 'Unit System Types',
        'display_name': label + ' Unit System Type',
        'display_label': label,
        'description': ('The unit system type for the ' + label + ' System'),
    }
def has_unclosed_brackets(text):
    """Scan *text* from its end and report whether it contains an opening
    bracket for which no closing bracket was seen yet.
    """
    # Ignore braces inside strings.
    text = re.sub(r'''('[^']*'|"[^"]*")''', '', text)  # XXX: handle escaped quotes.!
    closer_for = {'[': ']', '(': ')', '{': '}'}
    pending = []
    for ch in reversed(text):
        if ch in '])}':
            pending.append(ch)
        elif ch in closer_for:
            if not pending:
                # Opening bracket for which we didn't have a closing one.
                return True
            if pending[-1] == closer_for[ch]:
                pending.pop()
    return False
def filter(self, s, method='chebyshev', order=30):
    r"""Filter signals (analysis or synthesis).

    A signal is a rank-3 tensor of shape ``(N_NODES, N_SIGNALS,
    N_FEATURES)``.  Filtering a ``(G.N, 1, 1)`` signal with a bank of
    ``Nf`` filters produces a ``(G.N, 1, Nf)`` tensor (analysis);
    filtering a ``(G.N, 1, Nf)`` tensor with the same bank and summing
    the per-filter responses produces a ``(G.N, 1, 1)`` tensor
    (synthesis).  This applies the analysis operator :math:`D^*`,
    respectively the synthesis operator :math:`D`, of the frame defined
    by the filter bank.

    Parameters
    ----------
    s : array_like
        Graph signals: a tensor of shape ``(N_NODES, N_SIGNALS,
        N_FEATURES)``, where ``N_FEATURES`` is either 1 (analysis) or
        the number of filters ``Nf`` (synthesis).  Missing trailing
        dimensions are added automatically.
    method : {'exact', 'chebyshev'}
        Use the exact method (via the graph Fourier transform) or the
        Chebyshev polynomial approximation.
    order : int
        Degree of the Chebyshev polynomials.

    Returns
    -------
    s : ndarray
        Filtered signals; ``N_FEATURES`` is either ``Nf`` (analysis) or
        1 (synthesis).  Singleton dimensions are squeezed out of the
        result.

    References
    ----------
    See :cite:`hammond2011wavelets` for details on filtering graph
    signals.
    """
    s = self.G._check_signal(s)
    # TODO: not in self.Nin (Nf = Nin x Nout).
    # Normalize the input to rank 3: (#nodes, #signals, #features).
    if s.ndim == 1 or s.shape[-1] not in [1, self.Nf]:
        if s.ndim == 3:
            raise ValueError('Third dimension (#features) should be '
                             'either 1 or the number of filters Nf = {}, '
                             'got {}.'.format(self.Nf, s.shape))
        s = np.expand_dims(s, -1)
    n_features_in = s.shape[-1]
    if s.ndim < 3:
        s = np.expand_dims(s, 1)
    n_signals = s.shape[1]
    if s.ndim > 3:
        raise ValueError('At most 3 dimensions: '
                         '#nodes x #signals x #features.')
    assert s.ndim == 3
    # TODO: generalize to 2D (m --> n) filter banks.
    # Only 1 --> Nf (analysis) and Nf --> 1 (synthesis) for now.
    n_features_out = self.Nf if n_features_in == 1 else 1
    if method == 'exact':
        # TODO: will be handled by g.adjoint().
        axis = 1 if n_features_in == 1 else 2
        # Evaluate the filter bank on the graph eigenvalues and filter
        # in the spectral domain: GFT -> pointwise multiply -> inverse GFT.
        f = self.evaluate(self.G.e)
        f = np.expand_dims(f.T, axis)
        assert f.shape == (self.G.N, n_features_in, n_features_out)
        s = self.G.gft(s)
        s = np.matmul(s, f)
        s = self.G.igft(s)
    elif method == 'chebyshev':
        # TODO: update Chebyshev implementation (after 2D filter banks).
        c = approximations.compute_cheby_coeff(self, m=order)
        if n_features_in == 1:  # Analysis.
            s = s.squeeze(axis=2)
            s = approximations.cheby_op(self.G, c, s)
            # cheby_op stacks the per-filter outputs along the node axis;
            # unstack them into the feature dimension (Fortran order).
            s = s.reshape((self.G.N, n_features_out, n_signals), order='F')
            s = s.swapaxes(1, 2)
        elif n_features_in == self.Nf:  # Synthesis.
            s = s.swapaxes(1, 2)
            s_in = s.reshape((self.G.N * n_features_in, n_signals),
                             order='F')
            s = np.zeros((self.G.N, n_signals))
            tmpN = np.arange(self.G.N, dtype=int)
            # Sum the response of each feature through its own filter.
            for i in range(n_features_in):
                s += approximations.cheby_op(self.G, c[i],
                                             s_in[i * self.G.N + tmpN])
            s = np.expand_dims(s, 2)
    else:
        raise ValueError('Unknown method {}.'.format(method))
    # Return a 1D signal if e.g. a 1D signal was filtered by one filter.
    return s.squeeze()
def from_pure(cls, z):
    """Create a pure (single-element) composition.

    Args:
        z (int): atomic number
    """
    # Mass fraction and atomic fraction are both trivially 1.0 for a
    # pure element; pass two distinct dicts, as callers may mutate them.
    fractions = {z: 1.0}
    return cls(cls._key, fractions, dict(fractions),
               pyxray.element_symbol(z))
def open(self):
    """Open the connection with the device."""
    device = self.device
    try:
        device.open()
    except ConnectTimeoutError as err:
        raise ConnectionException(err.msg)
    device.timeout = self.timeout
    device._conn._session.transport.set_keepalive(self.keepalive)
    # Drop any "cu" attribute left over from a previous session; binding
    # twice raises "ValueError: requested attribute name cu already exists".
    if hasattr(device, "cu"):
        del device.cu
    device.bind(cu=Config)
    if not self.lock_disable and self.session_config_lock:
        self._lock()
def apply_obfuscation(source):
    """Return *source* with its variables, functions, and classes
    obfuscated.
    """
    global keyword_args
    global imported_modules
    tokens = token_utils.listified_tokenizer(source)
    # Keyword arguments and imported module names must be known up front
    # so they are preserved during renaming.
    keyword_args = analyze.enumerate_keyword_args(tokens)
    imported_modules = analyze.enumerate_imports(tokens)
    # Collect all rename candidates before any replacement happens.
    variables = find_obfuscatables(tokens, obfuscatable_variable)
    classes = find_obfuscatables(tokens, obfuscatable_class)
    functions = find_obfuscatables(tokens, obfuscatable_function)
    for name in variables:
        replace_obfuscatables(tokens, obfuscate_variable, name,
                              name_generator)
    for name in functions:
        replace_obfuscatables(tokens, obfuscate_function, name,
                              name_generator)
    for name in classes:
        replace_obfuscatables(tokens, obfuscate_class, name,
                              name_generator)
    return token_utils.untokenize(tokens)
def main():
    """This is an example of project documentation using AIKIF
    It documents the project itself, including requirements, design, test, goals,
    """
    print('Initialising AIKIF Project...')
    # Basic project metadata.
    name = 'AIKIF'
    type = 'Software'  # NOTE(review): shadows the builtin `type`; kept as-is.
    desc = """
    Artificial Intelligence Knowledge Information Framework - Project Overview
    This document was autogenerated via aikif/examples/AIKIF_project.py
    """
    desc += '\n Last updated ' + mod_dt.TodayAsString()
    fldr = os.getcwd()  # 'T:\\user\\dev\\src\\python\\AIKIF\\aikif\\examples\\AIKIF_project'
    report_file_base = fldr + os.sep + 'aikif_report'
    p = mod_prj.Project(name, type, desc, fldr)
    project_setup(p)

    # Requirements table: top-level requirements (a-e) followed by
    # sub-requirements keyed to their parent via dep_id.
    requirements = mod_dt.DataTable('requirements.csv', ',', col_names=['id', 'dep_id', 'name', 'details'])
    p.log_table(requirements)
    p.record(requirements, '', ['a', '', 'process data', 'automatically process source files to tables based on rules'])
    p.record(requirements, '', ['b', '', 'define structures', 'use mappings and ontology to specify what to do'])
    p.record(requirements, '', ['c', '', 'log intent', 'log key events'])
    p.record(requirements, '', ['d', '', 'methods toolbox', 'implement a set of programs that can be used generically'])
    p.record(requirements, '', ['e', '', 'Command Line Interface', 'CLI to query and update datasets and control processes'])
    p.record(requirements, '', ['a01', 'a', 'download CSV', 'download CSV file from website'])
    p.record(requirements, '', ['a02', 'a', 'load CSV to table', 'import CSV file to Database table'])
    p.record(requirements, '', ['a03', 'a', 'find blank rows in CSV', 'read CSV file and count DQ issues'])
    p.record(requirements, '', ['a04', 'a', 'aggregate table', 'summarise Database table by col(n)'])
    p.record(requirements, '', ['e01', 'e', 'CLI commands', 'CLI to manage commands and modes of operation'])
    p.record(requirements, '', ['e02', 'e', 'CLI query', 'query functions of datasets in AIKIF using basic fixed commands'])
    p.record(requirements, '', ['e03', 'e', 'CLI NLP', 'integrate NLP to allow english questions and data addition in add/query mode'])
    p.record(requirements, '', ['e04', 'e', 'CLI add', 'allows user to add data to general or specific datasets'])
    p.record(requirements, '', ['e05', 'e', 'CLI process', 'allows managing of all processes in AIKIF'])

    # Progress table: per-module completion estimates.
    progress = mod_dt.DataTable('progress.csv', ',', col_names=['program', 'percent', 'details'])
    p.log_table(progress)
    p.record(progress, '', ['knowledge', '1%', 'class to manage raw data to information'])
    p.record(progress, '', ['mapper', '20%', 'mapping columns to data structures, with business rules'])
    p.record(progress, '', ['sql_code_generator', '90%', 'generates SQL to transform data external to AIKIF'])

    # Issues table: current work items and their status.
    issues = mod_dt.DataTable('issues.csv', ',', col_names=['id', 'name', 'details'])
    p.log_table(issues)
    p.record(issues, '', ['01', 'In Progress', 'implement AIKIF project logging'])
    p.record(issues, '', ['02', 'todo', 'implement Knowledge mapping'])
    p.record(issues, '', ['03', 'Testing', 'data mapping of columns'])

    # p.build_report(report_file_base + '.md', type='md')
    p.build_report(report_file_base + '.rst', type='rst')
    print('Done...')
def titletable(html_doc, tofloat=True):
    """Return a list of ``(title, table)`` pairs extracted from *html_doc*.

    title -- first content of the nearest preceding <b> element
    table -- rows as nested lists: [[cell1, cell2, ...], ...]
    """
    soup = BeautifulSoup(html_doc, "html.parser")
    tags = soup.find_all(['b', 'table'])  # all <b> and <table>, in document order
    pairs = []
    for pos, tag in enumerate(tags):
        if tag.name != 'table':
            continue
        # Step backwards to find the closest <b> before this table.
        for back in range(pos + 1):
            if tags[pos - back].name == 'b':
                break
        pairs.append((tags[pos - back], tag))
    converter = table2val_matrix if tofloat else table2matrix
    return [(title.contents[0], converter(table)) for title, table in pairs]
def parse_free_space_response(content, hostname):
    """Parse WebDAV XML response *content* and extract the free space.

    :param content: XML body of the HTTP response for a free-space query.
    :param hostname: the server hostname (used in error reporting).
    :return: amount of free space in bytes; an empty string when the XML
        is malformed.
    """
    try:
        tree = etree.fromstring(content)
        node = tree.find('.//{DAV:}quota-available-bytes')
        if node is None:
            raise MethodNotSupported(name='free', server=hostname)
        return int(node.text)
    except TypeError:
        raise MethodNotSupported(name='free', server=hostname)
    except etree.XMLSyntaxError:
        # NOTE(review): malformed XML yields '' rather than raising --
        # confirm that callers rely on this.
        return str()
def add_commit(self, commit):
    """Add *commit* to the commits array if it is not already there and
    return its index in the array.
    """
    sha1 = commit.hex
    if sha1 in self._commits:
        return self._commits[sha1]
    title, separator, body = commit.message.partition("\n")
    # Build the JSON-serializable record (avoid shadowing the parameter).
    entry = {
        'explored': False,
        'sha1': sha1,
        'name': GitUtils.abbreviate_sha1(sha1),
        'describe': GitUtils.describe(sha1),
        'refs': GitUtils.refs_to(sha1, self.repo()),
        'author_name': commit.author.name,
        'author_mail': commit.author.email,
        'author_time': commit.author.time,
        'author_offset': commit.author.offset,
        'committer_name': commit.committer.name,
        'committer_mail': commit.committer.email,
        'committer_time': commit.committer.time,
        'committer_offset': commit.committer.offset,
        # 'message': commit.message,
        'title': title,
        'separator': separator,
        'body': body.lstrip("\n"),
    }
    self._json['commits'].append(entry)
    index = len(self._json['commits']) - 1
    self._commits[sha1] = index
    return index
def retrieve_remote_content(
        id: str,
        guid: str = None,
        handle: str = None,
        entity_type: str = None,
        sender_key_fetcher: Callable[[str], str] = None,
):
    """Retrieve remote content and return an Entity object.

    Always uses the Diaspora protocol for now, since no other protocols
    are supported.

    :param sender_key_fetcher: Function used to fetch the sender public
        key. If not given, the network will be used to fetch the profile
        and the key. Must take a handle as its only parameter and return
        a public key.
    :returns: Entity class instance or ``None``
    """
    # TODO add support for AP
    protocol_name = "diaspora"
    utils = importlib.import_module("federation.utils.%s" % protocol_name)
    return utils.retrieve_and_parse_content(
        guid=guid or id,  # fall back to id when no guid was supplied
        handle=handle,
        entity_type=entity_type,
        sender_key_fetcher=sender_key_fetcher,
    )
def water_self_diffusion_coefficient(T=None, units=None, warn=True,
                                     err_mult=None):
    """Temperature-dependent self-diffusion coefficient of water.

    Parameters
    ----------
    T : float
        Temperature (default: in Kelvin; defaults to 298.15 K).
    units : object (optional)
        Object with attributes: Kelvin, meter, second.
    warn : bool (default: True)
        Emit UserWarning when temperature is outside the 0-100 degC
        fitting range.
    err_mult : length 2 array_like (default: None)
        Perturb parameters D0 and TS with err_mult[0]*dD0 and
        err_mult[1]*dTS respectively, where dD0 and dTS are the reported
        uncertainties in the fitted parameters. Useful for estimating
        the error in the diffusion coefficient.

    References
    ----------
    Holz, Heil, Sacco; Phys. Chem. Chem. Phys., 2000, 2, 4740-4742.
    http://pubs.rsc.org/en/Content/ArticleLanding/2000/CP/b005319h
    DOI: 10.1039/B005319H
    """
    if units is None:
        K = m = s = 1
    else:
        K = units.Kelvin
        m = units.meter
        s = units.second
    if T is None:
        T = 298.15 * K
    # Attach units to the module-level fit parameters.
    _D0 = D0 * m**2 * s**-1
    _TS = TS * K
    if err_mult is not None:
        # Shift the fit parameters by the requested multiples of their
        # reported uncertainties.
        _D0 += err_mult[0] * (dD0 * m**2 * s**-1)
        _TS += err_mult[1] * (dTS * K)
    if warn and (_any(T < low_t_bound * K) or _any(T > high_t_bound * K)):
        warnings.warn("Temperature is outside range (0-100 degC)")
    return _D0 * ((T / _TS) - 1)**gamma
def handle(self, *args, **options):
    """Generates image thumbnails

    NOTE: To keep memory consumption stable avoid iteration over the
    Image queryset; only the primary keys are fetched up front and each
    Image is loaded (and released) individually.
    """
    pks = Image.objects.all().values_list('id', flat=True)
    total = len(pks)
    for idx, pk in enumerate(pks):
        image = None
        try:
            image = Image.objects.get(pk=pk)
            self.stdout.write(u'Processing image {0} / {1} {2}'.format(idx + 1, total, image))
            self.stdout.flush()
            # Accessing these properties triggers thumbnail/icon generation.
            image.thumbnails
            image.icons
        except IOError as e:
            self.stderr.write('Failed to generate thumbnails: {0}'.format(str(e)))
            self.stderr.flush()
        finally:
            # Release the Image instance before the next iteration to keep
            # memory usage flat.
            del image
def download_ftp_url(source_url, target_uri, buffer_size=8192):
    """Download *source_url* to the local path *target_uri* via urllib.

    Copies in chunks of *buffer_size* bytes. Thread safe? -- presumably,
    but unverified.
    """
    ensure_file_directory(target_uri)
    with urllib.request.urlopen(source_url) as source, \
            open(target_uri, 'wb') as target:
        shutil.copyfileobj(source, target, buffer_size)
def is_assignment_allowed(self):
    """Check if analyst assignment is allowed."""
    if not self.is_manage_allowed():
        return False
    # Assignment is only possible while the item is still editable.
    assignable_states = ("open", "attachment_due", "to_be_verified")
    return api.get_workflow_status_of(self.context) in assignable_states
def get_source_pars(src):
    """Extract the parameters associated with a pyLikelihood Source object.

    Returns a tuple ``(spectral_pars, spatial_pars)``.  The spectral
    parameter matching the source's normalization parameter is flagged
    with ``is_norm=True``; all other parameters get ``is_norm=False``.
    """
    fnmap = src.getSrcFuncs()
    if 'Position' in fnmap.keys():
        ppars = get_function_pars(src.getSrcFuncs()[str('Position')])
    elif 'SpatialDist' in fnmap.keys():
        ppars = get_function_pars(src.getSrcFuncs()[str('SpatialDist')])
    else:
        raise Exception('Failed to extract spatial parameters.')
    fn = src.getSrcFuncs()[str('Spectrum')]
    spars = get_function_pars(fn)
    norm_name = fn.normPar().getName()
    for par in ppars:
        par['is_norm'] = False
    for par in spars:
        par['is_norm'] = (par['name'] == norm_name)
    return spars, ppars
def delete_collection_namespaced_lease(self, namespace, **kwargs):
    """delete collection of Lease

    Synchronous by default; pass async_req=True to make an asynchronous
    HTTP request:
    >>> thread = api.delete_collection_namespaced_lease(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: pagination token returned by a previous list call.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call.
    :param str resource_version: When specified, shows changes that occur after that particular version of a resource.
    :param int timeout_seconds: Timeout for the list/watch call, regardless of activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
    :return: V1Status
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the *_with_http_info
    # variant; with async_req set, its return value is the request thread.
    return self.delete_collection_namespaced_lease_with_http_info(
        namespace, **kwargs)
def close(self):
    """Close this :class:`PCapStream` by closing the underlying Python
    stream, logging a summary, and resetting the bookkeeping state.
    """
    if not self._stream:
        return
    summary = (self._total.nbytes, self._total.npackets,
               int(math.ceil(self._total.nseconds)), self._filename)
    if self._dryrun:
        template = 'Would write %d bytes, %d packets, %d seconds to %s.'
    else:
        template = 'Wrote %d bytes, %d packets, %d seconds to %s.'
    self._stream.close()
    log.info(template % summary)
    # Reset state so a new capture can be started.
    self._filename = None
    self._startTime = None
    self._stream = None
    self._total = PCapFileStats(0, 0, 0)
def _remove_network(network):
    '''
    Remove network, including all connected containers.

    Returns a salt state-style dict with 'name', 'changes', 'result' and
    'comment' keys.  If any container fails to disconnect, the network is
    left in place and the failures are reported in 'comment'.
    '''
    ret = {'name': network['Name'],
           'changes': {},
           'result': False,
           'comment': ''}
    errors = []
    for cid in network['Containers']:
        try:
            cinfo = __salt__['docker.inspect_container'](cid)
        except CommandExecutionError:
            # Fall back to container ID
            cname = cid
        else:
            cname = cinfo.get('Name', '').lstrip('/')
        try:
            __salt__['docker.disconnect_container_from_network'](
                cid, network['Name'])
        except CommandExecutionError as exc:
            # BUG FIX: previously ``errors = '...'`` replaced the list with
            # a string, so ``'\n'.join(errors)`` below joined its individual
            # characters and earlier errors were lost.
            errors.append(
                'Failed to disconnect container \'{0}\' : {1}'.format(
                    cname, exc))
        else:
            ret['changes'].setdefault('disconnected', []).append(cname)
    if errors:
        ret['comment'] = '\n'.join(errors)
        return ret
    try:
        __salt__['docker.remove_network'](network['Name'])
    except CommandExecutionError as exc:
        ret['comment'] = 'Failed to remove network: {0}'.format(exc)
    else:
        ret['changes']['removed'] = True
        ret['result'] = True
        ret['comment'] = 'Removed network \'{0}\''.format(network['Name'])
    return ret
def detect_suicidal_func(func):
    """Detect whether *func* is suicidal.

    A function is considered suicidal when it is a public, non-constructor
    function that calls suicide/selfdestruct without access protection.

    Returns:
        (bool): True if the function is suicidal
    """
    if func.is_constructor or func.visibility != 'public':
        return False
    called = {call.name for call in func.internal_calls}
    if not called & {'suicide(address)', 'selfdestruct(address)'}:
        return False
    return not func.is_protected()
def Nu_horizontal_cylinder(Pr, Gr, Method=None, AvailableMethods=False):
    r'''Select and evaluate a free-convection correlation for a horizontal
    cylinder.

    Automatically picks a correlation when none is given; returns None-like
    behavior only through the correlation itself.  'Morgan' is preferred
    when discontinuous results are acceptable, 'Churchill-Chu' otherwise.

    Parameters
    ----------
    Pr : float
        Prandtl number [-]
    Gr : float
        Grashof number [-]
    Method : string, optional
        Name of the correlation function to use, as listed in the
        dictionary horizontal_cylinder_correlations.
    AvailableMethods : bool, optional
        If True, return the list of method names that can be used to
        calculate Nu with the given inputs instead of a value.

    Returns
    -------
    Nu : float
        Nusselt number [-] (or a list of method names when
        AvailableMethods is True).

    Examples
    --------
    Nu_horizontal_cylinder(0.72, 1E7) -> 24.864192615468973
    '''
    def list_methods():
        names = list(horizontal_cylinder_correlations)
        # 'Morgan' is the preferred default; move it to the front.
        if 'Morgan' in names:
            names.remove('Morgan')
            names.insert(0, 'Morgan')
        return names

    if AvailableMethods:
        return list_methods()
    chosen = Method or list_methods()[0]
    try:
        correlation = horizontal_cylinder_correlations[chosen]
    except KeyError:
        raise Exception("Correlation name not recognized; see the "
                        "documentation for the available options.")
    return correlation(Pr=Pr, Gr=Gr)
def _gen_exclusion_paths ( ) : """Generate file paths to be excluded for namespace packages ( bytecode cache files ) ."""
# always exclude the package module itself yield '__init__.py' yield '__init__.pyc' yield '__init__.pyo' if not hasattr ( imp , 'get_tag' ) : return base = os . path . join ( '__pycache__' , '__init__.' + imp . get_tag ( ) ) yield base + '.pyc' yield base + '.pyo' yield base + '.opt-1.pyc' yield base + '.opt-2.pyc'
def generate_alchemy_graph(alchemy_uri, prefixes=None,
                           identifier="NautilusSparql"):
    """Generate an SQLAlchemy-backed RDF graph and open it.

    :param alchemy_uri: A URI for the graph store
    :param prefixes: A dictionary of prefixes and namespaces to bind to
        the graph
    :param identifier: An identifier that will identify the Graph root
    :return: tuple of (graph, identifier, uri)
    """
    registerplugins()
    ident = URIRef(identifier)
    uri = Literal(alchemy_uri)
    store = plugin.get("SQLAlchemy", Store)(identifier=ident)
    graph = Graph(store, identifier=ident)
    graph.open(uri, create=True)
    bindings = prefixes or GRAPH_BINDINGS
    for prefix, namespace in bindings.items():
        # The Alchemy store does not accept empty prefixes; use "cts".
        graph.bind(prefix if prefix else "cts", namespace)
    return graph, identifier, uri
def GetArtifactPathDependencies(rdf_artifact):
    """Return a set of knowledgebase path dependencies.

    Args:
        rdf_artifact: RDF artifact object.

    Returns:
        A set of strings for the required kb objects, e.g.
        ["users.appdata", "systemroot"]
    """
    deps = set()
    for source in rdf_artifact.sources:
        for arg, value in iteritems(source.attributes):
            # Gather every path-like string that may contain %%...%%
            # interpolations.
            paths = []
            if arg in ("path", "query"):
                paths.append(value)
            if arg == "key_value_pairs":
                # value is a list of REGISTRY_VALUE {key: ..., value: ...}
                # dicts; only the keys carry path interpolations.
                paths.extend(pair["key"] for pair in value)
            if arg in ("keys", "paths", "path_list", "content_regex_list"):
                paths.extend(value)
            for path in paths:
                for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):
                    deps.add(match.group()[2:-2])  # Strip off %%.
    deps.update(GetArtifactParserDependencies(rdf_artifact))
    return deps
def find_changed():
    """Find changes since the revision it is currently holding.

    Reads session/repository info from the request headers, compares the
    client's reported changes against the server's history since the
    client's previous revision, applies any conflict resolutions supplied
    by the client, and returns the merged change set together with the
    current head revision.
    """
    session_token = request.headers['session_token']
    repository = request.headers['repository']
    current_user = have_authenticated_user(request.environ['REMOTE_ADDR'], repository, session_token)
    if current_user is False:
        return fail(user_auth_fail_msg)
    repository_path = config['repositories'][repository]['path']
    body_data = request.get_json()
    data_store = versioned_storage(repository_path)
    head = data_store.get_head()
    # A 'root' head means the repository has no history yet.
    if head == 'root':
        return success({}, {'head': 'root', 'sorted_changes': {'none': []}})
    # Find changed items
    client_changes = json.loads(body_data['client_changes'])
    server_changes = data_store.get_changes_since(request.headers["previous_revision"], head)
    # Resolve conflicts: each resolution names a path and which side
    # ('client' or 'server') wins; the losing side's change is dropped.
    conflict_resolutions = json.loads(body_data['conflict_resolutions'])
    if conflict_resolutions != []:
        resolutions = {'server': {}, 'client': {}}
        for r in conflict_resolutions:
            if len(r['4_resolution']) != 1 or r['4_resolution'][0] not in ['client', 'server']:
                return fail(conflict_msg)
            resolutions[r['4_resolution'][0]][r['1_path']] = None
        # NOTE(review): .iteritems() is Python 2 only -- consistent with the
        # rest of this codebase, but confirm before porting to Python 3.
        client_changes = {k: v for k, v in client_changes.iteritems() if v['path'] not in resolutions['server']}
        server_changes = {k: v for k, v in server_changes.iteritems() if v['path'] not in resolutions['client']}
    sorted_changes = merge_client_and_server_changes(server_changes, client_changes)
    return success({}, {'head': head, 'sorted_changes': sorted_changes})
def intersect(self, x1, x2=None):
    """Returns a list of all segments intersected by [x1, x2].

    When x2 is omitted, the query degenerates to the single point x1.
    The search locates the child intervals covering the two endpoints
    and radiates outwards from them, then recurses into every matching
    child.
    """
    def condition(x1, x2, tree):
        # True when *tree*'s interval [tree.x1, tree.x2) contains either
        # query endpoint.
        # print self.id, tree.x1, tree.x2, x1, x2
        if (tree.x1 != None and tree.x2 != None) and (tree.x1 <= x1 and x1 < tree.x2 or tree.x1 <= x2 and x2 < tree.x2):
            return True
        return False
    # Normalize the query endpoints so that xx1 <= xx2.
    if x2 == None:
        xx1, xx2 = x1, x1
    elif x1 > x2:
        xx1, xx2 = x2, x1
    else:
        xx1, xx2 = x1, x2
    # Binary-search the child covering each endpoint.
    c1 = self.__dichotomicSearch(xx1)
    c2 = self.__dichotomicSearch(xx2)
    if c1 == -1 or c2 == -1:
        return []
    if xx1 < self.children[c1].x1:
        c1 -= 1
    # NOTE(review): the radiate calls receive the original x1/x2 (possibly
    # unordered, x2 possibly None) rather than the normalized xx1/xx2 --
    # confirm this is intended.
    inter = self.__radiateDown(x1, x2, c1, condition)
    if self.children[c1].id == self.children[c2].id:
        inter.extend(self.__radiateUp(x1, x2, c2 + 1, condition))
    else:
        inter.extend(self.__radiateUp(x1, x2, c2, condition))
    # Recurse into each intersected child and collect its own matches.
    ret = []
    for c in inter:
        ret.extend(c.intersect(x1, x2))
    inter.extend(ret)
    return inter
def ends_with_path_separator(self, file_path):
    """Tell whether ``file_path`` terminates in a valid path separator.

    Integer (file-descriptor style) arguments never count, and a path
    consisting solely of a separator does not either.
    """
    if is_int_type(file_path):
        return False
    path = make_string_path(file_path)
    if not path:
        # keep the original's falsy pass-through ('' / None)
        return path
    if path in (self.path_separator, self.alternative_path_separator):
        return False
    if path.endswith(self._path_separator(path)):
        return True
    if self.alternative_path_separator is None:
        return False
    return path.endswith(self._alternative_path_separator(path))
def round_to_multiple(number, multiple):
    """Round *number* up to the nearest multiple of *multiple*.

    Parameters
    ----------
    number : int or float
        Value to round.
    multiple : int
        Round up to a multiple of this (converted to ``int``; a zero
        value is silently treated as 1).

    Returns
    -------
    int
        The rounded-up value.

    Example
    -------
    round_to_multiple(maximum, math.floor(math.log10(maximum)))
    """
    step = int(multiple) or 1
    # A modulo with a negative divisor yields the negated gap up to the
    # next multiple, so subtracting it rounds upward.
    return int(number - number % (-step))
def __reply(self, reply, args, kwargs):
    """Simulate the reply for an injected (not actually sent) request."""
    input_binding = self.method.binding.input
    message = input_binding.get_message(self.method, args, kwargs)
    log.debug('inject (simulated) send message:\n%s', message)
    output_binding = self.method.binding.output
    return self.succeeded(output_binding, reply)
def box(n_traces=5, n=100, mode=None):
    """Return a DataFrame with the required format for a box plot.

    Parameters:
        n_traces : int
            Number of traces
        n : int
            Number of points for each trace
        mode : string
            Format for each item
                'abc' for alphabet columns
                'stocks' for random stock names
    """
    # one chi-square sample row per point, one column per trace
    rows = [np.random.chisquare(np.random.randint(2, 10), n_traces)
            for _ in range(n)]
    return pd.DataFrame(rows, columns=getName(n_traces, mode=mode))
def update_0100(self):
    """CC bits "HNZVC": -0100 -- set Z, clear N, V and C."""
    self.N, self.Z, self.V, self.C = 0, 1, 0, 0
def can_vote_in_poll ( self , poll , user ) : """Given a poll , checks whether the user can answer to it ."""
# First we have to check if the poll is curently open if poll . duration : poll_dtend = poll . created + dt . timedelta ( days = poll . duration ) if poll_dtend < now ( ) : return False # Is this user allowed to vote in polls in the current forum ? can_vote = ( self . _perform_basic_permission_check ( poll . topic . forum , user , 'can_vote_in_polls' ) and not poll . topic . is_locked ) # Retrieve the user votes for the considered poll user_votes = TopicPollVote . objects . filter ( poll_option__poll = poll ) if user . is_anonymous : forum_key = get_anonymous_user_forum_key ( user ) if forum_key : user_votes = user_votes . filter ( anonymous_key = forum_key ) else : # If the forum key of the anonymous user cannot be retrieved , the user should not be # allowed to vote in the considered poll . user_votes = user_votes . none ( ) can_vote = False else : user_votes = user_votes . filter ( voter = user ) # If the user has already voted , they can vote again if the vote changes are allowed if user_votes . exists ( ) and can_vote : can_vote = poll . user_changes return can_vote
def to_JSON(self):
    """Dump this object's fields into a JSON formatted string.

    :returns: the JSON string
    """
    payload = {
        "reception_time": self._reception_time,
        "Location": json.loads(self._location.to_JSON()),
        "Weather": json.loads(self._weather.to_JSON()),
    }
    return json.dumps(payload)
def set_value(self, value):
    """Sets the user value (mode) of the choice. Like for Symbol.set_value(),
    the visibility might truncate the value. Choices without the 'optional'
    attribute (is_optional) can never be in n mode, but 0/"n" is still
    accepted since it's not a malformed value (though it will have no
    effect).

    Returns True if the value is valid for the type of the choice, and
    False otherwise. This only looks at the form of the value. Check the
    Choice.assignable attribute to see what values are currently in range
    and would actually be reflected in the mode of the choice.
    """
    if value == self.user_value:
        # We know the value must be valid if it was successfully set
        # previously
        self._was_set = True
        return True
    # Accept tristate constants (2/1/0) or their string spellings; bool
    # choices cannot take 1/"m".
    if not ((self.orig_type is BOOL and value in (2, 0, "y", "n")) or
            (self.orig_type is TRISTATE and
             value in (2, 1, 0, "y", "m", "n"))):
        # Display tristate values as n, m, y in the warning
        self.kconfig._warn(
            "the value {} is invalid for {}, which has type {} -- "
            "assignment ignored"
            .format(TRI_TO_STR[value] if value in (0, 1, 2) else
                    "'{}'".format(value),
                    _name_and_loc(self), TYPE_TO_STR[self.orig_type]))
        return False
    # Normalise string spellings to their tristate integer values.
    if value in ("y", "m", "n"):
        value = STR_TO_TRI[value]
    self.user_value = value
    self._was_set = True
    # Invalidate cached state for this choice and anything depending on it.
    self._rec_invalidate()
    return True
def wallet_contains(self, wallet, account):
    """Check whether **wallet** contains **account**.

    :param wallet: Wallet to check contains **account**
    :type wallet: str
    :param account: Account to check exists in **wallet**
    :type account: str
    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.wallet_contains(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
    ...     account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000")
    True
    """
    payload = {
        "wallet": self._process_value(wallet, 'wallet'),
        "account": self._process_value(account, 'account'),
    }
    result = self.call('wallet_contains', payload)
    return result['exists'] == '1'
def add_initial(initial_input):
    """Construct a stateful object for handling Vensim's 'Initial'
    functionality.

    Parameters
    ----------
    initial_input : basestring
        The expression which will be evaluated, and the first value of
        which returned.

    Returns
    -------
    reference : basestring
        Reference to the Initial object `__call__` method, which will
        return the first calculated value of `initial_input`.
    new_structure : list
        List of element construction dictionaries for the builder to
        assemble.
    """
    stateful = {
        'py_name': utils.make_python_identifier(
            '_initial_%s' % initial_input)[0],
        # Fixed: previously read 'Smooth of %s', copy/pasted from the
        # smooth builder; this is the Initial builder.
        'real_name': 'Initial of %s' % initial_input,
        'doc': 'Returns the value taken on during the initialization phase',
        'py_expr': 'functions.Initial(lambda: %s)' % (initial_input),
        'unit': 'None',
        'lims': 'None',
        'eqn': 'None',
        'subs': '',
        'kind': 'stateful',
        'arguments': '',
    }
    return "%s()" % stateful['py_name'], [stateful]
def is_local(self):
    """Whether this is a local variable.

    In general, a variable is *local* if its containing scope is a
    statement (e.g. a block), or a function, given that the variable is
    not one of the function's parameters.
    """
    scope = self.scope
    if isinstance(scope, CodeStatement):
        return True
    return isinstance(scope, CodeFunction) and self not in scope.parameters
def select_one_user(users):
    """Display the users returned by search api and prompt for a choice.

    :params users: API['result']['userprofiles']
    :return: a User object.
    """
    if len(users) == 1:
        # a single candidate needs no prompt; index 0 is used below
        # via users[select_i - 1] == users[-1] == users[0]
        select_i = 0
    else:
        # render a numbered table of candidates (1-based sequence)
        table = PrettyTable(['Sequence', 'Name'])
        for i, user in enumerate(users, 1):
            table.add_row([i, user['nickname']])
        click.echo(table)
        select_i = click.prompt('Select one user', type=int, default=1)
        # re-prompt until the sequence number is within range
        while select_i < 1 or select_i > len(users):
            select_i = click.prompt('Error Select! Select Again', type=int)
    user_id = users[select_i - 1]['userId']
    user_name = users[select_i - 1]['nickname']
    user = User(user_id, user_name)
    return user
def read_response(self):
    """Read the response from a previously sent command.

    On any failure while reading/parsing, the connection is dropped
    before the exception propagates, so the socket is never left in a
    half-read, inconsistent state.

    :raises ResponseError: when the server replied with an error.
    """
    try:
        response = self._parser.read_response()
    except BaseException:
        # Catch *everything* (including KeyboardInterrupt) so the
        # connection is always torn down before re-raising; a partially
        # consumed reply would corrupt later commands.  This replaces a
        # bare ``except:`` (E722) with the explicit equivalent.
        self.disconnect()
        raise
    if isinstance(response, ResponseError):
        raise response
    return response
def search_unique_identities(db, term, source=None):
    """Look for unique identities.

    This function returns those unique identities which match with the
    given 'term'. The term will be compared with name, email, username
    and source values of each identity. When `source` is given, this
    search will be only performed on identities linked to this source.

    :param db: database manager
    :param term: term to match with unique identities data
    :param source: search only on identities from this source

    :raises NotFoundError: raised when the given term is not found on
        any unique identity from the registry
    """
    uidentities = []
    # a falsy term disables substring matching (NULL-field search below)
    pattern = '%' + term + '%' if term else None
    with db.connect() as session:
        query = session.query(UniqueIdentity).\
            join(Identity).\
            filter(UniqueIdentity.uuid == Identity.uuid)
        if source:
            query = query.filter(Identity.source == source)
        if pattern:
            # substring match on any of the identity fields
            query = query.filter(Identity.name.like(pattern)
                                 | Identity.email.like(pattern)
                                 | Identity.username.like(pattern)
                                 | Identity.source.like(pattern))
        else:
            # without a term, match identities having any NULL field;
            # SQLAlchemy requires '== None' here, not 'is None'
            query = query.filter((Identity.name == None)
                                 | (Identity.email == None)
                                 | (Identity.username == None)
                                 | (Identity.source == None))
        uidentities = query.order_by(UniqueIdentity.uuid).all()
        if not uidentities:
            raise NotFoundError(entity=term)
        # Detach objects from the session so they stay usable afterwards
        session.expunge_all()
    return uidentities
def new_post(GITDIRECTORY=CONFIG['output_to'], kind=KINDS['writing']):  # pragma: no coverage  # noqa
    """Create a template for a new post with a title read from user input.

    Most other fields are defaults.  The file is written under
    ``content/<kind>/<year>/`` and then opened in the configured editor.

    TODO: update this function
    """
    title = input("Give the title of the post: ")
    # ':' would break the front-matter key/value lines, so disallow it
    while ':' in title:
        title = input("Give the title of the post (':' not allowed): ")
    author = CONFIG['author']
    date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    tags = input("Give the tags, separated by ', ':")
    published = 'yes'
    chronological = 'yes'
    summary = ("summary: Type your summary here.")
    # make file name
    fname = os.path.join(
        os.getcwd(), 'content', kind['name_plural'],
        datetime.datetime.strftime(datetime.datetime.now(), '%Y'),
        date + '-' + title.replace(' ', '-') + '.markdown')
    # first post every year need to create a new directory
    if not os.path.exists(os.path.dirname(fname)):
        os.makedirs(os.path.dirname(fname))
    # write the front-matter template
    with open(fname, 'w') as npost:
        npost.write('---\n')
        npost.write('title: %s\n' % title)
        npost.write('author: %s\n' % author)
        npost.write('published: %s\n' % date)
        npost.write('tags: %s\n' % tags)
        npost.write('public: %s\n' % published)
        npost.write('chronological: %s\n' % chronological)
        npost.write('kind: %s\n' % kind['name'])
        npost.write('%s\n' % summary)
        npost.write('---\n')
    # hand the new file to the user's editor
    os.system('%s %s' % (CONFIG['editor'], fname))
def fileopenbox(msg=None, title=None, default="*", filetypes=None):
    """Display a file-open dialog and return the chosen path.

    @arg msg: the msg to be displayed.
    @arg title: the window title
    @arg default: a filepath, normally containing one or more wildcards,
        restricting the files initially shown (defaults to ``"*"``, i.e.
        all files in the current directory).  On Windows the path
        separator is adjusted automatically.
    @arg filetypes: filemasks that a user can choose; each item is either
        a mask string such as ``"*.txt"`` or a list of masks whose last
        element is a description, e.g. ``["*.htm", "*.html", "HTML files"]``.
        ``("All files", "*")`` is added automatically if missing, as is a
        mask matching the extension of ``default``.
    @rtype: string or None
    @return: the normalized name of a file, or None if user chose to cancel
    """
    if sys.platform == 'darwin':
        _bring_to_front()
    root = Tk()
    root.withdraw()
    initialbase, initialfile, initialdir, filetypes = fileboxSetup(
        default, filetypes)
    # Drop the initial file when it carries no wildcard (it would not be
    # used anyway) or when the base is just "*" (does no useful work).
    has_wildcard = ("*" in initialfile) or ("?" in initialfile)
    if not has_wildcard or initialbase == "*":
        initialfile = None
    chosen = tk_FileDialog.askopenfilename(
        parent=root,
        title=getFileDialogTitle(msg, title),
        initialdir=initialdir,
        initialfile=initialfile,
        filetypes=filetypes,
    )
    root.destroy()
    return os.path.normpath(chosen) if chosen else None
def _create_record(self, rtype, name, content):
    """Create a record; if an identical one already exists, do nothing."""
    existing = self._list_records(rtype, name, content)
    if not existing:
        record = {
            'type': rtype,
            'hostname': self._relative_name(name),
            'destination': content,
            'priority': self._get_lexicon_option('priority'),
        }
        self._update_records([{}], record)
        LOGGER.debug('create_record: %s', True)
    # always reported as success, even when the record already existed
    return True
def setHTML_from_file(self, filename, args):
    """Set the HTML body of the message from a template file.

    The file content is read and interpolated with ``str.format(**args)``.

    :returns: True on success, False when the file does not exist
    """
    # sanity check: the template file must exist
    if not os.path.isfile(filename):
        return False
    with open(filename) as template:
        # Create a text/plain message
        self.mail_html = template.read().format(**args)
    return True
def returnFees(self):
    """Returns a dictionary of all fees that apply through the network.

    Fee values are scaled by the chain's global fee scale and shifted by
    the core asset's precision.

    Example output:

        .. code-block:: js

            {'proposal_create': {'fee': 400000.0},
            'asset_publish_feed': {'fee': 1000.0},
            'account_create': {'basic_fee': 950000.0,
            'price_per_kbyte': 20000.0,
            'premium_fee': 400000.0},
            'custom': {'fee': 20000.0},
            'asset_fund_fee_pool': {'fee': 20000.0},
            'override_transfer': {'fee': 400000.0},
            'fill_order': {},
            'asset_update': {'price_per_kbyte': 20000.0, 'fee': 200000.0},
            'asset_update_feed_producers': {'fee': 100000.0},
            'assert': {'fee': 20000.0},
            'committee_member_create': {'fee': 100000.0}}
    """
    from bitsharesbase.operations import operations
    r = {}
    # 2.0.0 holds the global chain parameters (fee schedule),
    # 1.3.0 is the core asset (for its precision)
    obj, base = self.blockchain.rpc.get_objects(["2.0.0", "1.3.0"])
    fees = obj["parameters"]["current_fees"]["parameters"]
    scale = float(obj["parameters"]["current_fees"]["scale"])
    for f in fees:
        # map the numeric operation id (f[0]) back to its name
        op_name = "unknown %d" % f[0]
        for name in operations:
            if operations[name] == f[0]:
                op_name = name
        fs = f[1]
        for _type in fs:
            # apply the fee scale (expressed in 1e4 units) and shift by
            # the core asset precision
            fs[_type] = float(fs[_type]) * scale / 1e4 / 10 ** base["precision"]
        r[op_name] = fs
    return r
def listdir(self):
    """List the names of "*.log" files in the watched folder.

    HACK around not having a beaver_config stanza.
    TODO: Convert this to a glob.
    """
    entries = os.listdir(self._folder)
    return [name for name in entries
            if os.path.splitext(name)[1][1:] == "log"]
def _post(self, xml_query):
    '''POST the search request to the RCSB PDB REST endpoint and return
    the stripped response body.

    :param xml_query: XML payload for the search service
    '''
    req = urllib2.Request(url='http://www.rcsb.org/pdb/rest/search',
                          data=xml_query)
    response = urllib2.urlopen(req)
    try:
        return response.read().strip()
    finally:
        # the original leaked the HTTP connection; always close it
        response.close()
def show_delvol_on_destroy(name, kwargs=None, call=None):
    '''
    Show the delete-on-termination setting of the EBS volumes attached
    to an instance, optionally filtered by device name or volume id.
    (The previous docstring said "Do not delete..." — copy/pasted from
    the setter action; this function only reports the flag.)

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_delvol_on_destroy mymachine
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_delvol_on_destroy action must be called '
            'with -a or --action.'
        )
    if not kwargs:
        kwargs = {}
    instance_id = kwargs.get('instance_id', None)
    device = kwargs.get('device', None)
    volume_id = kwargs.get('volume_id', None)
    # resolve the instance id from the node name when not given explicitly
    if instance_id is None:
        instance_id = _get_node(name)['instanceId']
    params = {'Action': 'DescribeInstances',
              'InstanceId.1': instance_id}
    data = aws.query(params,
                     location=get_location(),
                     provider=get_provider(),
                     opts=__opts__,
                     sigver='4')
    blockmap = data[0]['instancesSet']['item']['blockDeviceMapping']
    # normalize: AWS returns a bare dict instead of a list when the
    # instance has a single block-device mapping
    if not isinstance(blockmap['item'], list):
        blockmap['item'] = [blockmap['item']]
    items = []
    for idx, item in enumerate(blockmap['item']):
        device_name = item['deviceName']
        # honour the optional device / volume_id filters
        if device is not None and device != device_name:
            continue
        if volume_id is not None and volume_id != item['ebs']['volumeId']:
            continue
        info = {'device_name': device_name,
                'volume_id': item['ebs']['volumeId'],
                'deleteOnTermination': item['ebs']['deleteOnTermination']}
        items.append(info)
    return items
def slug(self):
    """Generate a short (7-character) cuid as a bytestring.

    While this is a convenient shorthand, it is much less likely to be
    unique and should not be relied on.  Prefer full-size cuids where
    possible.
    """
    # two chars of timestamp + counter + first/last fingerprint chars
    # + two chars of randomness
    timestamp = _to_base36(int(time.time() * 1000))
    counter = _pad(_to_base36(self.counter), 1)
    pieces = [
        timestamp[-2:],
        counter,
        self.fingerprint[0],
        self.fingerprint[-1],
        _random_block()[-2:],
    ]
    return "".join(pieces)
def compile ( self , source , options = { } ) : """Compile stylus into css source : A string containing the stylus code options : A dictionary of arguments to pass to the compiler Returns a string of css resulting from the compilation"""
options = dict ( options ) if "paths" in options : options [ "paths" ] += self . paths else : options [ "paths" ] = self . paths if "compress" not in options : options [ "compress" ] = self . compress return self . context . call ( "compiler" , source , options , self . plugins , self . imports )
def _on_song_changed(self, song):
    """Callback run after the playlist's current song changes.

    Plays the new song when it has a usable URL, marks it bad (and skips
    to the next one) otherwise, and stops playback when the song is None.
    """
    logger.debug('Player received song changed signal')
    if song is None:
        self.stop()
        logger.info('No good song in player playlist anymore.')
        return
    logger.info('Try to play song: %s' % song)
    if song.url:
        self.play(song.url)
    else:
        self._playlist.mark_as_bad(song)
        self.play_next()
def hoverLeaveEvent(self, event):
    """Processes the hovering information for this node.

    :param      event | <QHoverEvent>
    """
    if self._hoverSpot:
        # give the active hover spot a chance to react; repaint if it did
        if self._hoverSpot.hoverLeaveEvent(event):
            self.update()
        self._hoverSpot = None
    self._hovered = False
    # restore the node-level tooltip that a hover spot may have replaced
    super(XNode, self).setToolTip(self._toolTip)
    super(XNode, self).hoverLeaveEvent(event)
def plot_scatter_matrix(self, freq=None, title=None,
                        figsize=(10, 10), **kwargs):
    """Wrapper around pandas' scatter_matrix.

    Args:
        * freq (str): Data frequency used for display purposes.
            Refer to pandas docs for valid freq strings.
        * figsize ((x, y)): figure size
        * title (str): Title if default not appropriate
        * kwargs: passed to pandas' scatter_matrix method

    NOTE(review): ``pd.scatter_matrix`` was deprecated in pandas 0.20
    and later removed in favour of ``pd.plotting.scatter_matrix`` --
    confirm the pinned pandas version still provides it.
    """
    if title is None:
        title = self._get_default_plot_title(freq, 'Return Scatter Matrix')
    plt.figure()
    # resample to the requested frequency and convert prices to returns
    ser = self._get_series(freq).to_returns().dropna()
    pd.scatter_matrix(ser, figsize=figsize, **kwargs)
    return plt.suptitle(title)
def post(self, bucket=None, key=None, uploads=missing, upload_id=None):
    """Upload a new object or start/complete a multipart upload.

    :param bucket: The bucket (instance or id) to get the object from.
        (Default: ``None``)
    :param key: The file key. (Default: ``None``)
    :param upload_id: The upload ID. (Default: ``None``)
    :returns: A Flask response.
    """
    # 'uploads' present (even empty) starts a multipart upload
    if uploads is not missing:
        return self.multipart_init(bucket, key)
    # an upload id completes an in-progress multipart upload
    if upload_id is not None:
        return self.multipart_complete(bucket, key, upload_id)
    # neither: the request is not allowed
    abort(403)
def get_resource_bin_session(self, proxy):
    """Gets the session for retrieving resource to bin mappings.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.resource.ResourceBinSession) - a ``ResourceBinSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_resource_bin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_resource_bin()`` is ``true``.*
    """
    if self.supports_resource_bin():
        # pylint: disable=no-member
        return sessions.ResourceBinSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
def set_default_interface(etree):
    """Set the default interface that PyAMF will use to deal with XML
    entities (both objects and blobs).

    Returns the previously active etree module.
    """
    global types, ET, modules

    etype = _get_etree_type(etree)
    # record the new type alongside any previously registered ones
    known = set(types) if types else set()
    known.add(etype)
    types = tuple(known)
    modules[etype] = etree
    previous = ET
    ET = etree
    return previous
def remove(release):
    '''
    Remove a specific version of the kernel.

    release
        The release number of an installed kernel. This must be the entire release
        number as returned by
        :py:func:`~salt.modules.kernelpkg_linux_yum.list_installed`,
        not the package name.

    CLI Example:

    .. code-block:: bash

        salt '*' kernelpkg.remove 3.10.0-327.el7
    '''
    if release not in list_installed():
        raise CommandExecutionError(
            'Kernel release \'{0}\' is not installed'.format(release))
    # never allow removing the kernel that is currently booted
    if release == active():
        raise CommandExecutionError('Active kernel cannot be removed')
    target = '{0}-{1}'.format(_package_name(), release)
    log.info('Removing kernel package %s', target)
    old = __salt__['pkg.list_pkgs']()
    # Build the command string
    cmd = []
    # run inside a systemd scope when available so the transaction
    # survives a salt-minion restart
    if salt.utils.systemd.has_scope(__context__) \
            and __salt__['config.get']('systemd.scope', True):
        cmd.extend(['systemd-run', '--scope'])
    cmd.extend([_yum(), '-y', 'remove', target])
    # Execute the command
    out = __salt__['cmd.run_all'](cmd,
                                  output_loglevel='trace',
                                  python_shell=False)
    # Look for the changes in installed packages
    __context__.pop('pkg.list_pkgs', None)
    new = __salt__['pkg.list_pkgs']()
    ret = salt.utils.data.compare_dicts(old, new)
    # Look for command execution errors
    if out['retcode'] != 0:
        raise CommandExecutionError(
            'Error occurred removing package(s)',
            info={'errors': [out['stderr']], 'changes': ret})
    return {'removed': [target]}
def tdecode(pkt, *args):
    """Run tshark to decode and display the packet.

    If no args defined, ``-V`` is used.
    """
    tshark_args = list(args) if args else ["-V"]
    # dump the packet to a temporary capture file for tshark to read
    capture = get_temp_file()
    wrpcap(capture, [pkt])
    subprocess.call(["tshark", "-r", capture] + tshark_args)
def findBracketBackward(self, block, column, bracket):
    """Search backward for the bracket matching ``bracket`` and return
    ``(block, column)``.

    Raise ValueError, if not found.
    NOTE this method ignores comments.
    """
    # resolve the bracket pair regardless of which side was passed in
    for opening, closing in (('(', ')'), ('[', ']'), ('{', '}')):
        if bracket in (opening, closing):
            break
    else:
        raise AssertionError('Invalid bracket "%s"' % bracket)

    depth = 1
    for foundBlock, foundColumn, char in \
            self.iterateCharsBackwardFrom(block, column):
        # characters inside comments never count toward nesting
        if self._qpart.isComment(foundBlock.blockNumber(), foundColumn):
            continue
        if char == opening:
            depth -= 1
        elif char == closing:
            depth += 1
        if depth == 0:
            return foundBlock, foundColumn
    raise ValueError('Not found')
def _show_corners(self, image, corners):
    """Show chessboard corners found in image.

    NOTE(review): ``temp = image`` does not copy, and
    ``cv2.drawChessboardCorners`` draws in place, so the caller's image
    is modified -- confirm this is intended.
    """
    temp = image
    cv2.drawChessboardCorners(temp, (self.rows, self.columns), corners, True)
    window_name = "Chessboard"
    cv2.imshow(window_name, temp)
    # block until any key is pressed, then tear the window down
    if cv2.waitKey(0):
        cv2.destroyWindow(window_name)
def take(self, indexer, axis=1, verify=True, convert=True):
    """Take items along any axis.

    :param indexer: slice or array-like of integer positions to take
    :param axis: axis number to take along (default 1)
    :param verify: when True, check all indices are within bounds
    :param convert: when True, normalize negative indices via
        ``maybe_convert_indices``
    :return: a new manager reindexed along ``axis``
    """
    self._consolidate_inplace()
    # normalize the indexer to an int64 ndarray (slices are expanded)
    indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
                         dtype='int64')
               if isinstance(indexer, slice)
               else np.asanyarray(indexer, dtype='int64'))
    n = self.shape[axis]
    if convert:
        indexer = maybe_convert_indices(indexer, n)
    if verify:
        # after conversion, -1 is no longer a valid "last element" alias
        if ((indexer == -1) | (indexer >= n)).any():
            raise Exception('Indices must be nonzero and less than '
                            'the axis length')
    new_labels = self.axes[axis].take(indexer)
    # allow_dups=True: take positions may legitimately repeat
    return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
                                axis=axis, allow_dups=True)
def back_slash_to_front_converter(string):
    """Replace every ``\\`` in ``string`` with ``/``.

    Non-string or falsy inputs are returned unchanged.

    :param string: single string to modify
    :type string: str
    :return: the converted string, or the input unchanged
    """
    # The previous broad try/except was unreachable: the isinstance
    # guard already covers every input, and str.replace cannot fail.
    if not string or not isinstance(string, str):
        return string
    return string.replace('\\', '/')
def read(self, name, pwd=None):
    """Return the contents of ``name`` as a string of bytes."""
    with self.open(name, "r", pwd) as member:
        return member.read()
def _decode_sense_packet(self, version, packet):
    """Decode a sense packet into the list of sensors.

    The payload (after a 4-byte header) appears to hold a run of 2-byte
    temperature readings followed by a 6-byte trailer; each decoded
    temperature is pushed into the registered sensor at the matching
    index, while RAW sensors receive the untouched packet.
    """
    data = self._sense_packet_to_data(packet)
    offset = 4
    i = 0
    # payload length minus the header offset and 6-byte trailer
    datalen = len(data) - offset - 6
    temp_count = int(datalen / 2)
    temp = []
    # each temperature reading spans a 2-byte pair
    for i in range(temp_count):
        temp_index = i * 2 + offset
        temp.append(self._decode_temp(data[temp_index],
                                      data[temp_index + 1]))
    self._debug(PROP_LOGLEVEL_DEBUG, "T: " + str(temp))
    # fan the decoded values out to the registered sensors
    for sensor in self._sense_sensor:
        if (sensor.sensor_type == PROP_SENSOR_TEMPERATURE):
            sensor.value = temp[sensor.index]
        elif (sensor.sensor_type == PROP_SENSOR_RAW):
            sensor.value = packet
    self._debug(PROP_LOGLEVEL_DEBUG, str(self))
def parsetime(s):
    """Internal function to parse a time in ``HH:MM:SS.mmm`` format.

    Returns:
        a four-tuple ``(hours, minutes, seconds, milliseconds)``

    Raises:
        ValueError: if ``s`` is not a valid timestamp.
    """
    try:
        fields = s.split('.')
        subfields = fields[0].split(':')
        H = int(subfields[0])
        M = int(subfields[1])
        S = int(subfields[2])
        # legacy HH:MM:SS:mmm form: a fourth colon field is milliseconds
        if len(subfields) > 3:
            m = int(subfields[3])
        else:
            m = 0
        # a fractional part after '.' takes precedence
        if len(fields) > 1:
            m = int(fields[1])
        return (H, M, S, m)
    except (ValueError, IndexError, AttributeError):
        # narrowed from a bare ``except:`` so unrelated errors surface
        raise ValueError(
            "Invalid timestamp, must be in HH:MM:SS.mmm format: " + str(s))
def _notify ( self , func , * args , ** kw ) : """Internal helper . Calls the IStreamListener function ' func ' with the given args , guarding around errors ."""
for x in self . listeners : try : getattr ( x , func ) ( * args , ** kw ) except Exception : log . err ( )
def version_variants(version):
    """Given an igraph version number, returns a list of possible version
    number variants to try when looking for a suitable nightly build of
    the C core to download from igraph.org.

    Trailing ".0" components are added as needed so a full
    ``major.minor.patch`` form is always present; e.g. ``"0.8"`` yields
    ``["0.8", "0.8.0"]``.  (Previously a three-component version was
    listed twice.)
    """
    result = [version]
    parts = version.split(".")
    # pad with trailing ".0" up to major.minor.patch
    while len(parts) < 3:
        parts.append("0")
    padded = ".".join(parts)
    if padded != version:
        result.append(padded)
    return result
def retry_erroneous(self, name, properties_update=None):
    # type: (str, dict) -> int
    """Removes the ERRONEOUS state of the given component, and retries a
    validation.

    :param name: Name of the component to retry
    :param properties_update: A dictionary to update the initial
        properties of the component
    :return: The new state of the component
    :raise ValueError: Invalid component name
    """
    with self.__instances_lock:
        if name not in self.__instances:
            raise ValueError(
                "Unknown component instance '{0}'".format(name))
        return self.__instances[name].retry_erroneous(properties_update)
def super_lm_base():
    """Set of hyperparameters for the super_lm language model.

    Builds on ``common_hparams.basic_params1`` and configures a
    model-parallel, Adafactor-trained LM with a repeated
    attention/feed-forward layer stack.
    """
    hparams = common_hparams.basic_params1()
    hparams.hidden_size = 512
    hparams.moe_hidden_sizes = "512"
    hparams.batch_size = 16384
    hparams.max_length = 0
    # All hyperparameters ending in "dropout" are automatically set to 0.0
    # when not in training mode.
    hparams.layer_prepostprocess_dropout = 0.0
    hparams.symbol_dropout = 0.1
    hparams.add_hparam("attention_dropout", 0.0)
    hparams.label_smoothing = 0.0
    hparams.clip_grad_norm = 0.  # i.e. no gradient clipping
    hparams.optimizer = "Adafactor"
    hparams.learning_rate_decay_scheme = "noam"
    hparams.learning_rate = 0.1
    hparams.learning_rate_warmup_steps = 8000
    hparams.initializer_gain = 1.0
    hparams.initializer = "uniform_unit_scaling"
    hparams.weight_decay = 0.0
    hparams.shared_embedding_and_softmax_weights = False
    # "n": normalize before each sublayer; "da": dropout + residual add after
    hparams.layer_preprocess_sequence = "n"
    hparams.layer_postprocess_sequence = "da"
    # we only want one data shard.
    hparams.no_data_parallelism = True
    # bypass the symbol modality so that we can use model parallelism.
    hparams.bottom = {
        "inputs": modalities.identity_bottom,
        "targets": modalities.identity_bottom,
    }
    hparams.top = {
        "targets": modalities.identity_top,
    }
    hparams.add_hparam("filter_size", 512)
    hparams.add_hparam("mix_fraction", 0.5)
    # attention-related flags
    hparams.add_hparam("multihead_attention_num_heads", 4)
    hparams.add_hparam("multihead_attention_key_channels", 0)
    hparams.add_hparam("multihead_attention_value_channels", 0)
    hparams.add_hparam("pos", "timing")  # timing, none
    # 4 repetitions of (attention block, feed-forward block), then a
    # final normalized feed-forward layer
    hparams.add_hparam(
        "layers",
        ("n,att,m,d,a," "n,ffn,m,d,a,") * 4 + "n,ffn,d")
    # Number of model shards - each one has separate parameters.
    # Changing this number invalidates checkpoints.
    hparams.add_hparam("num_model_shards", 8)
    hparams.add_hparam("diet_experts", False)
    return hparams
def fetch_objects(self, oids):
    '''Fetch models from a list of identifiers.

    Default implementation performs a bulk query on identifiers.
    Override this method to customize the objects retrieval.
    '''
    fetched = self.model.objects.in_bulk(oids)
    # any length mismatch means at least one id did not resolve
    if len(fetched) != len(oids):
        unknown = set(oids) - set(fetched)
        msg = _('Unknown identifiers: {identifiers}').format(
            identifiers=', '.join(str(oid) for oid in unknown))
        raise validators.ValidationError(msg)
    # preserve the caller's ordering
    return [fetched[oid] for oid in oids]