idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
47,000 | def add_segment ( self , name , model_expression = None , ytransform = 'default' ) : if not model_expression : if self . default_model_expr is None : raise ValueError ( 'No default model available, ' 'you must supply a model experssion.' ) model_expression = self . default_model_expr if ytransform == 'default' : ytransform = self . default_ytransform self . _group . add_model_from_params ( name , None , None , model_expression , ytransform ) logger . debug ( 'added segment {} to model {}' . format ( name , self . name ) ) | Add a new segment with its own model expression and ytransform . |
47,001 | def fit ( self , data , debug = False ) : data = util . apply_filter_query ( data , self . fit_filters ) unique = data [ self . segmentation_col ] . unique ( ) value_counts = data [ self . segmentation_col ] . value_counts ( ) gone = set ( self . _group . models ) - set ( unique ) for g in gone : del self . _group . models [ g ] for x in unique : if x not in self . _group . models and value_counts [ x ] > self . min_segment_size : self . add_segment ( x ) with log_start_finish ( 'fitting models in segmented model {}' . format ( self . name ) , logger ) : return self . _group . fit ( data , debug = debug ) | Fit each segment . Segments that have not already been explicitly added will be automatically added with default model and ytransform . |
47,002 | def columns_used ( self ) : return list ( tz . unique ( tz . concatv ( util . columns_in_filters ( self . fit_filters ) , util . columns_in_filters ( self . predict_filters ) , util . columns_in_formula ( self . default_model_expr ) , self . _group . columns_used ( ) , [ self . segmentation_col ] ) ) ) | Returns all the columns used across all models in the group for filtering and in the model expression . |
47,003 | def find_movers ( choosers , rates , rate_column ) : logger . debug ( 'start: find movers for relocation' ) relocation_rates = pd . Series ( np . zeros ( len ( choosers ) ) , index = choosers . index ) for _ , row in rates . iterrows ( ) : indexes = util . filter_table ( choosers , row , ignore = { rate_column } ) . index relocation_rates . loc [ indexes ] = row [ rate_column ] movers = relocation_rates . index [ relocation_rates > np . random . random ( len ( choosers ) ) ] logger . debug ( 'picked {} movers for relocation' . format ( len ( movers ) ) ) logger . debug ( 'finish: find movers for relocation' ) return movers | Returns an array of the indexes of the choosers that are slated to move . |
47,004 | def _calculate_adjustment ( lcm , choosers , alternatives , alt_segmenter , clip_change_low , clip_change_high , multiplier_func = None ) : logger . debug ( 'start: calculate supply and demand price adjustment ratio' ) demand = lcm . summed_probabilities ( choosers , alternatives ) demand = demand . groupby ( alt_segmenter . loc [ demand . index ] . values ) . sum ( ) supply = alt_segmenter . value_counts ( ) if multiplier_func is not None : multiplier , finished = multiplier_func ( demand , supply ) else : multiplier , finished = ( demand / supply ) , False multiplier = multiplier . clip ( clip_change_low , clip_change_high ) alts_muliplier = multiplier . loc [ alt_segmenter ] alts_muliplier . index = alt_segmenter . index logger . debug ( ( 'finish: calculate supply and demand price adjustment multiplier ' 'with mean multiplier {}' ) . format ( multiplier . mean ( ) ) ) return alts_muliplier , multiplier , finished | Calculate adjustments to prices to compensate for supply and demand effects . |
47,005 | def supply_and_demand ( lcm , choosers , alternatives , alt_segmenter , price_col , base_multiplier = None , clip_change_low = 0.75 , clip_change_high = 1.25 , iterations = 5 , multiplier_func = None ) : logger . debug ( 'start: calculating supply and demand price adjustment' ) alternatives = alternatives . copy ( ) if isinstance ( alt_segmenter , str ) : alt_segmenter = alternatives [ alt_segmenter ] elif isinstance ( alt_segmenter , np . array ) : alt_segmenter = pd . Series ( alt_segmenter , index = alternatives . index ) choosers , alternatives = lcm . apply_predict_filters ( choosers , alternatives ) alt_segmenter = alt_segmenter . loc [ alternatives . index ] if base_multiplier is not None : bm = base_multiplier . loc [ alt_segmenter ] bm . index = alt_segmenter . index alternatives [ price_col ] = alternatives [ price_col ] * bm base_multiplier = base_multiplier . copy ( ) for _ in range ( iterations ) : alts_muliplier , submarkets_multiplier , finished = _calculate_adjustment ( lcm , choosers , alternatives , alt_segmenter , clip_change_low , clip_change_high , multiplier_func = multiplier_func ) alternatives [ price_col ] = alternatives [ price_col ] * alts_muliplier if base_multiplier is None : base_multiplier = pd . Series ( np . ones ( len ( submarkets_multiplier ) ) , index = submarkets_multiplier . index ) base_multiplier *= submarkets_multiplier if finished : break logger . debug ( 'finish: calculating supply and demand price adjustment' ) return alternatives [ price_col ] , base_multiplier | Adjust real estate prices to compensate for supply and demand effects . |
47,006 | def _max_form ( f , colname ) : df = f . stack ( level = 0 ) [ [ colname ] ] . stack ( ) . unstack ( level = 1 ) . reset_index ( level = 1 , drop = True ) return df . idxmax ( axis = 1 ) | Assumes dataframe with hierarchical columns with first index equal to the use and second index equal to the attribute . |
47,007 | def keep_form_with_max_profit ( self , forms = None ) : f = self . feasibility if forms is not None : f = f [ forms ] if len ( f ) > 0 : mu = self . _max_form ( f , "max_profit" ) indexes = [ tuple ( x ) for x in mu . reset_index ( ) . values ] else : indexes = [ ] df = f . stack ( level = 0 ) . loc [ indexes ] df . index . names = [ "parcel_id" , "form" ] df = df . reset_index ( level = 1 ) return df | This converts the dataframe which shows all profitable forms to the form with the greatest profit so that more profitable forms outcompete less profitable forms . |
47,008 | def compute_units_to_build ( num_agents , num_units , target_vacancy ) : print ( "Number of agents: {:,}" . format ( num_agents ) ) print ( "Number of agent spaces: {:,}" . format ( int ( num_units ) ) ) assert target_vacancy < 1.0 target_units = int ( max ( num_agents / ( 1 - target_vacancy ) - num_units , 0 ) ) print ( "Current vacancy = {:.2f}" . format ( 1 - num_agents / float ( num_units ) ) ) print ( "Target vacancy = {:.2f}, target of new units = {:,}" . format ( target_vacancy , target_units ) ) return target_units | Compute number of units to build to match target vacancy . |
47,009 | def pick ( self , form , target_units , parcel_size , ave_unit_size , current_units , max_parcel_size = 200000 , min_unit_size = 400 , drop_after_build = True , residential = True , bldg_sqft_per_job = 400.0 , profit_to_prob_func = None ) : if len ( self . feasibility ) == 0 : return if form is None : df = self . feasibility elif isinstance ( form , list ) : df = self . keep_form_with_max_profit ( form ) else : df = self . feasibility [ form ] df = df [ df . max_profit_far > 0 ] ave_unit_size [ ave_unit_size < min_unit_size ] = min_unit_size df [ "ave_unit_size" ] = ave_unit_size df [ "parcel_size" ] = parcel_size df [ 'current_units' ] = current_units df = df [ df . parcel_size < max_parcel_size ] df [ 'residential_units' ] = ( df . residential_sqft / df . ave_unit_size ) . round ( ) df [ 'job_spaces' ] = ( df . non_residential_sqft / bldg_sqft_per_job ) . round ( ) if residential : df [ 'net_units' ] = df . residential_units - df . current_units else : df [ 'net_units' ] = df . job_spaces - df . current_units df = df [ df . net_units > 0 ] if len ( df ) == 0 : print ( "WARNING THERE ARE NO FEASIBLE BUILDING TO CHOOSE FROM" ) return print ( "Sum of net units that are profitable: {:,}" . format ( int ( df . net_units . sum ( ) ) ) ) if profit_to_prob_func : p = profit_to_prob_func ( df ) else : df [ 'max_profit_per_size' ] = df . max_profit / df . parcel_size p = df . max_profit_per_size . values / df . max_profit_per_size . sum ( ) if df . net_units . sum ( ) < target_units : print ( "WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO" , "MATCH DEMAND" ) build_idx = df . index . values elif target_units <= 0 : build_idx = [ ] else : choices = np . random . choice ( df . index . values , size = min ( len ( df . index ) , target_units ) , replace = False , p = p ) tot_units = df . net_units . loc [ choices ] . values . cumsum ( ) ind = int ( np . 
searchsorted ( tot_units , target_units , side = "left" ) ) + 1 build_idx = choices [ : ind ] if drop_after_build : self . feasibility = self . feasibility . drop ( build_idx ) new_df = df . loc [ build_idx ] new_df . index . name = "parcel_id" return new_df . reset_index ( ) | Choose the buildings from the list that are feasible to build in order to match the specified demand . |
47,010 | def _analyze_root_causes ( self ) : causes = { } for a in self . anomalies : try : causes [ a ] = self . correlations [ a ] [ 0 ] except IndexError : raise exceptions . InvalidDataFormat ( 'luminol.luminol: dict correlations contains empty list.' ) self . causes = causes | Conduct root cause analysis . The first metric of the list is taken as the root cause right now . |
47,011 | def _sanity_check ( self ) : if len ( self . time_series_a ) < 2 or len ( self . time_series_b ) < 2 : raise exceptions . NotEnoughDataPoints ( 'luminol.Correlator: Too few data points!' ) | Check if the time series have more than two data points . |
47,012 | def _correlate ( self ) : a = self . algorithm ( ** self . algorithm_params ) self . correlation_result = a . run ( ) | Run correlation algorithm . |
47,013 | def _analyze ( self ) : output = defaultdict ( list ) output_by_name = defaultdict ( list ) scores = self . anomaly_detector . get_all_scores ( ) if self . anomalies : for anomaly in self . anomalies : metrix_scores = scores start_t , end_t = anomaly . get_time_window ( ) t = anomaly . exact_timestamp room = ( end_t - start_t ) / 2 if not room : room = 30 extended_start_t = start_t - room extended_end_t = end_t + room metrix_scores_cropped = metrix_scores . crop ( extended_start_t , extended_end_t ) while len ( metrix_scores_cropped ) < 2 : extended_start_t = extended_start_t - room extended_end_t = extended_end_t + room metrix_scores_cropped = metrix_scores . crop ( extended_start_t , extended_end_t ) for entry in self . related_metrices : try : entry_correlation_result = Correlator ( self . metrix , entry , time_period = ( extended_start_t , extended_end_t ) , use_anomaly_score = True ) . get_correlation_result ( ) record = extended_start_t , extended_end_t , entry_correlation_result . __dict__ , entry record_by_name = extended_start_t , extended_end_t , entry_correlation_result . __dict__ output [ t ] . append ( record ) output_by_name [ entry ] . append ( record_by_name ) except exceptions . NotEnoughDataPoints : pass self . output = output self . output_by_name = output_by_name | Analyzes if a matrix has anomalies . If any anomaly is found determine if the matrix correlates with any other matrixes . To be implemented . |
47,014 | def _set_scores ( self ) : anom_scores_ema = self . exp_avg_detector . run ( ) anom_scores_deri = self . derivative_detector . run ( ) anom_scores = { } for timestamp in anom_scores_ema . timestamps : anom_scores [ timestamp ] = max ( anom_scores_ema [ timestamp ] , anom_scores_ema [ timestamp ] * DEFAULT_DETECTOR_EMA_WEIGHT + anom_scores_deri [ timestamp ] * ( 1 - DEFAULT_DETECTOR_EMA_WEIGHT ) ) if anom_scores_ema [ timestamp ] > DEFAULT_DETECTOR_EMA_SIGNIFICANT : anom_scores [ timestamp ] = max ( anom_scores [ timestamp ] , anom_scores_deri [ timestamp ] ) self . anom_scores = TimeSeries ( self . _denoise_scores ( anom_scores ) ) | Set anomaly scores using a weighted sum . |
47,015 | def _compute_derivatives ( self ) : derivatives = [ ] for i , ( timestamp , value ) in enumerate ( self . time_series_items ) : if i > 0 : pre_item = self . time_series_items [ i - 1 ] pre_timestamp = pre_item [ 0 ] pre_value = pre_item [ 1 ] td = timestamp - pre_timestamp derivative = ( value - pre_value ) / td if td != 0 else value - pre_value derivative = abs ( derivative ) derivatives . append ( derivative ) if derivatives : derivatives . insert ( 0 , derivatives [ 0 ] ) self . derivatives = derivatives | Compute derivatives of the time series . |
47,016 | def _sanity_check ( self ) : windows = self . lag_window_size + self . future_window_size if ( not self . lag_window_size or not self . future_window_size or self . time_series_length < windows or windows < DEFAULT_BITMAP_MINIMAL_POINTS_IN_WINDOWS ) : raise exceptions . NotEnoughDataPoints if self . lag_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS : self . lag_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS if self . future_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS : self . future_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS | Check if there are enough data points . |
47,017 | def _generate_SAX ( self ) : sections = { } self . value_min = self . time_series . min ( ) self . value_max = self . time_series . max ( ) section_height = ( self . value_max - self . value_min ) / self . precision for section_number in range ( self . precision ) : sections [ section_number ] = self . value_min + section_number * section_height self . sax = '' . join ( self . _generate_SAX_single ( sections , value ) for value in self . time_series . values ) | Generate SAX representation for all values of the time series . |
47,018 | def _set_scores ( self ) : anom_scores = { } self . _generate_SAX ( ) self . _construct_all_SAX_chunk_dict ( ) length = self . time_series_length lws = self . lag_window_size fws = self . future_window_size for i , timestamp in enumerate ( self . time_series . timestamps ) : if i < lws or i > length - fws : anom_scores [ timestamp ] = 0 else : anom_scores [ timestamp ] = self . _compute_anom_score_between_two_windows ( i ) self . anom_scores = TimeSeries ( self . _denoise_scores ( anom_scores ) ) | Compute anomaly scores for the time series by sliding both lagging window and future window . |
47,019 | def _detect_correlation ( self ) : correlations = [ ] shifted_correlations = [ ] self . time_series_a . normalize ( ) self . time_series_b . normalize ( ) a , b = self . time_series_a . align ( self . time_series_b ) a_values , b_values = a . values , b . values a_avg , b_avg = a . average ( ) , b . average ( ) a_stdev , b_stdev = a . stdev ( ) , b . stdev ( ) n = len ( a ) denom = a_stdev * b_stdev * n allowed_shift_step = self . _find_allowed_shift ( a . timestamps ) if allowed_shift_step : shift_upper_bound = allowed_shift_step shift_lower_bound = - allowed_shift_step else : shift_upper_bound = 1 shift_lower_bound = 0 for delay in range ( shift_lower_bound , shift_upper_bound ) : delay_in_seconds = a . timestamps [ abs ( delay ) ] - a . timestamps [ 0 ] if delay < 0 : delay_in_seconds = - delay_in_seconds s = 0 for i in range ( n ) : j = i + delay if j < 0 or j >= n : continue else : s += ( ( a_values [ i ] - a_avg ) * ( b_values [ j ] - b_avg ) ) r = s / denom if denom != 0 else s correlations . append ( [ delay_in_seconds , r ] ) if self . max_shift_milliseconds : shifted_correlations . append ( r * ( 1 + float ( delay_in_seconds ) / self . max_shift_milliseconds * self . shift_impact ) ) else : shifted_correlations . append ( r ) max_correlation = list ( max ( correlations , key = lambda k : k [ 1 ] ) ) max_shifted_correlation = max ( shifted_correlations ) max_correlation . append ( max_shifted_correlation ) self . correlation_result = CorrelationResult ( * max_correlation ) | Detect correlation by computing correlation coefficients for all allowed shift steps then take the maximum . |
47,020 | def _compute_anom_data_using_window ( self ) : anom_scores = { } values = self . time_series . values stdev = numpy . std ( values ) for i , ( timestamp , value ) in enumerate ( self . time_series_items ) : if i < self . lag_window_size : anom_score = self . _compute_anom_score ( values [ : i + 1 ] , value ) else : anom_score = self . _compute_anom_score ( values [ i - self . lag_window_size : i + 1 ] , value ) if stdev : anom_scores [ timestamp ] = anom_score / stdev else : anom_scores [ timestamp ] = anom_score self . anom_scores = TimeSeries ( self . _denoise_scores ( anom_scores ) ) | Compute anomaly scores using a lagging window . |
47,021 | def _compute_anom_data_decay_all ( self ) : anom_scores = { } values = self . time_series . values ema = utils . compute_ema ( self . smoothing_factor , values ) stdev = numpy . std ( values ) for i , ( timestamp , value ) in enumerate ( self . time_series_items ) : anom_score = abs ( ( value - ema [ i ] ) / stdev ) if stdev else value - ema [ i ] anom_scores [ timestamp ] = anom_score self . anom_scores = TimeSeries ( self . _denoise_scores ( anom_scores ) ) | Compute anomaly scores using a lagging window covering all the data points before . |
47,022 | def _generic_binary_op ( self , other , op ) : output = { } if isinstance ( other , TimeSeries ) : for key , value in self . items ( ) : if key in other : try : result = op ( value , other [ key ] ) if result is NotImplemented : other_type = type ( other [ key ] ) other_op = vars ( other_type ) . get ( op . __name__ ) if other_op : output [ key ] = other_op ( other_type ( value ) , other [ key ] ) else : output [ key ] = result except ZeroDivisionError : continue else : for key , value in self . items ( ) : try : result = op ( value , other ) if result is NotImplemented : other_type = type ( other ) other_op = vars ( other_type ) . get ( op . __name__ ) if other_op : output [ key ] = other_op ( other_type ( value ) , other ) else : output [ key ] = result except ZeroDivisionError : continue if output : return TimeSeries ( output ) else : raise ValueError ( 'TimeSeries data was empty or invalid.' ) | Perform the method operation specified in the op parameter on the values within the instance s time series values and either another time series or a constant number value . |
47,023 | def _get_value_type ( self , other ) : if self . values : return type ( self . values [ 0 ] ) elif isinstance ( other , TimeSeries ) and other . values : return type ( other . values [ 0 ] ) else : raise ValueError ( 'Cannot perform arithmetic on empty time series.' ) | Get the object type of the value within the values portion of the time series . |
47,024 | def smooth ( self , smoothing_factor ) : forward_smooth = { } backward_smooth = { } output = { } if self : pre = self . values [ 0 ] next = self . values [ - 1 ] for key , value in self . items ( ) : forward_smooth [ key ] = smoothing_factor * pre + ( 1 - smoothing_factor ) * value pre = forward_smooth [ key ] for key , value in reversed ( self . items ( ) ) : backward_smooth [ key ] = smoothing_factor * next + ( 1 - smoothing_factor ) * value next = backward_smooth [ key ] for key in forward_smooth . keys ( ) : output [ key ] = ( forward_smooth [ key ] + backward_smooth [ key ] ) / 2 return TimeSeries ( output ) | return a new time series which is a exponential smoothed version of the original data series . soomth forward once backward once and then take the average . |
47,025 | def add_offset ( self , offset ) : self . timestamps = [ ts + offset for ts in self . timestamps ] | Return a new time series with all timestamps incremented by some offset . |
47,026 | def normalize ( self ) : maximum = self . max ( ) if maximum : self . values = [ value / maximum for value in self . values ] | Return a new time series with all values normalized to 0 to 1 . |
47,027 | def crop ( self , start_timestamp , end_timestamp ) : output = { } for key , value in self . items ( ) : if key >= start_timestamp and key <= end_timestamp : output [ key ] = value if output : return TimeSeries ( output ) else : raise ValueError ( 'TimeSeries data was empty or invalid.' ) | Return a new TimeSeries object contains all the timstamps and values within the specified range . |
47,028 | def average ( self , default = None ) : return numpy . asscalar ( numpy . average ( self . values ) ) if self . values else default | Calculate the average value over the time series . |
47,029 | def median ( self , default = None ) : return numpy . asscalar ( numpy . median ( self . values ) ) if self . values else default | Calculate the median value over the time series . |
47,030 | def max ( self , default = None ) : return numpy . asscalar ( numpy . max ( self . values ) ) if self . values else default | Calculate the maximum value over the time series . |
47,031 | def min ( self , default = None ) : return numpy . asscalar ( numpy . min ( self . values ) ) if self . values else default | Calculate the minimum value over the time series . |
47,032 | def percentile ( self , n , default = None ) : return numpy . asscalar ( numpy . percentile ( self . values , n ) ) if self . values else default | Calculate the Nth Percentile value over the time series . |
47,033 | def stdev ( self , default = None ) : return numpy . asscalar ( numpy . std ( self . values ) ) if self . values else default | Calculate the standard deviation of the time series . |
47,034 | def sum ( self , default = None ) : return numpy . asscalar ( numpy . sum ( self . values ) ) if self . values else default | Calculate the sum of all the values in the times series . |
47,035 | def _detect_anomalies ( self ) : anom_scores = self . anom_scores max_anom_score = anom_scores . max ( ) anomalies = [ ] if max_anom_score : threshold = self . threshold or max_anom_score * self . score_percent_threshold intervals = [ ] start , end = None , None for timestamp , value in anom_scores . iteritems ( ) : if value > threshold : end = timestamp if not start : start = timestamp elif start and end is not None : intervals . append ( [ start , end ] ) start = None end = None if start is not None : intervals . append ( [ start , end ] ) for interval_start , interval_end in intervals : interval_series = anom_scores . crop ( interval_start , interval_end ) self . refine_algorithm_params [ 'time_series' ] = interval_series refine_algorithm = self . refine_algorithm ( ** self . refine_algorithm_params ) scores = refine_algorithm . run ( ) max_refine_score = scores . max ( ) max_refine_timestamp = scores . timestamps [ scores . values . index ( max_refine_score ) ] anomaly = Anomaly ( interval_start , interval_end , interval_series . max ( ) , max_refine_timestamp ) anomalies . append ( anomaly ) self . anomalies = anomalies | Detect anomalies using a threshold on anomaly scores . |
47,036 | def handle_response ( response ) : if response . status_code < 400 : return cls = _status_to_exception_type . get ( response . status_code , HttpError ) kwargs = { 'code' : response . status_code , 'method' : response . request . method , 'url' : response . request . url , 'details' : response . text , } if response . headers and 'retry-after' in response . headers : kwargs [ 'retry_after' ] = response . headers . get ( 'retry-after' ) raise cls ( ** kwargs ) | Given a requests . Response object throw the appropriate exception if applicable . |
47,037 | def create ( self , data , ** kwargs ) : self . client . post ( self . url , data = data ) | Create classifitions for specific entity |
47,038 | def create ( self , ** kwargs ) : raise exceptions . MethodNotImplemented ( method = self . create , url = self . url , details = 'GUID cannot be duplicated, to create a new GUID use the relationship resource' ) | Raise error since guid cannot be duplicated |
47,039 | def normalize_underscore_case ( name ) : normalized = name . lower ( ) normalized = re . sub ( r'_(\w)' , lambda match : ' ' + match . group ( 1 ) . upper ( ) , normalized ) return normalized [ 0 ] . upper ( ) + normalized [ 1 : ] | Normalize an underscore - separated descriptor to something more readable . |
47,040 | def normalize_camel_case ( name ) : normalized = re . sub ( '([a-z])([A-Z])' , lambda match : ' ' . join ( [ match . group ( 1 ) , match . group ( 2 ) ] ) , name ) return normalized [ 0 ] . upper ( ) + normalized [ 1 : ] | Normalize a camelCase descriptor to something more readable . |
47,041 | def version_tuple ( version ) : if isinstance ( version , str ) : return tuple ( int ( x ) for x in version . split ( '.' ) ) elif isinstance ( version , tuple ) : return version else : raise ValueError ( "Invalid version: %s" % version ) | Convert a version string or tuple to a tuple . |
47,042 | def version_str ( version ) : if isinstance ( version , str ) : return version elif isinstance ( version , tuple ) : return '.' . join ( [ str ( int ( x ) ) for x in version ] ) else : raise ValueError ( "Invalid version: %s" % version ) | Convert a version tuple or string to a string . |
47,043 | def generate_http_basic_token ( username , password ) : token = base64 . b64encode ( '{}:{}' . format ( username , password ) . encode ( 'utf-8' ) ) . decode ( 'utf-8' ) return token | Generates a HTTP basic token from username and password |
47,044 | def identifier ( self ) : if self . primary_key not in self . _data : return 'Unknown' return str ( self . _data [ self . primary_key ] ) | These models have server - generated identifiers . |
47,045 | def url ( self ) : if self . parent is None : pieces = [ self . client . base_url , 'api' , 'atlas' , 'v2' ] else : pieces = [ self . parent . url ] pieces . append ( self . model_class . path ) return '/' . join ( pieces ) | The url for this collection . |
47,046 | def inflate ( self ) : if not self . _is_inflated : self . check_version ( ) for k , v in self . _filter . items ( ) : if '[' in v : self . _filter [ k ] = ast . literal_eval ( v ) self . load ( self . client . get ( self . url , params = self . _filter ) ) self . _is_inflated = True return self | Load the collection from the server if necessary . |
47,047 | def load ( self , response ) : self . _models = [ ] if isinstance ( response , dict ) : for key in response . keys ( ) : model = self . model_class ( self , href = '' ) model . load ( response [ key ] ) self . _models . append ( model ) else : for item in response : model = self . model_class ( self , href = item . get ( 'href' ) ) model . load ( item ) self . _models . append ( model ) | Parse the GET response for the collection . |
47,048 | def create ( self , * args , ** kwargs ) : href = self . url if len ( args ) == 1 : kwargs [ self . model_class . primary_key ] = args [ 0 ] href = '/' . join ( [ href , args [ 0 ] ] ) model = self . model_class ( self , href = href . replace ( 'classifications/' , 'classification/' ) , data = kwargs ) model . create ( ** kwargs ) self . _models . append ( model ) return model | Add a resource to this collection . |
47,049 | def update ( self , ** kwargs ) : self . inflate ( ) for model in self . _models : model . update ( ** kwargs ) return self | Update all resources in this collection . |
47,050 | def delete ( self , ** kwargs ) : self . inflate ( ) for model in self . _models : model . delete ( ** kwargs ) return | Delete all resources in this collection . |
47,051 | def wait ( self , ** kwargs ) : if self . request : self . request . wait ( ** kwargs ) self . request = None return self . inflate ( ) | Wait until any pending asynchronous requests are finished for this collection . |
47,052 | def url ( self ) : if self . _href is not None : return self . _href if self . identifier : path = '/' . join ( [ self . parent . url . replace ( 'classifications/' , 'classficiation/' ) , self . identifier ] ) return path raise exceptions . ClientError ( "Not able to determine object URL" ) | Gets the url for the resource this model represents . |
47,053 | def inflate ( self ) : if not self . _is_inflated : if self . _is_inflating : msg = ( "There is not enough data to inflate this object. " "Need either an href: {} or a {}: {}" ) msg = msg . format ( self . _href , self . primary_key , self . _data . get ( self . primary_key ) ) raise exceptions . ClientError ( msg ) self . _is_inflating = True try : params = self . searchParameters if hasattr ( self , 'searchParameters' ) else { } self . load ( self . client . request ( self . method , self . url , ** params ) ) except Exception : self . load ( self . _data ) self . _is_inflated = True self . _is_inflating = False return self | Load the resource from the server if not already loaded . |
47,054 | def load ( self , response ) : if 'href' in response : self . _href = response . pop ( 'href' ) if self . data_key and self . data_key in response : self . _data . update ( response . pop ( self . data_key ) ) for rel in [ x for x in self . relationships if x in response and response [ x ] ] : rel_class = self . relationships [ rel ] collection = rel_class . collection_class ( self . client , rel_class , parent = self ) self . _relationship_cache [ rel ] = collection ( response [ rel ] ) else : self . _data . update ( response ) | The load method parses the raw JSON response from the server . |
47,055 | def delete ( self , ** kwargs ) : self . method = 'delete' if len ( kwargs ) > 0 : self . load ( self . client . delete ( self . url , params = kwargs ) ) else : self . load ( self . client . delete ( self . url ) ) self . parent . remove ( self ) return | Delete a resource by issuing a DELETE http request against it . |
47,056 | def publish ( obj , event , event_state , ** kwargs ) : if len ( EVENT_HANDLERS ) == 0 : return if inspect . isclass ( obj ) : pub_cls = obj else : pub_cls = obj . __class__ potential = [ x . __name__ for x in inspect . getmro ( pub_cls ) ] fallbacks = None callbacks = [ ] for cls in potential : event_key = '.' . join ( [ cls , event , event_state ] ) backup_key = '.' . join ( [ cls , event , states . ANY ] ) if event_key in EVENT_HANDLERS : callbacks = EVENT_HANDLERS [ event_key ] break elif fallbacks is None and backup_key in EVENT_HANDLERS : fallbacks = EVENT_HANDLERS [ backup_key ] if fallbacks is not None : callbacks = fallbacks for callback in callbacks : callback ( obj , ** kwargs ) return | Publish an event from an object . |
47,057 | def subscribe ( obj , event , callback , event_state = None ) : if inspect . isclass ( obj ) : cls = obj . __name__ else : cls = obj . __class__ . __name__ if event_state is None : event_state = states . ANY event_key = '.' . join ( [ cls , event , event_state ] ) if event_key not in EVENT_HANDLERS : EVENT_HANDLERS [ event_key ] = [ ] EVENT_HANDLERS [ event_key ] . append ( callback ) return | Subscribe an event from an class . |
47,058 | def clean_name ( self , suffix = True , prefix = False , middle = False , multi = False ) : "return cleared version of the business name" name = self . business_name name = self . string_stripper ( name ) loname = name . lower ( ) for item in suffix_sort : if suffix : if loname . endswith ( " " + item ) : start = loname . find ( item ) end = len ( item ) name = name [ 0 : - end - 1 ] name = self . string_stripper ( name ) if multi == False : break if prefix : if loname . startswith ( item + ' ' ) : name = name [ len ( item ) + 1 : ] if multi == False : break if middle : term = ' ' + item + ' ' if term in loname : start = loname . find ( term ) end = start + len ( term ) name = name [ : start ] + " " + name [ end : ] if multi == False : break return self . string_stripper ( name ) | return cleared version of the business name |
47,059 | def _add_nodes ( self ) : for n , atom in enumerate ( self . ast . select ( 'atom' ) ) : self . add_node ( n , atom = atom ) self . _atom_indices [ id ( atom ) ] = n | Add all atoms in the SMARTS string as nodes in the graph . |
47,060 | def _add_edges ( self , ast_node , trunk = None ) : atom_indices = self . _atom_indices for atom in ast_node . tail : if atom . head == 'atom' : atom_idx = atom_indices [ id ( atom ) ] if atom . is_first_kid and atom . parent ( ) . head == 'branch' : trunk_idx = atom_indices [ id ( trunk ) ] self . add_edge ( atom_idx , trunk_idx ) if not atom . is_last_kid : if atom . next_kid . head == 'atom' : next_idx = atom_indices [ id ( atom . next_kid ) ] self . add_edge ( atom_idx , next_idx ) elif atom . next_kid . head == 'branch' : trunk = atom else : return elif atom . head == 'branch' : self . _add_edges ( atom , trunk ) | Add all bonds in the SMARTS string as edges in the graph . |
def _add_label_edges(self):
    """Add edges between atoms sharing a ring-closure atom label."""
    labels = self.ast.select('atom_label')
    if not labels:
        return
    # Group the parent atoms carrying each label digit; a ring-closure
    # digit is expected to appear on exactly two atoms.
    label_digits = defaultdict(list)
    for label in labels:
        for digit in list(label.tail[0]):
            label_digits[digit].append(label.parent())
    for digit, (atom1, atom2) in label_digits.items():
        self.add_edge(self._atom_indices[id(atom1)],
                      self._atom_indices[id(atom2)])
def find_matches(self, topology):
    """Yield indices of atoms matching this SMARTS pattern in a topology."""
    ring_tokens = ['ring_size', 'ring_count']
    has_ring_rules = any(self.ast.select(token) for token in ring_tokens)
    _prepare_atoms(topology, compute_cycles=has_ring_rules)

    # Build a graph of the whole topology: nodes are atom indices,
    # edges are bonds.
    top_graph = nx.Graph()
    top_graph.add_nodes_from((atom.index, {'atom': atom})
                             for atom in topology.atoms())
    top_graph.add_edges_from((bond[0].index, bond[1].index)
                             for bond in topology.bonds())

    if self._graph_matcher is None:
        atom = nx.get_node_attributes(self, name='atom')[0]
        # Single plain element symbol lets the matcher prune candidates.
        if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
            try:
                element = atom.select('atom_symbol').strees[0].tail[0]
            except IndexError:
                try:
                    atomic_num = atom.select('atomic_num').strees[0].tail[0]
                    element = pt.Element[int(atomic_num)]
                except IndexError:
                    element = None
        else:
            element = None
        self._graph_matcher = SMARTSMatcher(top_graph, self,
                                            node_match=self._node_match,
                                            element=element)

    seen = set()
    for mapping in self._graph_matcher.subgraph_isomorphisms_iter():
        # Invert the mapping so pattern node 0 gives the anchor atom index.
        mapping = {node_id: atom_id for atom_id, node_id in mapping.items()}
        anchor = mapping[0]
        if anchor not in seen:
            seen.add(anchor)
            yield anchor
def candidate_pairs_iter(self):
    """Iterate over candidate pairs of nodes in G1 and G2 (VF2 search)."""
    G2_nodes = self.G2_nodes
    # Frontier: nodes adjacent to the current partial mapping, not yet mapped.
    T1_inout = set(self.inout_1) - set(self.core_1)
    T2_inout = set(self.inout_2) - set(self.core_2)
    if T1_inout and T2_inout:
        partner = min(T2_inout)
        for node in T1_inout:
            yield node, partner
    else:
        other_node = min(G2_nodes - set(self.core_2))
        host_nodes = self.valid_nodes if other_node == 0 else self.G1.nodes()
        for node in host_nodes:
            if node not in self.core_1:
                yield node, other_node
def find_atomtypes(topology, forcefield, max_iter=10):
    """Determine atomtypes for all atoms in ``topology``."""
    rules = _load_rules(forcefield)

    # Prune rules whose anchor element cannot occur in this system.
    system_elements = {a.element.symbol for a in topology.atoms()}
    subrules = dict()
    for key, val in rules.items():
        atom = val.node[0]['atom']
        if len(atom.select('atom_symbol')) == 1 and not atom.select('not_expression'):
            try:
                element = atom.select('atom_symbol').strees[0].tail[0]
            except IndexError:
                try:
                    atomic_num = atom.select('atomic_num').strees[0].tail[0]
                    element = pt.Element[int(atomic_num)]
                except IndexError:
                    element = None
        else:
            element = None
        if element is None or element in system_elements:
            subrules[key] = val
    rules = subrules

    _iterate_rules(rules, topology, max_iter=max_iter)
    _resolve_atomtypes(topology)
def _load_rules(forcefield):
    """Load atomtyping rules from a forcefield into SMARTSGraphs."""
    rules = dict()
    for rule_name, smarts in forcefield.atomTypeDefinitions.items():
        overrides = forcefield.atomTypeOverrides.get(rule_name)
        overrides = set(overrides) if overrides is not None else set()
        rules[rule_name] = SMARTSGraph(smarts_string=smarts,
                                       parser=forcefield.parser,
                                       name=rule_name,
                                       overrides=overrides)
    return rules
47,066 | def _iterate_rules ( rules , topology , max_iter ) : atoms = list ( topology . atoms ( ) ) for _ in range ( max_iter ) : max_iter -= 1 found_something = False for rule in rules . values ( ) : for match_index in rule . find_matches ( topology ) : atom = atoms [ match_index ] if rule . name not in atom . whitelist : atom . whitelist . add ( rule . name ) atom . blacklist |= rule . overrides found_something = True if not found_something : break else : warn ( "Reached maximum iterations. Something probably went wrong." ) | Iteratively run all the rules until the white - and backlists converge . |
47,067 | def _resolve_atomtypes ( topology ) : for atom in topology . atoms ( ) : atomtype = [ rule_name for rule_name in atom . whitelist - atom . blacklist ] if len ( atomtype ) == 1 : atom . id = atomtype [ 0 ] elif len ( atomtype ) > 1 : raise FoyerError ( "Found multiple types for atom {} ({}): {}." . format ( atom . index , atom . element . name , atomtype ) ) else : raise FoyerError ( "Found no types for atom {} ({})." . format ( atom . index , atom . element . name ) ) | Determine the final atomtypes from the white - and blacklists . |
def generate_topology(non_omm_topology, non_element_types=None, residues=None):
    """Create an OpenMM Topology from another supported topology structure.

    :param non_omm_topology: a parmed.Structure or mbuild.Compound.
    :param non_element_types: mapping of non-element atom names; defaults
        to an empty set.
    :param residues: optional residue names forwarded to mbuild's to_parmed.
    :raises FoyerError: for unrecognized topology types.
    """
    if non_element_types is None:
        non_element_types = set()

    if isinstance(non_omm_topology, pmd.Structure):
        return _topology_from_parmed(non_omm_topology, non_element_types)
    if has_mbuild:
        mb = import_('mbuild')
        # BUG FIX: the original wrote `if (non_omm_topology, mb.Compound):`,
        # a two-element tuple that is always truthy, instead of an
        # isinstance() check -- so any object was treated as a Compound.
        if isinstance(non_omm_topology, mb.Compound):
            pmd_structure = non_omm_topology.to_parmed(residues=residues)
            return _topology_from_parmed(pmd_structure, non_element_types)
    raise FoyerError('Unknown topology format: {}\n'
                     'Supported formats are: '
                     '"parmed.Structure", '
                     '"mbuild.Compound", '
                     '"openmm.app.Topology"'.format(non_omm_topology))
def _topology_from_parmed(structure, non_element_types):
    """Convert a ParmEd Structure to an OpenMM (Topology, positions) pair."""
    topology = app.Topology()

    residues = dict()
    for pmd_residue in structure.residues:
        chain = topology.addChain()
        residues[pmd_residue] = topology.addResidue(pmd_residue.name, chain)

    atoms = dict()
    for pmd_atom in structure.atoms:
        name = pmd_atom.name
        if name in non_element_types:
            element = non_element_types[name]
        elif (isinstance(pmd_atom.atomic_number, int)
                and pmd_atom.atomic_number != 0):
            element = elem.Element.getByAtomicNumber(pmd_atom.atomic_number)
        else:
            # Fall back to interpreting the atom name as an element symbol.
            element = elem.Element.getBySymbol(pmd_atom.name)
        omm_atom = topology.addAtom(name, element, residues[pmd_atom.residue])
        atoms[pmd_atom] = omm_atom
        omm_atom.bond_partners = []

    for bond in structure.bonds:
        atom1, atom2 = atoms[bond.atom1], atoms[bond.atom2]
        topology.addBond(atom1, atom2)
        atom1.bond_partners.append(atom2)
        atom2.bond_partners.append(atom1)

    # Only set the box when at least one component is non-zero.
    if structure.box_vectors and np.any([v._value for v in structure.box_vectors]):
        topology.setPeriodicBoxVectors(structure.box_vectors)

    return topology, structure.positions
def _topology_from_residue(res):
    """Convert a single openmm Residue into a standalone Topology."""
    topology = app.Topology()
    chain = topology.addChain()
    new_res = topology.addResidue(res.name, chain)

    atoms = dict()
    for res_atom in res.atoms():
        new_atom = topology.addAtom(name=res_atom.name,
                                    element=res_atom.element,
                                    residue=new_res)
        atoms[res_atom] = new_atom
        new_atom.bond_partners = []

    for bond in res.bonds():
        atom1, atom2 = atoms[bond.atom1], atoms[bond.atom2]
        topology.addBond(atom1, atom2)
        atom1.bond_partners.append(atom2)
        atom2.bond_partners.append(atom1)

    return topology
47,071 | def _check_independent_residues ( topology ) : for res in topology . residues ( ) : atoms_in_residue = set ( [ atom for atom in res . atoms ( ) ] ) bond_partners_in_residue = [ item for sublist in [ atom . bond_partners for atom in res . atoms ( ) ] for item in sublist ] if not bond_partners_in_residue : continue if set ( atoms_in_residue ) != set ( bond_partners_in_residue ) : return False return True | Check to see if residues will constitute independent graphs . |
47,072 | def _update_atomtypes ( unatomtyped_topology , res_name , prototype ) : for res in unatomtyped_topology . residues ( ) : if res . name == res_name : for old_atom , new_atom_id in zip ( [ atom for atom in res . atoms ( ) ] , [ atom . id for atom in prototype . atoms ( ) ] ) : old_atom . id = new_atom_id | Update atomtypes in residues in a topology using a prototype topology . |
def registerAtomType(self, parameters):
    """Register a new atom type from a parsed <AtomType> parameter dict.

    Records the type's class, mass, and (possibly custom) element, and
    stores optional 'def', 'overrides', 'desc', and 'doi' metadata.

    :raises ValueError: if the type name is already registered.
    """
    name = parameters['name']
    if name in self._atomTypes:
        raise ValueError('Found multiple definitions for atom type: ' + name)
    atom_class = parameters['class']
    mass = _convertParameterToNumber(parameters['mass'])
    element = None
    if 'element' in parameters:
        element, custom = self._create_element(parameters['element'], mass)
        if custom:
            self.non_element_types[element.symbol] = element

    self._atomTypes[name] = self.__class__._AtomType(name, atom_class, mass, element)
    if atom_class in self._atomClasses:
        type_set = self._atomClasses[atom_class]
    else:
        type_set = set()
        self._atomClasses[atom_class] = type_set
    type_set.add(name)
    # '' is the catch-all class containing every registered type.
    self._atomClasses[''].add(name)

    # (The original re-assigned `name = parameters['name']` here; removed.)
    if 'def' in parameters:
        self.atomTypeDefinitions[name] = parameters['def']
    if 'overrides' in parameters:
        overrides = set(atype.strip()
                        for atype in parameters['overrides'].split(','))
        if overrides:
            self.atomTypeOverrides[name] = overrides
    # BUG FIX: the original tested `'des' in parameters` but then read
    # parameters['desc'], which raised KeyError whenever only 'des' existed
    # and silently skipped descriptions keyed as 'desc'.
    if 'desc' in parameters:
        self.atomTypeDesc[name] = parameters['desc']
    if 'doi' in parameters:
        dois = set(doi.strip() for doi in parameters['doi'].split(','))
        self.atomTypeRefs[name] = dois
def run_atomtyping(self, topology, use_residue_map=True):
    """Atomtype the topology, optionally reusing results per residue name.

    When residues are independent graphs, each distinct residue name is
    atomtyped once and the result copied to all residues of that name;
    otherwise the whole topology is atomtyped directly.

    :raises ValueError: if any atom ends up without an atom type.
    """
    if use_residue_map and _check_independent_residues(topology):
        residue_map = dict()
        for res in topology.residues():
            if res.name not in residue_map:
                residue = _topology_from_residue(res)
                find_atomtypes(residue, forcefield=self)
                residue_map[res.name] = residue
        for res_name, prototype in residue_map.items():
            _update_atomtypes(topology, res_name, prototype)
    else:
        find_atomtypes(topology, forcefield=self)

    # BUG FIX: the original checked `all([a.id for a in ...][0])`, which
    # only iterated the characters of the FIRST atom's id (and raised
    # TypeError if that id was None) instead of verifying every atom.
    if not all(atom.id for atom in topology.atoms()):
        raise ValueError('Not all atoms in topology have atom types')
    return topology
def cd(directory):
    """Temporarily change the current working directory.

    NOTE(review): written as a generator; presumably decorated with
    @contextlib.contextmanager at its real definition site -- the decorator
    is not visible here, so none is added.
    """
    old_dir = os.getcwd()
    try:
        os.chdir(directory)
        yield
    finally:
        # Always restore the previous directory, even on error.
        os.chdir(old_dir)
def mkdtemp(hint=''):
    """Create a temporary directory, yield it, then clean it up.

    NOTE(review): written as a generator; presumably decorated with
    @contextlib.contextmanager at its real definition site.
    """
    dirname = tempfile.mkdtemp(prefix='check-manifest-', suffix=hint)
    try:
        yield dirname
    finally:
        # Use the read-only-tolerant rmtree defined in this module.
        rmtree(dirname)
def chmod_plus(path, add_bits=stat.S_IWUSR):
    """Add permission bits to a file's mode, ignoring errors (best effort)."""
    try:
        mode = stat.S_IMODE(os.stat(path).st_mode)
        os.chmod(path, mode | add_bits)
    except OSError:
        # Deliberately best-effort: missing files or permission problems
        # are ignored here and will surface later if they matter.
        pass
def rmtree(path):
    """A version of rmtree that can deal with read-only files and directories."""
    def onerror(func, failed_path, exc_info):
        # Only retry deletion failures; anything else is re-raised.
        if func in (os.remove, os.unlink, os.rmdir):
            if sys.platform != 'win32':
                # The parent must be writable/searchable to delete a child.
                chmod_plus(os.path.dirname(failed_path),
                           stat.S_IWUSR | stat.S_IXUSR)
            chmod_plus(failed_path)
            func(failed_path)
        else:
            raise
    shutil.rmtree(path, onerror=onerror)
def copy_files(filelist, destdir):
    """Copy a list of (relative) files to destdir, preserving structure."""
    for filename in filelist:
        destfile = os.path.join(destdir, filename)
        # Sanity check: the target must stay inside the destination tree.
        assert destfile.startswith(destdir + os.path.sep)
        parent = os.path.dirname(destfile)
        if not os.path.isdir(parent):
            os.makedirs(parent)
        if os.path.isdir(filename):
            os.mkdir(destfile)
        else:
            shutil.copy2(filename, destfile)
def get_one_file_in(dirname):
    """Return the pathname of the single file in ``dirname``.

    :raises Failure: when the directory is empty or holds more than one file.
    """
    files = os.listdir(dirname)
    if not files:
        raise Failure('No files found in %s' % dirname)
    if len(files) > 1:
        raise Failure('More than one file exists in %s:\n%s'
                      % (dirname, '\n'.join(sorted(files))))
    return os.path.join(dirname, files[0])
def unicodify(filename):
    """Return ``filename`` as text, decoding bytes with the locale encoding."""
    if isinstance(filename, bytes):
        return filename.decode(locale.getpreferredencoding())
    return filename
def get_archive_file_list(archive_filename):
    """Return the list of files (with directories added) in an archive.

    :raises Failure: for archive types other than .zip / .tar[.gz|.bz2].
    """
    if archive_filename.endswith('.zip'):
        with closing(zipfile.ZipFile(archive_filename)) as zf:
            return add_directories(zf.namelist())
    if archive_filename.endswith(('.tar.gz', '.tar.bz2', '.tar')):
        with closing(tarfile.open(archive_filename)) as tf:
            # tar member names may be bytes on some platforms; normalize.
            return add_directories([unicodify(n) for n in tf.getnames()])
    ext = os.path.splitext(archive_filename)[-1]
    raise Failure('Unrecognized archive type: %s' % ext)
def strip_toplevel_name(filelist):
    """Strip the common top-level directory name from a file list.

    :raises Failure: if any entry lacks the common prefix.
    """
    if not filelist:
        return filelist
    prefix = filelist[0]
    if '/' in prefix:
        # First entry already contains a path: keep every entry.
        prefix = prefix.partition('/')[0] + '/'
        names = filelist
    else:
        # First entry is the bare top-level directory itself: drop it.
        prefix += '/'
        names = filelist[1:]
    for name in names:
        if not name.startswith(prefix):
            raise Failure("File doesn't have the common prefix (%s): %s"
                          % (name, prefix))
    return [name[len(prefix):] for name in names]
def detect_vcs():
    """Detect the version control system used for the current directory.

    Walks up from the cwd toward the filesystem root, trying each
    supported VCS at every level.

    :raises Failure: when no VCS metadata is found anywhere up the tree.
    """
    location = os.path.abspath('.')
    while True:
        for vcs in Git, Mercurial, Bazaar, Subversion:
            if vcs.detect(location):
                return vcs
        parent = os.path.dirname(location)
        if parent == location:
            # Reached the filesystem root without finding anything.
            raise Failure("Couldn't find version control data"
                          " (git/hg/bzr/svn supported)")
        location = parent
def normalize_name(name):
    """Normalize a VCS-reported filename (separators, encoding, Unicode form)."""
    name = unicodify(os.path.normpath(name))
    if sys.platform == 'darwin':
        # macOS filesystems may report names in NFD; compare in NFC.
        name = unicodedata.normalize('NFC', name)
    return name
def read_config():
    """Read configuration from file, if possible, into the module globals."""
    config = _load_config()
    if config.get(CFG_IGNORE_DEFAULT_RULES[1], False):
        # Wipe the built-in ignore rules in place (other code holds refs).
        del IGNORE[:]
    if CFG_IGNORE[1] in config:
        IGNORE.extend(pat for pat in config[CFG_IGNORE[1]] if pat)
    if CFG_IGNORE_BAD_IDEAS[1] in config:
        IGNORE_BAD_IDEAS.extend(
            pat for pat in config[CFG_IGNORE_BAD_IDEAS[1]] if pat)
def _load_config():
    """Search for config files, read them, and return a settings dict.

    pyproject.toml takes precedence; otherwise setup.cfg then tox.ini are
    consulted.  Returns an empty dict when nothing applies.
    """
    if os.path.exists("pyproject.toml"):
        config = toml.load("pyproject.toml")
        if CFG_SECTION_CHECK_MANIFEST in config.get("tool", {}):
            return config["tool"][CFG_SECTION_CHECK_MANIFEST]

    config_parser = ConfigParser.ConfigParser()
    for filename in ['setup.cfg', 'tox.ini']:
        if (config_parser.read([filename])
                and config_parser.has_section(CFG_SECTION_CHECK_MANIFEST)):
            config = {}
            if config_parser.has_option(*CFG_IGNORE_DEFAULT_RULES):
                config[CFG_IGNORE_DEFAULT_RULES[1]] = config_parser.getboolean(
                    *CFG_IGNORE_DEFAULT_RULES)
            if config_parser.has_option(*CFG_IGNORE):
                config[CFG_IGNORE[1]] = [
                    p.strip()
                    for p in config_parser.get(*CFG_IGNORE).splitlines()]
            if config_parser.has_option(*CFG_IGNORE_BAD_IDEAS):
                config[CFG_IGNORE_BAD_IDEAS[1]] = [
                    p.strip()
                    for p in config_parser.get(
                        *CFG_IGNORE_BAD_IDEAS).splitlines()]
            return config
    return {}
def read_manifest():
    """Read existing configuration from MANIFEST.in into the module globals."""
    if not os.path.isfile('MANIFEST.in'):
        return
    ignore, ignore_regexps = _get_ignore_from_manifest('MANIFEST.in')
    IGNORE.extend(ignore)
    IGNORE_REGEXPS.extend(ignore_regexps)
def file_matches(filename, patterns):
    """Does this filename (or its basename) match any of the glob patterns?"""
    basename = os.path.basename(filename)
    return any(fnmatch.fnmatch(filename, pat) or fnmatch.fnmatch(basename, pat)
               for pat in patterns)
def file_matches_regexps(filename, patterns):
    """Does this filename match any of the regular expressions?

    Patterns are anchored at the start of the name (``re.match``).
    """
    for pat in patterns:
        if re.match(pat, filename):
            return True
    return False
def strip_sdist_extras(filelist):
    """Strip generated files that are only present in source distributions."""
    def keep(name):
        # Keep a name only if neither the glob nor the regexp ignore
        # lists match it.
        return (not file_matches(name, IGNORE)
                and not file_matches_regexps(name, IGNORE_REGEXPS))
    return [name for name in filelist if keep(name)]
def find_suggestions(filelist):
    """Suggest MANIFEST.in patterns covering the given missing files.

    Returns (sorted suggestions, files no pattern covered).
    """
    suggestions = set()
    unknowns = []
    for filename in filelist:
        if os.path.isdir(filename):
            # Directories need no suggestion of their own.
            continue
        for pattern, suggestion in SUGGESTIONS:
            if pattern.match(filename) is not None:
                suggestions.add(pattern.sub(suggestion, filename))
                break
        else:
            unknowns.append(filename)
    return sorted(suggestions), unknowns
def extract_version_from_filename(filename):
    """Extract the version number from an sdist filename."""
    stem = os.path.splitext(os.path.basename(filename))[0]
    if stem.endswith('.tar'):
        # foo-1.0.tar.gz: the first splitext only removed '.gz'.
        stem = os.path.splitext(stem)[0]
    return stem.partition('-')[2]
def zest_releaser_check(data):
    """Check the completeness of MANIFEST.in before a zest.releaser release.

    Interactive hook: asks the user whether to run, and whether to
    continue despite problems; exits with 1 (problems) or 2 (errors).
    """
    from zest.releaser.utils import ask
    source_tree = data['workingdir']
    if not is_package(source_tree):
        # Not a Python package; nothing to check.
        return
    if not ask("Do you want to run check-manifest?"):
        return
    try:
        if not check_manifest(source_tree):
            if not ask("MANIFEST.in has problems. "
                       " Do you want to continue despite that?",
                       default=False):
                sys.exit(1)
    except Failure as e:
        error(str(e))
        if not ask("Something bad happened. "
                   " Do you want to continue despite that?",
                   default=False):
            sys.exit(2)
def get_versioned_files(cls):
    """List all files versioned by git in the current directory."""
    files = cls._git_ls_files()
    # Also include files tracked inside each submodule, prefixed by the
    # submodule's path (with forward slashes, as git reports them).
    for subdir in cls._list_submodules():
        subdir = os.path.relpath(subdir).replace(os.path.sep, '/')
        files += add_prefix_to_each(subdir, cls._git_ls_files(subdir))
    return add_directories(files)
def get_versioned_files(cls):
    """List all files versioned in Bazaar in the current directory."""
    encoding = cls._get_terminal_encoding()
    # -VR: versioned files only, recursively.
    return run(['bzr', 'ls', '-VR'], encoding=encoding).splitlines()
def get_versioned_files(cls):
    """List all files under SVN control in the current directory."""
    # XML output is the only stable way to parse `svn status`.
    output = run(['svn', 'st', '-vq', '--xml'], decode=False)
    tree = ET.XML(output)
    return sorted(entry.get('path')
                  for entry in tree.findall('.//entry')
                  if cls.is_interesting(entry))
def is_interesting(entry):
    """Is this ``svn status`` XML entry a real versioned path worth listing?"""
    if entry.get('path') == '.':
        return False
    status = entry.find('wc-status')
    if status is None:
        warning('svn status --xml parse error: <entry path="%s"> without'
                ' <wc-status>' % entry.get('path'))
        return False
    # Unversioned and external items are not part of this working copy.
    return status.get('item') not in ('unversioned', 'external')
def validate(self):
    """Check that the policy's principals and actions are valid.

    :returns: True when everything checks out.
    :raises InvalidApplicationPolicyError: on any invalid field.
    """
    if not self.principals:
        raise InvalidApplicationPolicyError(
            error_message='principals not provided')
    if not self.actions:
        raise InvalidApplicationPolicyError(
            error_message='actions not provided')
    for principal in self.principals:
        if not self._PRINCIPAL_PATTERN.match(principal):
            raise InvalidApplicationPolicyError(
                error_message='principal should be 12-digit AWS account ID or "*"')
    unsupported_actions = sorted(set(self.actions) - set(self.SUPPORTED_ACTIONS))
    if unsupported_actions:
        raise InvalidApplicationPolicyError(
            error_message='{} not supported'.format(', '.join(unsupported_actions)))
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.