idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
15,200
def _create_config ( cls ) : return cls . app_config . objects . create ( namespace = cls . auto_setup [ 'namespace' ] , * * cls . auto_setup [ 'config_fields' ] )
Creates an ApphookConfig instance
53
7
15,201
def _create_config_translation(cls, config, lang):
    """Create a translation of *config* for language *lang*.

    Activates *lang* on the config, copies every field from
    ``auto_setup['config_translated_fields']`` onto it, and persists the
    translations.
    """
    # initialize=True creates the translation object if it does not exist yet
    config.set_current_language(lang, initialize=True)
    for field, data in cls.auto_setup['config_translated_fields'].items():
        setattr(config, field, data)
    config.save_translations()
Creates a translation for the given ApphookConfig
73
10
15,202
def _setup_pages(cls, config):
    """Create the page structure.

    Creates (or reuses) the apphook config, then for every site language
    creates the home page (if missing) and the apphook page with the proper
    namespace. While running, the aldryn-search URL hook is neutralised so
    page creation does not trigger search indexing, and restored afterwards.
    """
    from cms.exceptions import NoHomeFound
    from cms.models import Page
    from cms.utils import get_language_list
    from django.conf import settings
    from django.utils.translation import override
    app_page = None
    get_url = False
    # temporarily disable aldryn-search URL resolution during page creation
    if getattr(settings, 'ALDRYN_SEARCH_CMS_PAGE', False):
        from aldryn_search.search_indexes import TitleIndex

        def fake_url(self, obj):
            return ''
        get_url = TitleIndex.get_url
        TitleIndex.get_url = fake_url
    site = Site.objects.get_current()
    # 'sites' may be True (all sites) or a collection of site pks
    auto_sites = cls.auto_setup.get('sites', True)
    if auto_sites is True or site.pk in auto_sites:
        if getattr(cls, 'app_config', False):
            configs = cls.app_config.objects.all()
            if not configs.exists():
                config = cls._create_config()
            else:
                config = configs.first()
        # NOTE(review): if cls.app_config is falsy, `config` keeps the value
        # passed by the caller (None from setup()) -- confirm that is intended
        langs = get_language_list(site.pk)
        # only build pages when no page is hooked to this apphook yet
        if not Page.objects.on_site(site.pk).filter(application_urls=cls.__name__).exists():
            for lang in langs:
                with override(lang):
                    if config:
                        if cls.auto_setup['config_translated_fields']:
                            cls._create_config_translation(config, lang)
                        namespace = config.namespace
                    elif cls.app_name:
                        namespace = cls.app_name
                    else:
                        namespace = None
                    try:
                        home = Page.objects.get_home(site.pk).get_draft_object()
                    except NoHomeFound:
                        home = None
                    # newer django-cms marks the homepage via set_as_homepage
                    set_home = hasattr(Page, 'set_as_homepage')
                    home = cls._create_page(
                        home, lang, cls.auto_setup['home title'],
                        site=site, set_home=set_home
                    )
                    app_page = cls._create_page(
                        app_page, lang, cls.auto_setup['page title'],
                        cls.__name__, home, namespace, site=site
                    )
    # restore the original aldryn-search URL hook
    if get_url:
        TitleIndex.get_url = get_url
Create the page structure .
518
5
15,203
def setup ( cls ) : try : if cls . auto_setup and cls . auto_setup . get ( 'enabled' , False ) : if not cls . auto_setup . get ( 'home title' , False ) : warnings . warn ( '"home title" is not set in {0}.auto_setup attribute' . format ( cls ) ) return if not cls . auto_setup . get ( 'page title' , False ) : warnings . warn ( '"page title" is not set in {0}.auto_setup attribute' . format ( cls ) ) return if cls . app_name and not cls . auto_setup . get ( 'namespace' , False ) : warnings . warn ( '"page title" is not set in {0}.auto_setup attribute' . format ( cls ) ) return config = None cls . _setup_pages ( config ) except Exception : # Ignore any error during setup. Worst case: pages are not created, but the instance # won't break pass
Main method to auto setup Apphook
223
7
15,204
def run(self, verbose=True):
    """Run the analysis.

    For every analysis group: run each provider and evaluate every checker
    against it; groups without providers have their checkers evaluated with a
    ``no-data-`` marker. Results are accumulated both on ``self.results`` and
    on each group, and optionally printed as they arrive.
    """
    self.results.clear()
    for analysis_group in self.config.analysis_groups:
        if analysis_group.providers:
            for provider in analysis_group.providers:
                logger.info('Run provider %s', provider.identifier)
                provider.run()
                for checker in analysis_group.checkers:
                    result = self._get_checker_result(analysis_group, checker, provider)
                    self.results.append(result)
                    analysis_group.results.append(result)
                    if verbose:
                        result.print()
        else:
            # no providers: run checkers with a "no data" prefix marker
            for checker in analysis_group.checkers:
                result = self._get_checker_result(analysis_group, checker, nd='no-data-')
                self.results.append(result)
                analysis_group.results.append(result)
                if verbose:
                    result.print()
Run the analysis .
194
4
15,205
def output_tap(self):
    """Output analysis results in TAP format on stdout."""
    tracker = Tracker(streaming=True, stream=sys.stdout)
    for group in self.config.analysis_groups:
        n_providers = len(group.providers)
        n_checkers = len(group.checkers)
        # choose the test-suite label and how each result is described:
        # the "many" side (providers vs checkers) names the individual tests
        if not group.providers and group.checkers:
            test_suite = group.name
            description_lambda = lambda r: r.checker.name
        elif not group.checkers:
            logger.warning('Invalid analysis group (no checkers), skipping')
            continue
        elif n_providers > n_checkers:
            test_suite = group.checkers[0].name
            description_lambda = lambda r: r.provider.name
        else:
            test_suite = group.providers[0].name
            description_lambda = lambda r: r.checker.name
        # NOTE: the lambda is consumed within the same loop iteration, so the
        # usual late-binding closure pitfall does not apply here.
        for result in group.results:
            description = description_lambda(result)
            if result.code == ResultCode.PASSED:
                tracker.add_ok(test_suite, description)
            elif result.code == ResultCode.IGNORED:
                tracker.add_ok(test_suite, description + ' (ALLOWED FAILURE)')
            elif result.code == ResultCode.NOT_IMPLEMENTED:
                tracker.add_not_ok(test_suite, description, 'TODO implement the test')
            elif result.code == ResultCode.FAILED:
                # TAP YAML diagnostics block with per-line message indenting
                tracker.add_not_ok(
                    test_suite, description,
                    diagnostics=' ---\n message: %s\n hint: %s\n ...' % (
                        '\n message: '.join(result.messages.split('\n')),
                        result.checker.hint))
Output analysis results in TAP format .
375
8
15,206
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name.
    Returns None when no suitable font is installed.
    '''
    import os
    import re
    import matplotlib.font_manager

    def stem(path):
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # patterns in order of preference
    preferences = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    for pattern in preferences:
        for candidate in available:
            if re.match(pattern, candidate):
                return stem(candidate)

    return None
r Find an available font to mimic LaTeX and return its name .
180
14
15,207
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory.
    '''
    import os
    import matplotlib

    # style definitions
    # -----------------

    styles = {}

    styles['goose.mplstyle'] = '''
figure.figsize       : 8,6
font.weight          : normal
font.size            : 16
axes.labelsize       : medium
axes.titlesize       : medium
xtick.labelsize      : small
ytick.labelsize      : small
xtick.top            : True
ytick.right          : True
axes.facecolor       : none
axes.prop_cycle      : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize      : medium
legend.fancybox      : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth      : 2
image.cmap           : afmhot
image.interpolation  : nearest
image.origin         : lower
savefig.facecolor    : none
figure.autolayout    : True
errorbar.capsize     : 2
'''

    styles['goose-tick-in.mplstyle'] = '''
xtick.direction      : in
ytick.direction      : in
'''

    styles['goose-tick-lower.mplstyle'] = '''
xtick.top            : False
ytick.right          : False
axes.spines.top      : False
axes.spines.right    : False
'''

    # hoisted: the font lookup was previously performed twice
    serif = find_latex_font_serif()

    if serif is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family          : serif
font.serif           : {serif:s}
font.weight          : bold
font.size            : 18
text.usetex          : true
text.latex.preamble  : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=serif)
    else:
        # BUG FIX: this branch is a plain raw string (no .format() applied), so
        # the doubled braces used to be written literally as "{{amsmath}}",
        # producing an invalid LaTeX preamble; use single braces here.
        styles['goose-latex.mplstyle'] = r'''
font.family          : serif
font.weight          : bold
font.size            : 18
text.usetex          : true
text.latex.preamble  : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname):
        os.makedirs(dirname)

    # write all styles (closing each file handle deterministically)
    for fname, style in styles.items():
        with open(os.path.join(dirname, fname), 'w') as handle:
            handle.write(style)
r Write all goose - styles to the relevant matplotlib configuration directory .
638
15
15,208
def scale_lim(lim, factor=1.05):
    r'''
    Scale limits to be ``factor`` wider, to have a nice plot (default: 5% wider).

    :param lim: limits ``[lower, upper]``; may also be a string such as ``"[0, 1]"``.
    :param factor: total widening factor.
    :returns: the widened limits (a list is modified in place and returned).
    '''
    # convert string "[...,...]"
    if isinstance(lim, str):  # idiom: isinstance instead of type(...) == str
        # NOTE(security): eval() on the input string -- only pass trusted input
        lim = eval(lim)

    # widen symmetrically: (factor - 1) * D in total, half on each side
    D = lim[1] - lim[0]
    lim[0] -= (factor - 1.) / 2. * D
    lim[1] += (factor - 1.) / 2. * D

    return lim
r Scale limits to be 5% wider to have a nice plot .
84
14
15,209
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative
    coordinates correspond to a fraction of the relevant axis. Be sure to set
    the limits and scale before calling this function!

    :param y: scalar or iterable of y-coordinates; ``None`` entries are kept as-is.
    :param axis: matplotlib axis (default: current axis).
    :returns: list (for iterable input) or scalar of relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # - log scale
    # BUG FIX: previously tested axis.get_xscale(); the y-axis scale is the
    # relevant one when transforming y-coordinates.
    if axis.get_yscale() == 'log':
        try:
            return [(np.log10(i) - np.log10(ymin)) / (np.log10(ymax) - np.log10(ymin))
                    if i is not None else i for i in y]
        except TypeError:
            # scalar input (not iterable)
            return (np.log10(y) - np.log10(ymin)) / (np.log10(ymax) - np.log10(ymin))
    # - normal scale
    else:
        try:
            return [(i - ymin) / (ymax - ymin) if i is not None else i for i in y]
        except TypeError:
            # scalar input (not iterable)
            return (y - ymin) / (ymax - ymin)
r Transform absolute y - coordinates to relative y - coordinates . Relative coordinates correspond to a fraction of the relevant axis . Be sure to set the limits and scale before calling this function!
218
36
15,210
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative
    coordinates correspond to a fraction of the relevant axis. Be sure to set
    the limits and scale before calling this function!

    :param x: scalar or iterable of relative x-coordinates; ``None`` entries are kept as-is.
    :param axis: matplotlib axis (default: current axis).
    :returns: list (for iterable input) or scalar of absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    xmin, xmax = axis.get_xlim()

    # transform
    # - log scale
    if axis.get_xscale() == 'log':
        try:
            return [10. ** (np.log10(xmin) + i * (np.log10(xmax) - np.log10(xmin)))
                    if i is not None else i for i in x]
        except TypeError:
            # scalar input (not iterable); previously a bare except
            return 10. ** (np.log10(xmin) + x * (np.log10(xmax) - np.log10(xmin)))
    # - normal scale
    else:
        try:
            return [xmin + i * (xmax - xmin) if i is not None else i for i in x]
        except TypeError:
            # scalar input (not iterable); previously a bare except
            return xmin + x * (xmax - xmin)
r Transform relative x - coordinates to absolute x - coordinates . Relative coordinates correspond to a fraction of the relevant axis . Be sure to set the limits and scale before calling this function!
208
36
15,211
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct
    multiple of the default, scaled per direction and per subplot.
    '''
    # an explicit figsize wins: delegate untouched
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    base_w, base_h = mpl.rcParams['figure.figsize']

    if scale is not None:
        base_w *= scale
        base_h *= scale
    if scale_x is not None:
        base_w *= scale_x
    if scale_y is not None:
        base_h *= scale_y

    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)

    figsize = (ncols * base_w, nrows * base_h)
    return plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize, **kwargs)
r Run matplotlib . pyplot . subplots with figsize set to the correct multiple of the default .
196
24
15,212
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot; with ``units='relative'`` the coordinates are interpreted as a
    fraction of the axes and converted before plotting.
    '''
    if axis is None:
        axis = plt.gca()

    # convert relative -> absolute coordinates when requested
    if units.lower() == 'relative':
        x = rel2abs_x(x, axis)
        y = rel2abs_y(y, axis)

    return axis.plot(x, y, **kwargs)
r Plot .
96
3
15,213
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law as a straight line on a log-log plot.

    :param exp: the exponent of the power-law.
    :param startx, starty: starting point of the line.
    :param width: horizontal extent of the line (mutually exclusive with ``height``).
    :options endx, endy, height, units ('relative'/'absolute'), axis.
    :raises IOError: when the axis is not log-log scaled.
    '''
    # get options/defaults
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    axis = kwargs.pop('axis', plt.gca())
    # check
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0:
            endy = starty + height
        elif exp == 0:
            endy = starty
        else:
            endy = starty - height
        endx = None
        # NOTE(review): for exp == 0 this leaves endx None and the code below
        # computes (endy / const) ** (1 / exp) -> ZeroDivisionError; confirm
        # that a height with a zero exponent is an unsupported combination.
    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant: y = const * x ** exp through the start point
    const = starty / (startx ** exp)
    # get end x/y-coordinate (whichever was not fixed above)
    if endx is not None:
        endy = const * endx ** exp
    else:
        endx = (endy / const) ** (1 / exp)
    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
r Plot a power - law .
381
7
15,214
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with their right neighbour until every bin is at least
    ``min_width`` wide. ``min_width`` of None/False disables merging.
    '''
    # escape
    if min_width is None or min_width is False:
        return bins

    # repeatedly merge the left-most offending bin
    while True:
        too_narrow = np.where(np.diff(bins) < min_width)[0]

        if len(too_narrow) == 0:
            return bins

        first = too_narrow[0]

        if first + 1 == len(bins) - 1:
            # offending bin is the last one: drop its left edge
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # drop the right edge of the offending bin
            bins = np.hstack((bins[:first + 1], bins[first + 2:]))
r Merge bins with right - neighbour until each bin has a minimum width .
157
15
15,215
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with their right neighbour until every bin holds at least
    ``min_count`` data-points. ``min_count`` of None/False disables merging.
    '''
    # escape
    if min_count is None or min_count is False:
        return bins

    # check
    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')

    # repeatedly merge the left-most offending bin
    while True:
        counts, _ = np.histogram(data, bins=bins, density=False)

        sparse = np.where(counts < min_count)[0]

        if len(sparse) == 0:
            return bins

        first = sparse[0]

        if first + 1 == len(counts):
            # offending bin is the last one: drop its left edge
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # drop the right edge of the offending bin
            bins = np.hstack((bins[:first + 1], bins[first + 2:]))
r Merge bins with right - neighbour until each bin has a minimum number of data - points .
200
19
15,216
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None,
                        integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :param data: the data to bin.
    :param bins: number of bins.
    :param mode: 'equal' (equal width), 'log' (log-spaced), or 'uniform'
        (approximately equal number of data-points per bin).
    :param min_count: minimum number of data-points per bin (post-merge).
    :param integer: if True keep only edges such that each bin spans an integer.
    :param remove_empty_edges: drop empty leading/trailing bins.
    :param min_width: minimum width per bin (post-merge).
    :returns: array of bin-edges.
    '''
    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins + 1)

    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins + 1)

    elif mode == 'uniform':

        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')

        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int:
                raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data)) / float(min_count)))

        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data)) / float(bins))) * np.ones(bins, dtype='int')

        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   BUG FIX: np.int was deprecated and removed in NumPy 1.24; the
        #   builtin int is the correct equivalent here.
        count[np.linspace(0, bins - 1, len(data) - np.sum(count)).astype(int)] += 1

        # - split the data
        idx = np.empty((bins + 1), dtype='int')
        idx[0] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1

        # - determine the bin-edges
        bin_edges = np.unique(np.sort(data)[idx])

    else:
        raise IOError('Unknown option')

    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N > 0)[0])
        jdx = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx + 2)]

    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)

    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)

    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges) - 1)))
        bin_edges = bin_edges[idx]

    # return
    return bin_edges
r Determine bin - edges .
765
7
15,217
def hist(P, edges, **kwargs):
    r'''
    Plot histogram: one rectangle per bin, optionally coloured by ``cindex``.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    autoscale = kwargs.pop('autoscale', True)

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> fully transparent faces
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # build one rectangle (as a Polygon) per bin
    rectangles = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        corners = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p],
            [xl, p],
        ])
        rectangles.append(Polygon(corners))

    # convert patches -> matplotlib-objects
    collection = PatchCollection(rectangles, **kwargs)

    # add colors to patches
    if cindex is not None:
        collection.set_array(cindex)

    # add patches to axis
    axis.add_collection(collection)

    # rescale the axes manually, with 10% extra margin on every side
    if autoscale:
        xlim = [edges[0], edges[-1]]
        ylim = [0, np.max(P)]
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])

    return collection
r Plot histogram .
464
5
15,218
def cdf(data, mode='continuous', **kwargs):
    '''
    Return cumulative density: a tuple of (cumulative fractions, sorted data).
    '''
    fractions = np.linspace(0.0, 1.0, len(data))
    ordered = np.sort(data)
    return (fractions, ordered)
Return cumulative density .
49
4
15,219
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a
    specified color-index (``cindex``).

    :required coor: nodal coordinates, ``coor[node, dim]``.
    :required conn: connectivity, ``conn[element, nodes]``.
    :options axis, cindex, autoscale -- remaining kwargs go to PatchCollection.
    :raises IOError: when "coor" or "conn" is missing.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # check dependent options
    if 'coor' not in kwargs or 'conn' not in kwargs:
        raise IOError('Specify both "coor" and "conn"')

    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # convert mesh -> list of Polygons
    if coor is not None and conn is not None:
        poly = []
        for iconn in conn:
            poly.append(Polygon(coor[iconn, :]))
        # BUG FIX: was `tuple(poly, *args)`, which raises TypeError whenever
        # extra positional arguments are given (tuple() accepts one argument);
        # the patch list itself is what PatchCollection expects.
        args = tuple(poly)

    # convert patches -> matplotlib-objects
    p = PatchCollection(args, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually, with 10% extra margin on every side
    if autoscale:
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])

    return p
Add patches to plot . The color of the patches is indexed according to a specified color - index .
530
20
15,220
def cmd(send, msg, args):
    """Change the output filter for a channel.

    Supports listing available filters, showing/resetting/chaining the current
    filters (admin-only for mutations), and setting a fresh filter.
    """
    if args['type'] == 'privmsg':
        send('Filters must be set in channels, not via private message.')
        return
    isadmin = args['is_admin'](args['nick'])
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--channel', nargs='?', default=args['target'])
    # exactly one action may be given per invocation
    group = parser.add_mutually_exclusive_group()
    group.add_argument('filter', nargs='?')
    group.add_argument('--show', action='store_true')
    group.add_argument('--list', action='store_true')
    group.add_argument('--reset', '--clear', action='store_true')
    group.add_argument('--chain')
    # no arguments: just report the current filters
    if not msg:
        send(get_filters(args['handler'], args['target']))
        return
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if cmdargs.list:
        send("Available filters are %s" % ", ".join(textutils.output_filters.keys()))
    elif cmdargs.reset and isadmin:
        args['handler'].outputfilter[cmdargs.channel].clear()
        send("Okay!")
    elif cmdargs.chain and isadmin:
        # chaining appends to the existing filter list
        if not args['handler'].outputfilter[cmdargs.channel]:
            send("Must have a filter set in order to chain.")
            return
        filter_list, output = textutils.append_filters(cmdargs.chain)
        if filter_list is not None:
            args['handler'].outputfilter[cmdargs.channel].extend(filter_list)
        send(output)
    elif cmdargs.show:
        send(get_filters(args['handler'], cmdargs.channel))
    elif isadmin:
        # If we're just adding a filter without chain, blow away any existing filters.
        filter_list, output = textutils.append_filters(cmdargs.filter)
        if filter_list is not None:
            args['handler'].outputfilter[cmdargs.channel].clear()
            args['handler'].outputfilter[cmdargs.channel].extend(filter_list)
        send(output)
    else:
        send('This command requires admin privileges.')
Changes the output filter .
557
5
15,221
def inner(self):
    """Region formed by taking non-border elements (binary erosion)."""
    eroded = nd.morphology.binary_erosion(self.bitmap)
    return Region(eroded)
Region formed by taking non - border elements .
32
9
15,222
def border(self):
    """Region formed by taking border elements (bitmap minus its erosion)."""
    edge = self.bitmap - self.inner.bitmap
    return Region(edge)
Region formed by taking border elements .
28
7
15,223
def convex_hull(self):
    """Region representing the convex hull of this region."""
    hull = skimage.morphology.convex_hull_image(self.bitmap)
    return Region(hull)
Region representing the convex hull .
39
7
15,224
def dilate(self, iterations=1):
    """Return a region dilated by the given number of iterations."""
    grown = nd.morphology.binary_dilation(self.bitmap, iterations=iterations)
    return Region(grown)
Return a dilated region .
43
6
15,225
def cmd(send, _, args):
    """Show the currently guarded nicks."""
    guarded = args['handler'].guarded
    if guarded:
        send(", ".join(guarded))
    else:
        send("Nobody is guarded.")
Shows the currently guarded nicks .
44
8
15,226
def mapColorRampToValues(cls, colorRamp, minValue, maxValue, alpha=1.0):
    """
    Map color ramp indices to the pixel-value range [minValue, maxValue].

    Returns a MappedColorRamp carrying the ramp plus the slope and intercept
    of the line ``rampIndex = slope * value + intercept``.
    """
    lowIndex = 0  # Always zero
    highIndex = float(len(colorRamp) - 1)

    # Fit the line through (minValue, lowIndex) and (maxValue, highIndex)
    if minValue != maxValue:
        slope = (highIndex - lowIndex) / (maxValue - minValue)
        intercept = highIndex - (slope * maxValue)
    else:
        # Degenerate range: map every value onto the first ramp entry
        slope = 0
        intercept = lowIndex

    # Return color ramp, slope, and intercept to interpolate by value
    return MappedColorRamp(colorRamp=colorRamp,
                           slope=slope,
                           intercept=intercept,
                           min=minValue,
                           max=maxValue,
                           alpha=alpha)
Creates color ramp based on min and max values of all the raster pixels from all rasters . If pixel value is one of the no data values it will be excluded in the color ramp interpolation . Returns colorRamp slope intercept
193
48
15,227
def cmd(send, msg, args):
    """Blame a random user for something."""
    target = choice(get_users(args))
    reason = " for " + msg if msg else msg
    send("blames " + target + reason, 'action')
Blames a random user for something .
49
8
15,228
def set_default(nick, location, session, send, apikey):
    """Set *nick*'s default weather location to *location*.

    Validates the location against the weather API first; on success the
    preference row is created or updated in the DB session.
    """
    if valid_location(location, apikey):
        send("Setting default location")
        default = session.query(Weather_prefs).filter(Weather_prefs.nick == nick).first()
        if default is None:
            # first time: create a new preference row
            default = Weather_prefs(nick=nick, location=location)
            session.add(default)
        else:
            # existing row: update in place (flushed on session commit)
            default.location = location
    else:
        send("Invalid or Ambiguous Location")
Sets nick's default location to location .
110
9
15,229
def cmd(send, msg, _):
    """Get a slogan (for a random word when none is given)."""
    if not msg:
        msg = textutils.gen_word()
    slogan = textutils.gen_slogan(msg)
    send(slogan)
Gets a slogan .
38
5
15,230
def evolve(self, years):
    """Run the evolution of the planet to see how it looks after *years*."""
    world_file = fldr + os.sep + self.name + '.txt'
    self.build_base()
    self.world.add_mountains()
    self.add_life()
    self.world.grd.save(world_file)
    # NOTE(review): *years* is only printed -- the time-stepped simulation is
    # not implemented yet (see TODO below)
    print('TODO - run ' + str(years) + ' years')
run the evolution of the planet to see how it looks after years
83
13
15,231
def build_base(self):
    """Create a base random land structure using the AIKIF world model."""
    self.world = my_world.World(self.grid_height, self.grid_width, [' ', 'x', '#'])
    # land fraction grows with lava/wind/rain/sun -- the weights are heuristic
    perc_land = (self.lava + (self.wind / 10) + (self.rain / 20) + (self.sun / 10)) * 100
    perc_sea = (100 - perc_land)
    # mountains ("blocked" cells) scale with lava only
    perc_blocked = (self.lava / 10) * 100
    self.world.build_random(self.num_seeds, perc_land, perc_sea, perc_blocked)
create a base random land structure using the AIKIF world model
189
13
15,232
def url(self):
    """The site-relative URL to the post (duplicate slashes collapsed)."""
    raw = u'{home_url}{permalink}'.format(home_url=settings.HOME_URL,
                                          permalink=self._permalink)
    return re.sub(r'/{2,}', r'/', raw)
The site - relative URL to the post .
63
9
15,233
def content(self):
    """The post's content in HTML format."""
    # preprocessed content plus any stashed fragments, joined into one document
    content_list = wrap_list(self._content_preprocessed)
    content_list.extend(self._content_stash)
    content_to_render = '\n'.join(content_list)
    # render (e.g. Markdown -> HTML) then apply typographic fixes
    return typogrify(self.content_renderer.render(content_to_render, self.format))
The post s content in HTML format .
80
8
15,234
def is_published(self):
    """True if the post is published (status + timestamp not in the future), False otherwise."""
    has_status = self.status == Status.published
    return has_status and self.timestamp <= arrow.now()
True if the post is published False otherwise .
26
9
15,235
def is_pending(self):
    """True if the post is marked as published but its timestamp is in the future."""
    has_status = self.status == Status.published
    return has_status and self.timestamp >= arrow.now()
True if the post is marked as published but has a timestamp set in the future .
27
17
15,236
def set_finalized_content(self, content, caller_class):
    """Allow a plugin to modify the post content written back to source files.

    Has no effect unless the FINALIZE_METADATA setting is enabled and the
    caller holds the MODIFY_RAW_POST plugin permission.

    :returns: True when the content was set, False otherwise.
    """
    # NOTE: Python 2 code (uses `unicode`)
    caller = caller_class.get_name() if hasattr(caller_class, 'get_name') else unicode(caller_class)
    if not FinalizationPlugin.is_enabled():
        logger.warning("A plugin is trying to modify the post content but the FINALIZE_METADATA setting is "
                       "disabled. This setting must be enabled for plugins to modify post content. "
                       "Plugin: %s" % caller)
        return False
    perms = settings.PLUGIN_PERMISSIONS['MODIFY_RAW_POST']
    # permission granted either explicitly by name or via the '*' wildcard
    if caller not in perms and '*' not in perms:
        logger.warning("A plugin is trying to modify the post content but does not have the "
                       "MODIFY_RAW_POST permission. Plugin: %s" % caller)
        return False
    else:
        logger.debug("%s is setting post source content." % caller)
        # NOTE(review): the *content* argument is never used here -- the
        # finalized content comes from _remove_all_stashed_content(); confirm
        # whether *content* was meant to feed into this assignment.
        self._content_finalized = self._remove_all_stashed_content()
        return True
Plugins can call this method to modify post content that is written back to source post files . This method can be called at any time by anyone but it has no effect if the caller is not granted the MODIFY_RAW_POST permission in the Engineer configuration .
233
54
15,237
def all_tags(self):
    """Return a list of all the unique tags (as strings) that posts in the collection have."""
    unique = set()
    for post in self:
        unique.update(post.tags)
    return list(unique)
Returns a list of all the unique tags as strings that posts in the collection have .
31
17
15,238
def remove(self, w):
    """Remove a word from the vocab; the indices of remaining words are unchanged.

    :raises ValueError: when the word is unknown or reserved.
    """
    if w not in self.f2i:
        raise ValueError("'{}' does not exist.".format(w))
    if w in self.reserved:
        raise ValueError("'{}' is one of the reserved words, and thus"
                         "cannot be removed.".format(w))
    index = self.f2i.pop(w)
    del self.i2f[index]
    self.words.remove(w)
Removes a word from the vocab . The indices are unchanged .
109
14
15,239
def reconstruct_indices(self):
    """Rebuild word indices after removals.

    The vocabulary does not compact indices when words are removed, so it must
    be told explicitly when to reconstruct them from ``self.words``.
    """
    del self.i2f, self.f2i
    self.f2i, self.i2f = {}, {}
    for index, word in enumerate(self.words):
        self.f2i[word] = index
        self.i2f[index] = word
Reconstruct word indices in case of word removals . Vocabulary does not handle empty indices when words are removed , hence it needs to be told explicitly when to reconstruct them .
71
38
15,240
def run(self, data):
    """Run the check method and format the result for analysis.

    Sets ``self.result`` to a ``Result(code, messages)`` named tuple.
    """
    result_type = namedtuple('Result', 'code messages')
    # forced outcome: self.passes short-circuits the actual check
    if self.passes is True:
        result = result_type(Checker.Code.PASSED, '')
    elif self.passes is False:
        if self.allow_failure:
            result = result_type(Checker.Code.IGNORED, '')
        else:
            result = result_type(Checker.Code.FAILED, '')
    else:
        try:
            result = self.check(data, **self.arguments)
            messages = ''
            # check() may return (code, messages) or just a code/truthy value
            if isinstance(result, tuple):
                result, messages = result
            # coerce a plain truthy/falsy return into a Code
            if result not in Checker.Code:
                result = Checker.Code.PASSED if bool(result) else Checker.Code.FAILED
            # allowed failures are downgraded to IGNORED
            if result == Checker.Code.FAILED and self.allow_failure:
                result = Checker.Code.IGNORED
            result = result_type(result, messages)
        except NotImplementedError:
            result = result_type(Checker.Code.NOT_IMPLEMENTED, '')
    self.result = result
Run the check method and format the result for analysis .
236
11
15,241
def is_numeric(value):
    """Test if a value is numeric (a builtin int/float or a NumPy scalar type).

    Note: the exact type() comparison is deliberate -- bool (a subclass of
    int) is not considered numeric here.
    """
    numeric_types = [int, float,
                     np.int8, np.int16, np.int32, np.int64,
                     np.float16, np.float32, np.float64]
    # BUG FIX: np.float128 does not exist on every platform (e.g. Windows) and
    # was removed from the top-level namespace in NumPy 2.0, so referencing it
    # directly raised AttributeError; look it up defensively instead.
    for name in ('float128', 'longdouble'):
        extended = getattr(np, name, None)
        if extended is not None and extended not in numeric_types:
            numeric_types.append(extended)
    return type(value) in numeric_types
Test if a value is numeric .
60
7
15,242
def cmd(send, msg, args):
    """Return a fortune (or the list of available fortune files)."""
    if msg == 'list':
        fortunes = list_fortunes() + list_fortunes(True)
        send(" ".join(fortunes), ignore_length=True)
        return
    output = get_fortune(msg, args['name'])
    for line in output.splitlines():
        send(line)
Returns a fortune .
82
4
15,243
def compile_theme(theme_id=None):
    """Compile a theme's LESS stylesheets to minified CSS.

    Compiles every installed theme when *theme_id* is None.
    """
    from engineer.processors import convert_less
    from engineer.themes import ThemeManager
    if theme_id is None:
        themes = ThemeManager.themes().values()
    else:
        themes = [ThemeManager.theme(theme_id)]
    with (indent(2)):
        puts(colored.yellow("Compiling %s themes." % len(themes)))
        for theme in themes:
            # output next to the source: stylesheets/<id>_precompiled.css
            theme_output_path = (theme.static_root / ('stylesheets/%s_precompiled.css' % theme.id)).normpath()
            puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path)))
            with indent(4):
                puts("Compiling...")
                convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id),
                             theme_output_path,
                             minify=True)
                puts(colored.green("Done.", bold=True))
Compiles a theme .
229
5
15,244
def list_theme():
    """List all available Engineer themes, grouped by theme finder."""
    # NOTE: Python 2 code (dict.itervalues / dict.iterkeys)
    from engineer.themes import ThemeManager
    themes = ThemeManager.themes()
    # column widths sized to the longest id / path (+2 padding)
    col1, col2 = map(max, zip(*[(len(t.id) + 2, len(t.root_path) + 2) for t in themes.itervalues()]))
    themes = ThemeManager.themes_by_finder()
    for finder in sorted(themes.iterkeys()):
        if len(themes[finder]) > 0:
            puts("%s: " % finder)
            for theme in sorted(themes[finder], key=lambda _: _.id):
                with indent(4):
                    puts(columns([colored.cyan("%s:" % theme.id), col1],
                                 [colored.white(theme.root_path, bold=True), col2]))
List all available Engineer themes .
189
6
15,245
def quantile_norm(X):
    """Normalize the columns of X to each have the same distribution.

    Each observation is replaced by the mean of the values sharing its rank
    across columns (classic quantile normalization).
    """
    # reference distribution: mean across columns of the column-sorted data
    quantiles = np.mean(np.sort(X, axis=0), axis=1)
    # column-wise ranks: smallest observation -> 1, ..., largest -> M
    ranks = np.apply_along_axis(stats.rankdata, 0, X)
    # ranks -> integer indices 0..M-1, then look up the matching quantile
    rank_indices = ranks.astype(int) - 1
    Xn = quantiles[rank_indices]
    return Xn
Normalize the columns of X to each have the same distribution .
159
13
15,246
def corrdfs(df1, df2, method):
    """Correlate every column of *df1* with every column of *df2*.

    Output layout: df1 in columns, df2 in rows.

    :param method: 'spearman' or 'pearson'.
    :returns: tuple of (correlation DataFrame, p-value DataFrame).
    """
    dcorr = pd.DataFrame(columns=df1.columns, index=df2.columns)
    dpval = pd.DataFrame(columns=df1.columns, index=df2.columns)
    for c1 in df1:
        for c2 in df2:
            if method == 'spearman':
                dcorr.loc[c2, c1], dpval.loc[c2, c1] = spearmanr(df1[c1], df2[c2],
                                                                 nan_policy='omit')
            elif method == 'pearson':
                # NOTE: scipy.stats.pearsonr has no nan_policy parameter
                # (hence the disabled kwarg); NaNs are NOT omitted here,
                # unlike the spearman branch.
                dcorr.loc[c2, c1], dpval.loc[c2, c1] = pearsonr(df1[c1], df2[c2],
                                                                # nan_policy='omit'
                                                                )
    # propagate axis names when available
    if not df1.columns.name is None:
        dcorr.columns.name = df1.columns.name
        dpval.columns.name = df1.columns.name
    if not df2.columns.name is None:
        dcorr.index.name = df2.columns.name
        dpval.index.name = df2.columns.name
    return dcorr, dpval
df1 in columns df2 in rows
286
8
15,247
def pretty_description ( description , wrap_at = None , indent = 0 ) : if wrap_at is None or wrap_at < 0 : width = console_width ( default = 79 ) if wrap_at is None : wrap_at = width else : wrap_at += width indent = ' ' * indent text_wrapper = textwrap . TextWrapper ( width = wrap_at , replace_whitespace = False , initial_indent = indent , subsequent_indent = indent ) new_desc = [ ] for line in description . split ( '\n' ) : new_desc . append ( line . replace ( '\n' , '' ) . strip ( ) ) while not new_desc [ 0 ] : del new_desc [ 0 ] while not new_desc [ - 1 ] : del new_desc [ - 1 ] separators = [ i for i , l in enumerate ( new_desc ) if not l ] paragraphs = [ ] if separators : start , end = 0 , separators [ 0 ] paragraphs . append ( new_desc [ start : end ] ) for i in range ( len ( separators ) - 1 ) : start = end + 1 end = separators [ i + 1 ] paragraphs . append ( new_desc [ start : end ] ) paragraphs . append ( new_desc [ end + 1 : ] ) return '\n\n' . join ( text_wrapper . fill ( ' ' . join ( p ) ) for p in paragraphs ) return text_wrapper . fill ( ' ' . join ( new_desc ) )
Return a pretty formatted string given some text .
334
9
15,248
def print_name ( self , indent = 0 , end = '\n' ) : print ( Style . BRIGHT + ' ' * indent + self . name , end = end )
Print name with optional indent and end .
39
8
15,249
def print ( self ) : print ( '{dim}Identifier:{none} {cyan}{identifier}{none}\n' '{dim}Name:{none} {name}\n' '{dim}Description:{none}\n{description}' . format ( dim = Style . DIM , cyan = Fore . CYAN , none = Style . RESET_ALL , identifier = self . identifier , name = self . name , description = pretty_description ( self . description , indent = 2 ) ) ) if hasattr ( self , 'argument_list' ) and self . argument_list : print ( '{dim}Arguments:{none}' . format ( dim = Style . DIM , none = Style . RESET_ALL ) ) for argument in self . argument_list : argument . print ( indent = 2 )
Print self .
176
3
15,250
def filter ( self , value , table = None ) : if table is not None : filterable = self . filterable_func ( value , table ) else : filterable = self . filterable_func ( value ) return filterable
Return True if the value should be pruned ; False otherwise .
49
13
15,251
def get_data ( self , file_path = sys . stdin , delimiter = ',' , categories_delimiter = None ) : if file_path == sys . stdin : logger . info ( 'Read data from standard input' ) lines = [ line . replace ( '\n' , '' ) for line in file_path ] else : logger . info ( 'Read data from file ' + file_path ) with open ( file_path ) as file : lines = list ( file ) columns = lines [ 0 ] . rstrip ( '\n' ) . split ( delimiter ) [ 1 : ] categories = None if categories_delimiter : columns , categories = zip ( * [ c . split ( categories_delimiter , 1 ) for c in columns ] ) size = len ( columns ) data = [ list ( map ( int , l . split ( delimiter ) [ 1 : ] ) ) for l in lines [ 1 : size + 1 ] ] return DesignStructureMatrix ( data , columns , categories )
Implement get_dsm method from Provider class .
220
11
15,252
def parse_tags ( self ) : tags = [ ] try : for tag in self . _tag_group_dict [ "tags" ] : tags . append ( Tag ( tag ) ) except : return tags return tags
Parses tags in tag group
46
7
15,253
def update ( self ) : if self . _is_ignored or "tags" not in self . _tag_group_dict : return for i in range ( len ( self . _tag_group_dict [ "tags" ] ) ) : tag_dict = self . _tag_group_dict [ "tags" ] [ i ] for tag in self . _tags : if tag . name == tag_dict [ "common.ALLTYPES_NAME" ] : self . _tag_group_dict [ "tags" ] [ i ] = tag . as_dict ( ) break for i in range ( len ( self . _sub_groups ) ) : sub_group = self . _sub_groups [ i ] sub_group . update ( ) self . _tag_group_dict [ "tag_groups" ] [ i ] = sub_group . as_dict ( )
Updates the dictionary of the tag group
191
8
15,254
def cmd ( send , msg , args ) : if not msg : send ( "Evaluate what?" ) return params = { 'format' : 'plaintext' , 'reinterpret' : 'true' , 'input' : msg , 'appid' : args [ 'config' ] [ 'api' ] [ 'wolframapikey' ] } req = get ( 'http://api.wolframalpha.com/v2/query' , params = params ) if req . status_code == 403 : send ( "WolframAlpha is having issues." ) return if not req . content : send ( "WolframAlpha returned an empty response." ) return xml = fromstring ( req . content ) output = xml . findall ( './pod' ) key = args [ 'config' ] [ 'api' ] [ 'bitlykey' ] url = get_short ( "http://www.wolframalpha.com/input/?i=%s" % quote ( msg ) , key ) text = "No output found." for x in output : if 'primary' in x . keys ( ) : text = x . find ( './subpod/plaintext' ) . text if text is None : send ( "No Output parsable" ) else : # Only send the first three lines of output for t in text . splitlines ( ) [ : 3 ] : send ( t ) send ( "See %s for more info" % url )
Queries WolframAlpha .
311
6
15,255
def _get_soup ( page ) : request = requests . get ( page ) data = request . text return bs4 . BeautifulSoup ( data )
Return BeautifulSoup object for given page
34
8
15,256
def search ( term , category = Categories . ALL , pages = 1 , sort = None , order = None ) : s = Search ( ) s . search ( term = term , category = category , pages = pages , sort = sort , order = order ) return s
Return a search result for term in category . Can also be sorted and span multiple pages .
55
18
15,257
def popular ( category = None , sortOption = "title" ) : s = Search ( ) s . popular ( category , sortOption ) return s
Return a search result containing torrents appearing on the KAT home page . Can be categorized . Cannot be sorted or contain multiple pages
31
26
15,258
def recent ( category = None , pages = 1 , sort = None , order = None ) : s = Search ( ) s . recent ( category , pages , sort , order ) return s
Return most recently added torrents . Can be sorted and categorized and contain multiple pages .
39
17
15,259
def print_details ( self ) : print ( "Title:" , self . title ) print ( "Category:" , self . category ) print ( "Page: " , self . page ) print ( "Size: " , self . size ) print ( "Files: " , self . files ) print ( "Age: " , self . age ) print ( "Seeds:" , self . seeders ) print ( "Leechers: " , self . leechers ) print ( "Magnet: " , self . magnet ) print ( "Download: " , self . download ) print ( "Verified:" , self . isVerified )
Print torrent details
135
3
15,260
def search ( self , term = None , category = None , pages = 1 , url = search_url , sort = None , order = None ) : if not self . current_url : self . current_url = url if self . current_url == Search . base_url : # Searching home page so no formatting results = self . _get_results ( self . current_url ) self . _add_results ( results ) else : search = self . _format_search ( term , category ) sorting = self . _format_sort ( sort , order ) # Now get the results. for i in range ( pages ) : results = self . _get_results ( search + "/" + str ( self . _current_page ) + "/" + sorting ) self . _add_results ( results ) self . _current_page += 1 self . _current_page -= 1
Search a given URL for torrent results .
188
8
15,261
def _categorize ( self , category ) : self . torrents = [ result for result in self . torrents if result . category == category ]
Remove torrents with unwanted category from self . torrents
32
11
15,262
def page ( self , i ) : # Need to clear previous results. self . torrents = list ( ) self . _current_page = i self . search ( term = self . term , category = self . category , sort = self . sort , order = self . order )
Get page i of search results
59
6
15,263
def _get_results ( self , page ) : soup = _get_soup ( page ) details = soup . find_all ( "tr" , class_ = "odd" ) even = soup . find_all ( "tr" , class_ = "even" ) # Join the results for i in range ( len ( even ) ) : details . insert ( ( i * 2 ) + 1 , even [ i ] ) return self . _parse_details ( details )
Find every div tag containing torrent details on given page then parse the results into a list of Torrents and return them
101
23
15,264
def _parse_details ( self , tag_list ) : result = list ( ) for i , item in enumerate ( tag_list ) : title = item . find ( "a" , class_ = "cellMainLink" ) title_text = title . text link = title . get ( "href" ) tds = item . find_all ( "td" , class_ = "center" ) # Better name here. size = tds [ 0 ] . text files = tds [ 1 ] . text age = tds [ 2 ] . text seed = tds [ 3 ] . text leech = tds [ 4 ] . text magnet = item . find ( "a" , class_ = "imagnet icon16" ) download = item . find ( "a" , class_ = "idownload icon16" ) isVerified = item . find ( "a" , class_ = "iverify icon16" ) != None # Home page doesn't have magnet or download links if magnet : magnet = magnet . get ( "href" ) if download : download = download . get ( "href" ) # Get category changes depending on if we're parsing # the home page or a search page. if self . current_url == self . base_url : category = self . _get_torrent_category ( item , result = i ) else : category = self . _get_torrent_category ( item ) result . append ( Torrent ( title_text , category , link , size , seed , leech , magnet , download , files , age , isVerified ) ) return result
Given a list of tags from either a search page or the KAT home page parse the details and return a list of Torrents
341
26
15,265
def init_weights ( self ) : self . W = np . random . randn ( self . n_neurons , self . n_inputs ) * np . sqrt ( 2 / self . n_inputs ) self . b = np . zeros ( ( self . n_neurons , 1 ) )
Performs He initialization
70
4
15,266
def bootstrap_executive_office_states ( self , election ) : content_type = ContentType . objects . get_for_model ( election . race . office ) for division in Division . objects . filter ( level = self . STATE_LEVEL ) : PageContent . objects . get_or_create ( content_type = content_type , object_id = election . race . office . pk , election_day = election . election_day , division = division ) # Create national presidential page type page_type , created = PageType . objects . get_or_create ( model_type = ContentType . objects . get ( app_label = 'government' , model = 'office' ) , election_day = election . election_day , division_level = self . NATIONAL_LEVEL , jurisdiction = self . FEDERAL_JURISDICTION , office = election . race . office , ) PageContent . objects . get_or_create ( content_type = ContentType . objects . get_for_model ( page_type ) , object_id = page_type . pk , election_day = election . election_day , ) # Create state results for president page type page_type , created = PageType . objects . get_or_create ( model_type = ContentType . objects . get ( app_label = 'government' , model = 'office' ) , election_day = election . election_day , division_level = self . STATE_LEVEL , jurisdiction = self . FEDERAL_JURISDICTION , office = election . race . office , ) PageContent . objects . get_or_create ( content_type = ContentType . objects . get_for_model ( page_type ) , object_id = page_type . pk , election_day = election . election_day , )
Create state page content exclusively for the U . S . president .
399
13
15,267
def average_last_builds ( connection , package , limit = 5 ) : # TODO: take branches (targets, or tags, etc) into account when estimating # a package's build time. state = build_states . COMPLETE opts = { 'limit' : limit , 'order' : '-completion_time' } builds = yield connection . listBuilds ( package , state = state , queryOpts = opts ) if not builds : defer . returnValue ( None ) durations = [ build . duration for build in builds ] average = sum ( durations , timedelta ( ) ) / len ( durations ) # print('average duration for %s is %s' % (package, average)) defer . returnValue ( average )
Find the average duration time for the last couple of builds .
162
12
15,268
def model_resources ( self ) : response = jsonify ( { 'apiVersion' : '0.1' , 'swaggerVersion' : '1.1' , 'basePath' : '%s%s' % ( self . base_uri ( ) , self . api . url_prefix ) , 'apis' : self . get_model_resources ( ) } ) response . headers . add ( 'Cache-Control' , 'max-age=0' ) return response
Listing of all supported resources .
105
7
15,269
def model_resource ( self , resource_name ) : resource = first ( [ resource for resource in self . api . _registry . values ( ) if resource . get_api_name ( ) == resource_name ] ) data = { 'apiVersion' : '0.1' , 'swaggerVersion' : '1.1' , 'basePath' : '%s%s' % ( self . base_uri ( ) , self . api . url_prefix ) , 'resourcePath' : '/meta/%s' % resource . get_api_name ( ) , 'apis' : self . get_model_apis ( resource ) , 'models' : self . get_model ( resource ) } response = jsonify ( data ) response . headers . add ( 'Cache-Control' , 'max-age=0' ) return response
Details of a specific model resource .
185
7
15,270
def _high_dim_sim ( self , v , w , normalize = False , X = None , idx = 0 ) : sim = np . exp ( ( - np . linalg . norm ( v - w ) ** 2 ) / ( 2 * self . _sigma [ idx ] ** 2 ) ) if normalize : return sim / sum ( map ( lambda x : x [ 1 ] , self . _knn ( idx , X , high_dim = True ) ) ) else : return sim
Similarity measurement based on Gaussian Distribution
111
8
15,271
def init ( confdir = "/etc/cslbot" ) : multiprocessing . set_start_method ( 'spawn' ) parser = argparse . ArgumentParser ( ) parser . add_argument ( '-d' , '--debug' , help = 'Enable debug logging.' , action = 'store_true' ) parser . add_argument ( '--validate' , help = 'Initialize the db and perform other sanity checks.' , action = 'store_true' ) args = parser . parse_args ( ) loglevel = logging . DEBUG if args . debug else logging . INFO logging . basicConfig ( level = loglevel , format = "%(asctime)s %(levelname)s:%(module)s:%(message)s" ) # We don't need a bunch of output from the requests module. logging . getLogger ( "requests" ) . setLevel ( logging . WARNING ) cslbot = IrcBot ( confdir ) if args . validate : cslbot . shutdown_mp ( ) print ( "Everything is ready to go!" ) return try : cslbot . start ( ) except KeyboardInterrupt : # KeyboardInterrupt means someone tried to ^C, so shut down the bot cslbot . disconnect ( 'Bot received a Ctrl-C' ) cslbot . shutdown_mp ( ) sys . exit ( 0 ) except Exception as ex : cslbot . shutdown_mp ( False ) logging . error ( "The bot died! %s" , ex ) output = "" . join ( traceback . format_exc ( ) ) . strip ( ) for line in output . split ( '\n' ) : logging . error ( line ) sys . exit ( 1 )
The bot s main entry point .
372
7
15,272
def get_version ( self ) : _ , version = misc . get_version ( self . confdir ) if version is None : return "Can't get the version." else : return "cslbot - %s" % version
Get the version .
49
4
15,273
def shutdown_mp ( self , clean = True ) : # The server runs on a worker thread, so we need to shut it down first. if hasattr ( self , 'server' ) : # Shutdown the server quickly. try : # For some strange reason, this throws an OSError on windows. self . server . socket . shutdown ( socket . SHUT_RDWR ) except OSError : pass self . server . socket . close ( ) self . server . shutdown ( ) if hasattr ( self , 'handler' ) : self . handler . workers . stop_workers ( clean )
Shutdown all the multiprocessing .
127
9
15,274
def handle_msg ( self , c , e ) : try : self . handler . handle_msg ( c , e ) except Exception as ex : backtrace . handle_traceback ( ex , c , self . get_target ( e ) , self . config )
Handles all messages .
56
5
15,275
def reload_handler ( self , c , e ) : cmd = self . is_reload ( e ) cmdchar = self . config [ 'core' ] [ 'cmdchar' ] if cmd is not None : # If we're in a minimal reload state, only the owner can do stuff, as we can't rely on the db working. if self . reload_event . set ( ) : admins = [ self . config [ 'auth' ] [ 'owner' ] ] else : with self . handler . db . session_scope ( ) as session : admins = [ x . nick for x in session . query ( orm . Permissions ) . all ( ) ] if e . source . nick not in admins : c . privmsg ( self . get_target ( e ) , "Nope, not gonna do it." ) return importlib . reload ( reloader ) self . reload_event . set ( ) cmdargs = cmd [ len ( '%sreload' % cmdchar ) + 1 : ] try : if reloader . do_reload ( self , e , cmdargs ) : if self . config . getboolean ( 'feature' , 'server' ) : self . server = server . init_server ( self ) self . reload_event . clear ( ) logging . info ( "Successfully reloaded" ) except Exception as ex : backtrace . handle_traceback ( ex , c , self . get_target ( e ) , self . config )
This handles reloads .
314
5
15,276
def _options_to_dict ( df ) : kolums = [ "k1" , "k2" , "value" ] d = df [ kolums ] . values . tolist ( ) dc = { } for x in d : dc . setdefault ( x [ 0 ] , { } ) dc [ x [ 0 ] ] [ x [ 1 ] ] = x [ 2 ] return dc
Make a dictionary to print .
87
6
15,277
def _get_repo ( ) : command = [ 'git' , 'rev-parse' , '--show-toplevel' ] if six . PY2 : try : return check_output ( command ) . decode ( 'utf-8' ) . strip ( ) # nosec except CalledProcessError : return '' else : return ( run ( command , stdout = PIPE , stderr = PIPE ) . stdout . decode ( 'utf-8' ) . strip ( ) )
Identify the path to the repository origin .
110
9
15,278
def _entry_must_exist ( df , k1 , k2 ) : count = df [ ( df [ 'k1' ] == k1 ) & ( df [ 'k2' ] == k2 ) ] . shape [ 0 ] if count == 0 : raise NotRegisteredError ( "Option {0}.{1} not registered" . format ( k1 , k2 ) )
Evaluate key - subkey existence .
83
9
15,279
def _entry_must_not_exist ( df , k1 , k2 ) : count = df [ ( df [ 'k1' ] == k1 ) & ( df [ 'k2' ] == k2 ) ] . shape [ 0 ] if count > 0 : raise AlreadyRegisteredError ( "Option {0}.{1} already registered" . format ( k1 , k2 ) )
Evaluate key - subkey non - existence .
85
11
15,280
def register_option ( self , key , subkey , default , _type , definition , values = None , locked = False ) : if not self . open : return key , subkey = _lower_keys ( key , subkey ) _entry_must_not_exist ( self . gc , key , subkey ) ev . value_eval ( default , _type ) values = None if values is False else values new_opt = pd . Series ( [ key , subkey , default , _type , default , locked , definition , values ] , index = self . clmn ) self . gc = self . gc . append ( new_opt , ignore_index = True )
Create a new option .
147
5
15,281
def unregister_option ( self , key , subkey ) : if not self . open : return key , subkey = _lower_keys ( key , subkey ) _entry_must_exist ( self . gc , key , subkey ) self . gc = self . gc [ ~ ( ( self . gc [ 'k1' ] == key ) & ( self . gc [ 'k2' ] == subkey ) ) ]
Removes an option from the manager .
97
8
15,282
def get_option ( self , key , subkey , in_path_none = False ) : key , subkey = _lower_keys ( key , subkey ) _entry_must_exist ( self . gc , key , subkey ) df = self . gc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) ] if df [ "type" ] . values [ 0 ] == "bool" : return bool ( df [ "value" ] . values [ 0 ] ) elif df [ "type" ] . values [ 0 ] == "int" : return int ( df [ "value" ] . values [ 0 ] ) elif df [ "type" ] . values [ 0 ] == "path_in" : if df [ "value" ] . values [ 0 ] is None and not in_path_none : raise ValueError ( 'Unspecified path for {0}.{1}' . format ( key , subkey ) ) return df [ "value" ] . values [ 0 ] else : return df [ "value" ] . values [ 0 ]
Get the current value of the option .
249
8
15,283
def get_option_default ( self , key , subkey ) : key , subkey = _lower_keys ( key , subkey ) _entry_must_exist ( self . gc , key , subkey ) df = self . gc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) ] if df [ "type" ] . values [ 0 ] == "bool" : return bool ( df [ "default" ] . values [ 0 ] ) elif df [ "type" ] . values [ 0 ] == "int" : return int ( df [ "default" ] . values [ 0 ] ) else : return df [ "default" ] . values [ 0 ]
Get the default value of the option .
164
8
15,284
def get_option_description ( self , key , subkey ) : key , subkey = _lower_keys ( key , subkey ) _entry_must_exist ( self . gc , key , subkey ) return self . gc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) ] [ "description" ] . values [ 0 ]
Get the string describing a particular option .
94
8
15,285
def get_option_type ( self , key , subkey ) : key , subkey = _lower_keys ( key , subkey ) _entry_must_exist ( self . gc , key , subkey ) return self . gc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) ] [ "type" ] . values [ 0 ]
Get the type of a particular option .
94
8
15,286
def get_option_alternatives ( self , key , subkey ) : key , subkey = _lower_keys ( key , subkey ) _entry_must_exist ( self . gc , key , subkey ) return self . gc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) ] [ "values" ] . values [ 0 ]
Get list of available values for an option .
95
9
15,287
def set_option ( self , key , subkey , value ) : key , subkey = _lower_keys ( key , subkey ) _entry_must_exist ( self . gc , key , subkey ) df = self . gc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) ] if df [ "locked" ] . values [ 0 ] : raise ValueError ( "{0}.{1} option is locked" . format ( key , subkey ) ) ev . value_eval ( value , df [ "type" ] . values [ 0 ] ) if not self . check_option ( key , subkey , value ) : info = "{0}.{1} accepted options are: " . format ( key , subkey ) info += "[{}]" . format ( ", " . join ( df [ "values" ] . values [ 0 ] ) ) raise ValueError ( info ) self . gc . loc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) , "value" ] = value
Sets the value of an option .
255
8
15,288
def check_option ( self , key , subkey , value ) : key , subkey = _lower_keys ( key , subkey ) _entry_must_exist ( self . gc , key , subkey ) df = self . gc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) ] ev . value_eval ( value , df [ "type" ] . values [ 0 ] ) if df [ "values" ] . values [ 0 ] is not None : return value in df [ "values" ] . values [ 0 ] return True
Evaluate if a given value fits the option .
137
11
15,289
def reset_option ( self , key , subkey ) : if not self . open : return key , subkey = _lower_keys ( key , subkey ) _entry_must_exist ( self . gc , key , subkey ) df = self . gc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) ] if df [ "locked" ] . values [ 0 ] : raise ValueError ( "{0}.{1} option is locked" . format ( key , subkey ) ) val = df [ "default" ] . values [ 0 ] self . gc . loc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) , "value" ] = val
Resets a single option to the default values .
183
10
15,290
def lock_option ( self , key , subkey ) : key , subkey = _lower_keys ( key , subkey ) _entry_must_exist ( self . gc , key , subkey ) self . gc . loc [ ( self . gc [ "k1" ] == key ) & ( self . gc [ "k2" ] == subkey ) , "locked" ] = True
Make an option unmutable .
89
6
15,291
def reset_options ( self , empty = True ) : if empty : self . gc = pd . DataFrame ( columns = self . clmn ) else : self . gc [ "value" ] = self . gc [ "default" ]
Empty ALL options .
54
4
15,292
def set_options_from_file ( self , filename , file_format = 'yaml' ) : if file_format . lower ( ) == 'yaml' : return self . set_options_from_YAML ( filename ) elif file_format . lower ( ) == 'json' : return self . set_options_from_JSON ( filename ) else : raise ValueError ( 'Unknown format {}' . format ( file_format ) )
Load options from file .
98
5
15,293
def set_options_from_dict ( self , data_dict , filename = None ) : if filename is not None : filename = os . path . dirname ( filename ) for k in data_dict : if not isinstance ( data_dict [ k ] , dict ) : raise ValueError ( "The input data has to be a dict of dict" ) for sk in data_dict [ k ] : if self . gc [ ( self . gc [ "k1" ] == k ) & ( self . gc [ "k2" ] == sk ) ] . shape [ 0 ] == 0 : continue if isinstance ( data_dict [ k ] [ sk ] , six . string_types ) : data_dict [ k ] [ sk ] = str ( data_dict [ k ] [ sk ] ) _type = self . gc [ ( self . gc [ "k1" ] == k ) & ( self . gc [ "k2" ] == sk ) ] [ [ "type" ] ] . values [ 0 ] data_dict [ k ] [ sk ] = ev . cast ( data_dict [ k ] [ sk ] , _type ) if self . get_option ( k , sk , True ) != data_dict [ k ] [ sk ] : try : self . set_option ( k , sk , data_dict [ k ] [ sk ] ) # Provided paths do not work: try add them relative # to the config file except IOError : if filename is None : raise IOError ( 'Error path: {0}.{1}' . format ( k , sk ) ) npat = os . path . join ( filename , data_dict [ k ] [ sk ] ) self . set_option ( k , sk , os . path . normpath ( npat ) ) except ValueError : pass
Load options from a dictionary .
392
6
15,294
def write_options_to_file ( self , filename , file_format = 'yaml' ) : if file_format . lower ( ) == 'yaml' : self . write_options_to_YAML ( filename ) elif file_format . lower ( ) == 'json' : self . write_options_to_JSON ( filename ) else : raise ValueError ( 'Unknown format {}' . format ( file_format ) )
Write options to file .
96
5
15,295
def write_options_to_YAML ( self , filename ) : fd = open ( filename , "w" ) yaml . dump ( _options_to_dict ( self . gc ) , fd , default_flow_style = False ) fd . close ( )
Writes the options in YAML format to a file .
62
13
15,296
def write_options_to_JSON ( self , filename ) : fd = open ( filename , "w" ) fd . write ( json . dumps ( _options_to_dict ( self . gc ) , indent = 2 , separators = ( ',' , ': ' ) ) ) fd . close ( )
Writes the options in JSON format to a file .
70
11
15,297
def document_options ( self ) : k1 = max ( [ len ( _ ) for _ in self . gc [ 'k1' ] ] ) + 4 k1 = max ( [ k1 , len ( 'Option Class' ) ] ) k2 = max ( [ len ( _ ) for _ in self . gc [ 'k2' ] ] ) + 4 k2 = max ( [ k2 , len ( 'Option ID' ) ] ) separators = " " . join ( [ "" . join ( [ "=" , ] * k1 ) , "" . join ( [ "=" , ] * k2 ) , "" . join ( [ "=" , ] * 11 ) ] ) line = ( "{0:>" + str ( k1 ) + "} {1:>" + str ( k2 ) + "} {2}" ) data = [ ] data . append ( separators ) data . append ( line . format ( 'Option Class' , 'Option ID' , 'Description' ) ) data . append ( separators ) for _ , row in self . gc . iterrows ( ) : data . append ( line . format ( "**" + row [ 'k1' ] + "**" , "**" + row [ 'k2' ] + "**" , row [ 'description' ] ) ) data . append ( separators ) return "\n" . join ( data )
Generates a docstring table to add to the library documentation .
302
13
15,298
def get_local_config_file ( cls , filename ) : if os . path . isfile ( filename ) : # Local has priority return filename else : try : # Project. If not in a git repo, this will not exist. config_repo = _get_repo ( ) if len ( config_repo ) == 0 : raise Exception ( ) config_repo = os . path . join ( config_repo , filename ) if os . path . isfile ( config_repo ) : return config_repo else : raise Exception ( ) except Exception : home = os . getenv ( "HOME" , os . path . expanduser ( "~" ) ) config_home = os . path . join ( home , filename ) if os . path . isfile ( config_home ) : return config_home return None
Find local file to setup default values .
180
8
15,299
def cmd ( send , msg , args ) : parser = arguments . ArgParser ( args [ 'config' ] ) parser . add_argument ( 'section' , nargs = '?' ) parser . add_argument ( 'command' ) try : cmdargs = parser . parse_args ( msg ) except arguments . ArgumentException as e : send ( str ( e ) ) return if cmdargs . section : html = get ( 'http://linux.die.net/man/%s/%s' % ( cmdargs . section , cmdargs . command ) ) short = fromstring ( html . text ) . find ( './/meta[@name="description"]' ) if short is not None : short = short . get ( 'content' ) send ( "%s -- http://linux.die.net/man/%s/%s" % ( short , cmdargs . section , cmdargs . command ) ) else : send ( "No manual entry for %s in section %s" % ( cmdargs . command , cmdargs . section ) ) else : for section in range ( 0 , 8 ) : html = get ( 'http://linux.die.net/man/%d/%s' % ( section , cmdargs . command ) ) if html . status_code == 200 : short = fromstring ( html . text ) . find ( './/meta[@name="description"]' ) if short is not None : short = short . get ( 'content' ) send ( "%s -- http://linux.die.net/man/%d/%s" % ( short , section , cmdargs . command ) ) return send ( "No manual entry for %s" % cmdargs . command )
Gets a man page .
364
6