idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
246,900
def _process_registry(registry, call_func):
    """Given a dictionary and a registration function, process the registry.

    Each key is an 'app.Model' dotted string; each value may be a field
    name, a dict of field parameters (with a 'name' key), or a list/tuple
    of either.  call_func(model, field_name, extra_params=...) is invoked
    once per field found.

    NOTE(review): the 'name' key is pop()ed from value dicts, so the
    registry's dicts are mutated in place — confirm callers don't reuse them.
    """
    from django.core.exceptions import ImproperlyConfigured
    from django.apps import apps
    for key, value in list(registry.items()):
        model = apps.get_model(*key.split('.'))
        if model is None:
            raise ImproperlyConfigured(_('%(key)s is not a model') % {'key': key})
        if isinstance(value, (tuple, list)):
            for item in value:
                if isinstance(item, str):
                    call_func(model, item)
                elif isinstance(item, dict):
                    # dict form: {'name': <field name>, ...extra field params}
                    field_name = item.pop('name')
                    call_func(model, field_name, extra_params=item)
                else:
                    raise ImproperlyConfigured(_("%(settings)s doesn't recognize the value of %(key)s") % {'settings': 'CATEGORY_SETTINGS', 'key': key})
        elif isinstance(value, str):
            call_func(model, value)
        elif isinstance(value, dict):
            field_name = value.pop('name')
            call_func(model, field_name, extra_params=value)
        else:
            raise ImproperlyConfigured(_("%(settings)s doesn't recognize the value of %(key)s") % {'settings': 'CATEGORY_SETTINGS', 'key': key})
Given a dictionary and a registration function, process the registry.
335
10
246,901
def field_exists(app_name, model_name, field_name):
    """Return True if the column for field_name already exists in the
    database table backing app_name.model_name.
    """
    target_model = apps.get_model(app_name, model_name)
    db_table = target_model._meta.db_table
    cur = connection.cursor()
    description = connection.introspection.get_table_description(cur, db_table)
    existing_columns = [column.name for column in description]
    return field_name in existing_columns
Does the FK or M2M table exist in the database already?
98
15
246,902
def drop_field(app_name, model_name, field_name):
    """Drop the given field from the named app's model via the schema editor."""
    application = apps.get_app_config(app_name)
    target_model = application.get_model(model_name)
    target_field = target_model._meta.get_field(field_name)
    with connection.schema_editor() as editor:
        editor.remove_field(target_model, target_field)
Drop the given field from the app's model.
88
9
246,903
def migrate_app(sender, *args, **kwargs):
    """Migrate all registered fields of this app's models.

    Signal handler: for every field registered under this app whose
    database column does not yet exist, add it via the schema editor.
    Uses a savepoint so a failed ALTER (ProgrammingError) can be rolled
    back and the field skipped.
    """
    from .registration import registry
    if 'app_config' not in kwargs:
        return
    app_config = kwargs['app_config']
    app_name = app_config.label
    # Registry keys look like 'app.model.field'; keep only this app's keys.
    fields = [fld for fld in list(registry._field_registry.keys()) if fld.startswith(app_name)]
    sid = transaction.savepoint()
    for fld in fields:
        model_name, field_name = fld.split('.')[1:]
        if field_exists(app_name, model_name, field_name):
            continue
        model = app_config.get_model(model_name)
        try:
            with connection.schema_editor() as schema_editor:
                schema_editor.add_field(model, registry._field_registry[fld])
            if sid:
                transaction.savepoint_commit(sid)
        except ProgrammingError:
            if sid:
                transaction.savepoint_rollback(sid)
            continue
Migrate all registered models of this app.
229
8
246,904
def get_absolute_url(self):
    """Return a URL path for this category.

    Uses alternate_url if set; otherwise builds a slash-separated path of
    ancestor slugs under the 'categories_tree_list' URL (falling back to
    '/' when that URL name is not configured).
    """
    from django.urls import NoReverseMatch
    if self.alternate_url:
        return self.alternate_url
    try:
        prefix = reverse('categories_tree_list')
    except NoReverseMatch:
        prefix = '/'
    ancestors = list(self.get_ancestors()) + [self, ]
    return prefix + '/'.join([force_text(i.slug) for i in ancestors]) + '/'
Return a path
106
3
246,905
def get_content_type(self, content_type):
    """Return all items of the given content type related to this item."""
    related = self.get_queryset()
    return related.filter(content_type__name=content_type)
Get all the items of the given content type related to this item .
42
14
246,906
def get_relation_type(self, relation_type):
    """Return all items of the given relationship type related to this item."""
    related = self.get_queryset()
    return related.filter(relation_type=relation_type)
Get all the items of the given relationship type related to this item .
40
14
246,907
def handle_class_prepared(sender, **kwargs):
    """See whether the just-prepared model class has fields registered for it.

    Checks the FK and M2M registries for an entry matching the sender's
    'app.model' label and registers the corresponding field type.
    """
    from .settings import M2M_REGISTRY, FK_REGISTRY
    from .registration import registry
    app_label = sender._meta.app_label
    model_label = sender._meta.model_name
    # FK registry first, then M2M — same order as the original two loops.
    for reg, field_type in ((FK_REGISTRY, 'ForeignKey'), (M2M_REGISTRY, 'ManyToManyField')):
        for key, val in list(reg.items()):
            app_name, model_name = key.split('.')
            if app_name == app_label and model_label == model_name:
                registry.register_model(app_name, sender, field_type, val)
See if this class needs registering of fields
207
8
246,908
def get_queryset(self, request):
    """Return a QuerySet of all model instances that can be edited by the
    admin site.  Used by changelist_view.
    """
    qs = self.model._default_manager.get_queryset()
    # Swap in the tree-aware queryset class so results render as a tree.
    qs.__class__ = TreeEditorQuerySet
    return qs
Returns a QuerySet of all model instances that can be edited by the admin site . This is used by changelist_view .
45
26
246,909
def deactivate(self, request, queryset):
    """Set active to False for the selected items and their immediate children."""
    selected_pks = [int(pk) for pk in request.POST.getlist('_selected_action')]
    for category in self.model.objects.filter(pk__in=selected_pks):
        if not category.active:
            continue
        category.active = False
        category.save()
        category.children.all().update(active=False)
Set active to False for selected items
90
7
246,910
def get_indent(self, string):
    """Return the leading indentation of *string*.

    A leading tab wins immediately; otherwise the run of leading spaces is
    returned.  Fixes two defects in the original: an all-space string fell
    off the end of the loop and returned None, and an empty string raised
    IndexError on string[0].
    """
    if not string:
        return ''
    if string[0] == '\t':
        return '\t'
    indent_amt = 0
    for char in string:
        if char != ' ':
            break
        indent_amt += 1
    return ' ' * indent_amt
Look through the string and count the spaces
60
8
246,911
def make_category(self, string, parent=None, order=1):
    """Make and save a Category object from a string.

    The name is the stripped string; the slug is its transliterated,
    slugified form truncated to 49 chars.  The node is inserted as the
    last child of *parent* and the parent's rght pointer is patched so
    subsequent inserts land in the right place.
    """
    cat = Category(name=string.strip(),
                   slug=slugify(SLUG_TRANSLITERATOR(string.strip()))[:49],
                   # arent=parent,
                   order=order)
    cat._tree_manager.insert_node(cat, parent, 'last-child', True)
    cat.save()
    if parent:
        # Keep the parent's right edge ahead of the newly inserted child.
        parent.rght = cat.rght + 1
        parent.save()
    return cat
Make and save a category object from a string
118
9
246,912
def parse_lines(self, lines):
    """Do the work of parsing each line into a category tree.

    Indentation (spaces or tabs, never mixed) encodes depth; a line's
    parent is the most recent line one level shallower.  The first line
    must be unindented.
    """
    indent = ''
    level = 0
    if lines[0][0] == ' ' or lines[0][0] == '\t':
        raise CommandError("The first line in the file cannot start with a space or tab.")
    # This keeps track of the current parents at a given level
    current_parents = {0: None}
    for line in lines:
        if len(line) == 0:
            continue
        if line[0] == ' ' or line[0] == '\t':
            if indent == '':
                # First indented line defines the indent unit for the file.
                indent = self.get_indent(line)
            elif not line[0] in indent:
                raise CommandError("You can't mix spaces and tabs for indents")
            level = line.count(indent)
            current_parents[level] = self.make_category(line, parent=current_parents[level - 1])
        else:
            # We are back to a zero level, so reset the whole thing
            current_parents = {0: self.make_category(line)}
    current_parents[0]._tree_manager.rebuild()
Do the work of parsing each line
239
7
246,913
def handle(self, *file_paths, **options):
    """Handle the basic import: read each existing file and parse its lines.

    Missing files are reported and skipped.  Uses a context manager so the
    file handle is closed even if readlines() raises (the original leaked
    the handle on error).
    """
    import os
    for file_path in file_paths:
        if not os.path.isfile(file_path):
            print("File %s not found." % file_path)
            continue
        with open(file_path, 'r') as f:
            data = f.readlines()
        self.parse_lines(data)
Handle the basic import
90
4
246,914
def get_cat_model(model):
    """Return a CategoryBase model class from a dotted string or a class.

    Raises TemplateSyntaxError for anything unrecognized.  Fixes a defect:
    when *model* was a class that is NOT a CategoryBase subclass, neither
    branch assigned model_class and the function raised NameError instead
    of the intended TemplateSyntaxError.
    """
    try:
        if isinstance(model, string_types):
            model_class = apps.get_model(*model.split("."))
        elif issubclass(model, CategoryBase):
            model_class = model
        else:
            # A class, but not a CategoryBase subclass — treat as unknown.
            raise TypeError
        if model_class is None:
            raise TypeError
    except TypeError:
        raise TemplateSyntaxError("Unknown model submitted: %s" % model)
    return model_class
Return a class from a string or class
94
8
246,915
def get_category(category_string, model=Category):
    """Convert a string containing a path (e.g. "parent/child") into the
    matching Category object, or None when it cannot be resolved uniquely.
    """
    model_class = get_cat_model(model)
    category = str(category_string).strip("'\"")
    category = category.strip('/')
    cat_list = category.split('/')
    if len(cat_list) == 0:
        # NOTE(review): str.split always returns at least one element, so
        # this branch is unreachable.
        return None
    try:
        # Match on the leaf name at the depth implied by the path length.
        categories = model_class.objects.filter(name=cat_list[-1], level=len(cat_list) - 1)
        if len(cat_list) == 1 and len(categories) > 1:
            return None
        # If there is only one, use it. If there is more than one, check
        # if the parent matches the parent passed in the string
        if len(categories) == 1:
            return categories[0]
        else:
            for item in categories:
                if item.parent.name == cat_list[-2]:
                    return item
    except model_class.DoesNotExist:
        return None
Convert a string including a path and return the Category object
203
12
246,916
def get_category_drilldown(parser, token):
    """Retrieve the specified category, its ancestors and its immediate
    children as an iterable.

    Tag syntax:
        {% tag "category name" [using "app.Model"] as varname %}
        {% tag category_obj as varname %}
    """
    bits = token.split_contents()
    error_str = '%(tagname)s tag should be in the format {%% %(tagname)s ' \
                '"category name" [using "app.Model"] as varname %%} or ' \
                '{%% %(tagname)s category_obj as varname %%}.'
    if len(bits) == 4:
        if bits[2] != 'as':
            raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]})
        if bits[2] == 'as':
            varname = bits[3].strip("'\"")
        model = "categories.category"
    if len(bits) == 6:
        if bits[2] not in ('using', 'as') or bits[4] not in ('using', 'as'):
            raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]})
        if bits[2] == 'as':
            varname = bits[3].strip("'\"")
            model = bits[5].strip("'\"")
        if bits[2] == 'using':
            varname = bits[5].strip("'\"")
            model = bits[3].strip("'\"")
    category = FilterExpression(bits[1], parser)
    return CategoryDrillDownNode(category, varname, model)
Retrieves the specified category its ancestors and its immediate children as an iterable .
327
17
246,917
def get_top_level_categories(parser, token):
    """Retrieve an alphabetical list of all categories that have no parents.

    Tag syntax: {% tag [using "app.Model"] as <variable> %}

    Fix: the 5-token validation used `and`, so a malformed tag like
    `{% tag as var bogus "app.Model" %}` slipped through whenever ONE of the
    two keywords was valid; `or` rejects the tag if EITHER keyword is wrong
    (matching the validation style of get_category_drilldown).
    """
    bits = token.split_contents()
    usage = 'Usage: {%% %s [using "app.Model"] as <variable> %%}' % bits[0]
    if len(bits) == 3:
        if bits[1] != 'as':
            raise template.TemplateSyntaxError(usage)
        varname = bits[2]
        model = "categories.category"
    elif len(bits) == 5:
        if bits[1] not in ('as', 'using') or bits[3] not in ('as', 'using'):
            raise template.TemplateSyntaxError(usage)
        if bits[1] == 'using':
            model = bits[2].strip("'\"")
            varname = bits[4].strip("'\"")
        else:
            model = bits[4].strip("'\"")
            varname = bits[2].strip("'\"")
    return TopLevelCategoriesNode(varname, model)
Retrieves an alphabetical list of all the categories that have no parents .
226
16
246,918
def tree_queryset(value):
    """Convert a queryset from an MPTT model to include all ancestors, so a
    filtered subset of items can be formatted correctly as a tree.
    """
    from django.db.models.query import QuerySet
    from copy import deepcopy
    if not isinstance(value, QuerySet):
        return value
    qs = value
    qs2 = deepcopy(qs)
    # Reaching into the bowels of query sets to find out whether the qs is
    # actually filtered and we need to do the INCLUDE_ANCESTORS dance at all.
    # INCLUDE_ANCESTORS is quite expensive, so don't do it if not needed.
    is_filtered = bool(qs.query.where.children)
    if is_filtered:
        include_pages = set()
        # Order by 'rght' will return the tree deepest nodes first;
        # this cuts down the number of queries considerably since all ancestors
        # will already be in include_pages when they are checked, thus not
        # trigger additional queries.
        for p in qs2.order_by('rght').iterator():
            if p.parent_id and p.parent_id not in include_pages and p.id not in include_pages:
                ancestor_id_list = p.get_ancestors().values_list('id', flat=True)
                include_pages.update(ancestor_id_list)
        if include_pages:
            qs = qs | qs.model._default_manager.filter(id__in=include_pages)
        qs = qs.distinct()
    return qs
Converts a normal queryset from an MPTT model to include all the ancestors so a filtered subset of items can be formatted correctly
324
27
246,919
def convolve(data, h, res_g=None, sub_blocks=None):
    """Convolve 1d-3d data with kernel h.

    Both arguments must be OCLArrays (GPU path) or both numpy arrays
    (CPU path).  For numpy input, sub_blocks optionally tiles the image
    so large data can be processed piecewise.
    """
    if not len(data.shape) in [1, 2, 3]:
        raise ValueError("dim = %s not supported" % (len(data.shape)))
    if len(data.shape) != len(h.shape):
        raise ValueError("dimemnsion of data (%s) and h (%s) are different" % (len(data.shape), len(h.shape)))
    if isinstance(data, OCLArray) and isinstance(h, OCLArray):
        return _convolve_buf(data, h, res_g)
    elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray):
        if sub_blocks == (1,) * len(data.shape) or sub_blocks is None:
            return _convolve_np(data, h)
        else:
            # cut the image into tile and operate on every of them
            N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
            # Pad each tile by half the kernel so edges are convolved correctly.
            Npads = [int(s / 2) for s in h.shape]
            res = np.empty(data.shape, np.float32)
            for data_tile, data_s_src, data_s_dest in tile_iterator(data, blocksize=N_sub, padsize=Npads, mode="constant"):
                res_tile = _convolve_np(data_tile.copy(), h)
                res[data_s_src] = res_tile[data_s_dest]
            return res
    else:
        raise TypeError("unknown types (%s, %s)" % (type(data), type(h)))
convolves 1d - 3d data with kernel h
407
11
246,920
def _convolve3_old(data, h, dev=None):
    """Convolve 3d data with kernel h on the GPU device *dev*.

    Boundary conditions clamp to edge; h is converted to float32.
    Supports float32 and uint16 input.
    """
    if dev is None:
        dev = get_device()
    if dev is None:
        raise ValueError("no OpenCLDevice found...")
    dtype = data.dtype.type
    dtypes_options = {np.float32: "", np.uint16: "-D SHORTTYPE"}
    if not dtype in dtypes_options:
        raise TypeError("data type %s not supported yet, please convert to:" % dtype, list(dtypes_options.keys()))
    prog = OCLProgram(abspath("kernels/convolve3.cl"), build_options=dtypes_options[dtype])
    hbuf = OCLArray.from_array(h.astype(np.float32))
    img = OCLImage.from_array(data)
    res = OCLArray.empty(data.shape, dtype=np.float32)
    # Kernel arguments: data shape followed by kernel shape, as int32.
    Ns = [np.int32(n) for n in data.shape + h.shape]
    prog.run_kernel("convolve3d", img.shape, None, img, hbuf.data, res.data, *Ns)
    return res.get()
Convolves 3d data with kernel h on the GPU device dev; boundary conditions clamp to edge. h is converted to float32.
273
27
246,921
def _scale_shape ( dshape , scale = ( 1 , 1 , 1 ) ) : nshape = np . round ( np . array ( dshape ) * np . array ( scale ) ) return tuple ( nshape . astype ( np . int ) )
Returns the shape after scaling (should be the same as ndimage.zoom's output shape).
56
17
246,922
def fftshift(arr_obj, axes=None, res_g=None, return_buffer=False):
    """GPU version of fftshift for numpy arrays or OCLArrays.

    Only float32/complex64 and even-sized axes are supported.  numpy input
    is uploaded (complex input as complex64, real as float32).  Returns the
    OCLArray buffer when return_buffer is True, else a numpy array.
    """
    if axes is None:
        axes = list(range(arr_obj.ndim))
    if isinstance(arr_obj, OCLArray):
        if not arr_obj.dtype.type in DTYPE_KERNEL_NAMES:
            raise NotImplementedError("only works for float32 or complex64")
    elif isinstance(arr_obj, np.ndarray):
        if np.iscomplexobj(arr_obj):
            arr_obj = OCLArray.from_array(arr_obj.astype(np.complex64, copy=False))
        else:
            arr_obj = OCLArray.from_array(arr_obj.astype(np.float32, copy=False))
    else:
        raise ValueError("unknown type (%s)" % (type(arr_obj)))
    if not np.all([arr_obj.shape[a] % 2 == 0 for a in axes]):
        raise NotImplementedError("only works on axes of even dimensions")
    if res_g is None:
        res_g = OCLArray.empty_like(arr_obj)
    # iterate over all axes
    # FIXME: this is still rather inefficient
    in_g = arr_obj
    for ax in axes:
        _fftshift_single(in_g, res_g, ax)
        in_g = res_g
    if return_buffer:
        return res_g
    else:
        return res_g.get()
gpu version of fftshift for numpy arrays or OCLArrays
343
15
246,923
def _fftshift_single(d_g, res_g, ax=0):
    """Basic fftshift of an OCLArray along a single axis *ax*.

    N1/N2 are the products of the dimensions before/after the shifted
    axis, used to linearize the kernel launch grid.
    """
    dtype_kernel_name = {np.float32: "fftshift_1_f", np.complex64: "fftshift_1_c"}
    N = d_g.shape[ax]
    N1 = 1 if ax == 0 else np.prod(d_g.shape[:ax])
    N2 = 1 if ax == len(d_g.shape) - 1 else np.prod(d_g.shape[ax + 1:])
    dtype = d_g.dtype.type
    prog = OCLProgram(abspath("kernels/fftshift.cl"))
    prog.run_kernel(dtype_kernel_name[dtype], (N2, N // 2, N1), None, d_g.data, res_g.data, np.int32(N), np.int32(N2))
    return res_g
basic fftshift of an OCLArray
222
9
246,924
def fft_convolve(data, h, res_g=None, plan=None, inplace=False, kernel_is_fft=False, kernel_is_fftshifted=False):
    """Convolve data with kernel h via FFTs.

    Dispatches to the numpy or GPU implementation depending on the type
    of *data*.
    """
    if isinstance(data, np.ndarray):
        return _fft_convolve_numpy(data, h, plan=plan,
                                   kernel_is_fft=kernel_is_fft,
                                   kernel_is_fftshifted=kernel_is_fftshifted)
    if isinstance(data, OCLArray):
        return _fft_convolve_gpu(data, h, res_g=res_g, plan=plan,
                                 inplace=inplace, kernel_is_fft=kernel_is_fft)
    raise TypeError("array argument (1) has bad type: %s" % type(data))
convolves data with kernel h via FFTs
194
10
246,925
def _fft_convolve_numpy(data, h, plan=None, kernel_is_fft=False, kernel_is_fftshifted=False):
    """FFT-convolve numpy arrays via OpenCL; returns abs() of the result.

    data and h must have identical shapes.  Buffers are explicitly deleted
    to release GPU memory promptly.
    """
    if data.shape != h.shape:
        raise ValueError("data and kernel must have same size! %s vs %s " % (str(data.shape), str(h.shape)))
    data_g = OCLArray.from_array(data.astype(np.complex64))
    if not kernel_is_fftshifted:
        h = np.fft.fftshift(h)
    h_g = OCLArray.from_array(h.astype(np.complex64))
    res_g = OCLArray.empty_like(data_g)
    _fft_convolve_gpu(data_g, h_g, res_g=res_g, plan=plan, kernel_is_fft=kernel_is_fft)
    res = abs(res_g.get())
    del data_g
    del h_g
    del res_g
    return res
convolving via opencl fft for numpy arrays
240
12
246,926
def _fft_convolve_gpu(data_g, h_g, res_g=None, plan=None, inplace=False, kernel_is_fft=False):
    """FFT convolution for complex64 GPU buffers.

    Computes ifft(fft(data) * fft(kernel)).  With inplace=True the result
    overwrites data_g; otherwise res_g (allocated if None) receives a copy
    of data_g first.  h_g is copied before its FFT unless kernel_is_fft.
    """
    assert_bufs_type(np.complex64, data_g, h_g)
    if data_g.shape != h_g.shape:
        raise ValueError("data and kernel must have same size! %s vs %s " % (str(data_g.shape), str(h_g.shape)))
    if plan is None:
        plan = fft_plan(data_g.shape)
    if inplace:
        res_g = data_g
    else:
        if res_g is None:
            res_g = OCLArray.empty(data_g.shape, data_g.dtype)
        res_g.copy_buffer(data_g)
    if not kernel_is_fft:
        # Copy so the caller's kernel buffer is not clobbered by the FFT.
        kern_g = OCLArray.empty(h_g.shape, h_g.dtype)
        kern_g.copy_buffer(h_g)
        fft(kern_g, inplace=True, plan=plan)
    else:
        kern_g = h_g
    fft(res_g, inplace=True, plan=plan)
    # multiply in fourier domain
    _complex_multiply_kernel(res_g, kern_g)
    fft(res_g, inplace=True, inverse=True, plan=plan)
    return res_g
fft convolve for gpu buffer
328
8
246,927
def median_filter(data, size=3, cval=0, res_g=None, sub_blocks=None):
    """Median filter of the given size (2d or 3d input only)."""
    if data.ndim == 2:
        builder = _median_filter_gpu_2d
    elif data.ndim == 3:
        builder = _median_filter_gpu_3d
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    filt = make_filter(builder())
    return filt(data=data, size=size, cval=cval, res_g=res_g, sub_blocks=sub_blocks)
median filter of given size
141
6
246,928
def rotate(data, axis=(1., 0, 0), angle=0., center=None, mode="constant", interpolation="linear"):
    """Rotate data around *axis* by *angle* about *center*.

    center defaults to the array midpoint.  Builds translate-rotate-translate
    matrices, inverts the composite (affine() maps output to input coords)
    and delegates to affine().
    NOTE(review): angle is presumably in radians (passed straight to
    mat4_rotate) — confirm against the mat4_rotate helper.
    """
    if center is None:
        center = tuple([s // 2 for s in data.shape])
    cx, cy, cz = center
    m = np.dot(mat4_translate(cx, cy, cz), np.dot(mat4_rotate(angle, *axis), mat4_translate(-cx, -cy, -cz)))
    m = np.linalg.inv(m)
    return affine(data, m, mode=mode, interpolation=interpolation)
rotates data around axis by a given angle
150
9
246,929
def map_coordinates(data, coordinates, interpolation="linear", mode='constant'):
    """Map data to new coordinates by interpolation.

    The (data.ndim, m) coordinate array gives, for each output point, the
    corresponding coordinates in the input.  interpolation: linear|nearest;
    mode: constant|wrap|edge.

    NOTE(review): coordinates are cast to np.int32 here and then to float32
    before upload — the int cast truncates fractional coordinates, which
    looks unintended for linear interpolation; confirm.
    """
    if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)):
        raise ValueError("input data has to be a 2d or 3d array!")
    coordinates = np.asarray(coordinates, np.int32)
    if not (coordinates.shape[0] == data.ndim):
        raise ValueError("coordinate has to be of shape (data.ndim,m) ")
    interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
                             "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}
    mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
                    "wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
                    "edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"]}
    if not interpolation in interpolation_defines:
        raise KeyError("interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys())))
    if not mode in mode_defines:
        raise KeyError("mode = '%s' not defined ,valid: %s" % (mode, list(mode_defines.keys())))
    if not data.dtype.type in cl_buffer_datatype_dict:
        raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys())))
    dtype_defines = ["-D", "DTYPE=%s" % cl_buffer_datatype_dict[data.dtype.type]]
    d_im = OCLImage.from_array(data)
    coordinates_g = OCLArray.from_array(coordinates.astype(np.float32, copy=False))
    res_g = OCLArray.empty(coordinates.shape[1], data.dtype)
    prog = OCLProgram(abspath("kernels/map_coordinates.cl"), build_options=interpolation_defines[interpolation] + mode_defines[mode] + dtype_defines)
    kernel = "map_coordinates{ndim}".format(ndim=data.ndim)
    prog.run_kernel(kernel, (coordinates.shape[-1],), None, d_im, res_g.data, coordinates_g.data)
    return res_g.get()
Map data to new coordinates by interpolation . The array of coordinates is used to find for each point in the output the corresponding coordinates in the input .
665
30
246,930
def pad_to_shape(d, dshape, mode="constant"):
    """Pad (and, where the target is smaller, crop) array d to shape dshape.

    Fix: the pad-width comprehension reused `d` as its loop variable,
    shadowing the array parameter — it worked only by accident of
    evaluation order and was a latent bug; renamed to `n`.
    """
    if d.shape == dshape:
        return d
    diff = np.array(dshape) - np.array(d.shape)
    # First crop axes where the target shape is smaller (negative diff).
    slices = tuple(slice(-x // 2, x // 2) if x < 0 else slice(None, None) for x in diff)
    res = d[slices]
    # Then pad axes where the target is larger, splitting the excess
    # ceil/floor between the two sides.
    pad_width = [(int(np.ceil(n / 2.)), n - int(np.ceil(n / 2.))) if n > 0 else (0, 0) for n in diff]
    return np.pad(res, pad_width, mode=mode)
pad array d to shape dshape
188
7
246,931
def pad_to_power2(data, axis=None, mode="constant"):
    """Pad the given axes of data up to the next power of two.

    axis=None pads every axis.  Returns data unchanged when all selected
    axes are already powers of two.
    """
    if axis is None:
        axis = list(range(data.ndim))
    already_pow2 = np.all([_is_power2(n) for i, n in enumerate(data.shape) if i in axis])
    if already_pow2:
        return data
    target = [(_next_power_of_2(n) if i in axis else n) for i, n in enumerate(data.shape)]
    return pad_to_shape(data, target, mode)
pad data to a shape of power 2 if axis == None all axis are padded
121
16
246,932
def max_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
    """Maximum filter of the given size (2d or 3d input only).

    Fix: the original had no else branch, so 1d/4d input raised a confusing
    NameError on _filt instead of the ValueError its sibling min_filter
    raises; now consistent with min_filter.
    """
    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC="(val>res?val:res)", DEFAULT="-INFINITY"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC="(val>res?val:res)", DEFAULT="-INFINITY"))
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
maximum filter of given size
158
5
246,933
def min_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
    """Minimum filter of the given size (2d or 3d input only)."""
    if data.ndim == 2:
        gpu_kernel = _generic_filter_gpu_2d(FUNC="(val<res?val:res)", DEFAULT="INFINITY")
    elif data.ndim == 3:
        gpu_kernel = _generic_filter_gpu_3d(FUNC="(val<res?val:res)", DEFAULT="INFINITY")
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    filt = make_filter(gpu_kernel)
    return filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
minimum filter of given size
176
5
246,934
def uniform_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1), normalized=True):
    """Mean (uniform) filter of the given size (2d or 3d input only).

    When normalized, each accumulated value is divided by the per-axis
    norm so the separable passes multiply out to the window size.
    Fix: added the missing else branch — unsupported ndim previously
    raised NameError on _filt instead of a clear ValueError (consistent
    with min_filter/median_filter).
    """
    if normalized:
        if np.isscalar(size):
            norm = size
        else:
            norm = np.int32(np.prod(size)) ** (1. / len(size))
        FUNC = "res+val/%s" % norm
    else:
        FUNC = "res+val"
    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC=FUNC, DEFAULT="0"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC=FUNC, DEFAULT="0"))
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    res = _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
    return res
mean filter of given size
212
5
246,935
def _gauss_filter(data, sigma=4, res_g=None, sub_blocks=(1, 1, 1)):
    """Gaussian filter with truncation radius 4*sigma (2d or 3d input only).

    NOTE(review): sigma is iterated over, so despite the scalar default it
    must be a sequence (one entry per axis); and only sigma[0]/size[0] are
    baked into the kernel weight, so the filter is effectively isotropic —
    confirm this is intended.
    """
    truncate = 4.
    radius = tuple(int(truncate * s + 0.5) for s in sigma)
    size = tuple(2 * r + 1 for r in radius)
    s = sigma[0]
    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))" % (size[0] // 2, size[0] // 2, s, s), DEFAULT="0.f"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))" % (size[0] // 2, size[0] // 2, s, s), DEFAULT="0.f"))
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
gaussian filter of given size
312
6
246,936
def _separable_series2 ( h , N = 1 ) : if min ( h . shape ) < N : raise ValueError ( "smallest dimension of h is smaller than approximation order! (%s < %s)" % ( min ( h . shape ) , N ) ) U , S , V = linalg . svd ( h ) hx = [ - U [ : , n ] * np . sqrt ( S [ n ] ) for n in range ( N ) ] hy = [ - V [ n , : ] * np . sqrt ( S [ n ] ) for n in range ( N ) ] return np . array ( list ( zip ( hx , hy ) ) )
finds separable approximations to the 2d function 2d h
149
15
246,937
def _separable_approx2(h, N=1):
    """Return the cumulative first N separable approximations to the 2d
    function h (the last entry's sum should equal h for full rank).
    """
    outer_terms = [np.outer(fy, fx) for fy, fx in _separable_series2(h, N)]
    return np.cumsum(outer_terms, 0)
returns the N first approximations to the 2d function h whose sum should be h
56
19
246,938
def _separable_approx3(h, N=1):
    """Return the cumulative first N separable approximations to the 3d
    function h.
    """
    rank1_terms = [np.einsum("i,j,k", fz, fy, fx) for fz, fy, fx in _separable_series3(h, N)]
    return np.cumsum(rank1_terms, 0)
returns the N first approximations to the 3d function h
72
14
246,939
def separable_approx(h, N=1):
    """Return the k-th rank approximations to h for k = 1..N (2d or 3d)."""
    if h.ndim == 2:
        return _separable_approx2(h, N)
    if h.ndim == 3:
        return _separable_approx3(h, N)
    raise ValueError("unsupported array dimension: %s (only 2d or 3d) " % h.ndim)
finds the k - th rank approximation to h where k = 1 .. N
86
16
246,940
def tables(self):
    """Generic method that does a depth-first search on the node attributes,
    collecting the union of tables() from every Node reachable from this
    node's attribute values (including Nodes inside list attributes).
    """
    _tables = set()
    for attr in six.itervalues(self.__dict__):
        if isinstance(attr, list):
            for item in attr:
                if isinstance(item, Node):
                    _tables |= item.tables()
        elif isinstance(attr, Node):
            _tables |= attr.tables()
    return _tables
Generic method that does a depth - first search on the node attributes .
92
14
246,941
def fix_identities(self, uniq=None):
    """Make pattern-tree tips point to the same object if they are equal.

    On the initial call uniq is built from the deduplicated flat() view;
    the same list is threaded through the recursion so equal leaves across
    the whole tree share identity.
    """
    if not hasattr(self, 'children'):
        return self
    uniq = list(set(self.flat())) if uniq is None else uniq
    for i, child in enumerate(self.children):
        if not hasattr(child, 'children'):
            # Leaf: replace with the canonical equal object from uniq.
            assert child in uniq
            self.children[i] = uniq[uniq.index(child)]
        else:
            child.fix_identities(uniq)
Make pattern - tree tips point to same object if they are equal .
110
14
246,942
def find_version(fname):
    """Attempt to find the version number in the file named fname.

    Raises RuntimeError if no (non-empty) version string is found.
    """
    version = ""
    pattern = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    with open(fname, "r") as fp:
        for line in fp:
            match = pattern.match(line)
            if match:
                version = match.group(1)
                break
    if not version:
        raise RuntimeError("Cannot find version information")
    return version
Attempts to find the version number in the file names fname . Raises RuntimeError if not found .
95
21
246,943
def format_context(context: Context, formatter: typing.Union[str, Formatter] = "full") -> str:
    """Render a context dictionary as a string.

    formatter may be a callable or the name of a registered formatter;
    an empty context renders as "".
    """
    if not context:
        return ""
    if callable(formatter):
        formatter_func = formatter
    else:
        if formatter not in CONTEXT_FORMATTERS:
            raise ValueError(f'Invalid context format: "{formatter}"')
        formatter_func = CONTEXT_FORMATTERS[formatter]
    return formatter_func(context)
Output the a context dictionary as a string .
104
9
246,944
def make_banner(text: typing.Optional[str] = None, context: typing.Optional[Context] = None, banner_template: typing.Optional[str] = None, context_format: ContextFormat = "full") -> str:
    """Generate a full banner: version info, the given (or default) text,
    and a formatted list of context variables.
    """
    banner_text = text or speak()
    template = banner_template or BANNER_TEMPLATE
    rendered_ctx = format_context(context or {}, formatter=context_format)
    return template.format(version=sys.version, text=banner_text, context=rendered_ctx)
Generates a full banner with version info the given text and a formatted list of context variables .
127
19
246,945
def config(config_dict: typing.Mapping) -> Config:
    """Configure the konch shell.  Should be called in a .konchrc file.

    Merges config_dict into the module-level config and returns it.
    """
    logger.debug(f"Updating with {config_dict}")
    _cfg.update(config_dict)
    return _cfg
Configures the konch shell. This function should be called in a .konchrc file.
42
22
246,946
def named_config(name: str, config_dict: typing.Mapping) -> None:
    """Add a named config to the config registry.

    The first argument may be a single string or a collection of strings;
    each name maps to a fresh Config built from config_dict.
    """
    if isinstance(name, Iterable) and not isinstance(name, (str, bytes)):
        names = name
    else:
        names = [name]
    for each in names:
        _config_registry[each] = Config(**config_dict)
Adds a named config to the config registry . The first argument may either be a string or a collection of strings .
74
23
246,947
def __ensure_directory_in_path(filename: Path) -> None:
    """Ensure a file's directory is on sys.path so relative imports work.

    Fix: sys.path holds strings, so the original `Path in sys.path`
    membership test could never match and the directory was re-inserted on
    every call; compare the string form instead.
    """
    directory = Path(filename).parent.resolve()
    if str(directory) not in sys.path:
        logger.debug(f"Adding {directory} to sys.path")
        sys.path.insert(0, str(directory))
Ensures that a file s directory is in the Python path .
67
14
246,948
def use_file(filename: typing.Union[Path, str, None], trust: bool = False) -> typing.Union[types.ModuleType, None]:
    """Load filename as a Python config file and return it as a module.

    Falls back to resolving CONFIG_FILE when filename is None.  Unless
    trust is set, the file must pass the AuthFile check; on a changed or
    blocked file the user is shown the contents and asked to authorize
    (declining exits).  Returns None when no usable file is found.
    """
    config_file = filename or resolve_path(CONFIG_FILE)

    def preview_unauthorized() -> None:
        # Dump the unauthorized file to stderr so the user can inspect it
        # before deciding to trust it.
        if not config_file:
            return None
        print(SEPARATOR, file=sys.stderr)
        with Path(config_file).open("r", encoding="utf-8") as fp:
            for line in fp:
                print(line, end="", file=sys.stderr)
        print(SEPARATOR, file=sys.stderr)

    if config_file and not Path(config_file).exists():
        # NOTE(review): this message has no filename placeholder — looks
        # like it should interpolate config_file; confirm upstream.
        print_error(f'"(unknown)" not found.')
        sys.exit(1)
    if config_file and Path(config_file).exists():
        if not trust:
            with AuthFile.load() as authfile:
                try:
                    authfile.check(Path(config_file))
                except KonchrcChangedError:
                    print_error(f'"{config_file}" has changed since you last used it.')
                    preview_unauthorized()
                    if confirm("Would you like to authorize it?"):
                        authfile.allow(Path(config_file))
                        print()
                    else:
                        sys.exit(1)
                except KonchrcNotAuthorizedError:
                    print_error(f'"{config_file}" is blocked.')
                    preview_unauthorized()
                    if confirm("Would you like to authorize it?"):
                        authfile.allow(Path(config_file))
                        print()
                    else:
                        sys.exit(1)
        logger.info(f"Using {config_file}")
        # Ensure that relative imports are possible
        __ensure_directory_in_path(Path(config_file))
        mod = None
        try:
            mod = imp.load_source("konchrc", str(config_file))
        except UnboundLocalError:
            # File not found
            pass
        else:
            return mod
    if not config_file:
        print_warning("No konch config file found.")
    else:
        print_warning(f'"{config_file}" not found.')
    return None
Load filename as a python file . Import filename and return it as a module .
482
16
246,949
def resolve_path(filename: Path) -> typing.Union[Path, None]:
    """Find a file by walking up parent directories until it is found.

    Returns the absolute path of the file, or None if the search exhausts
    the ancestors.  The walk stops at the home directory's parent, or at
    the filesystem root — the original looped forever when the CWD was not
    under the home directory, because the parent of the root is itself.
    """
    current = Path.cwd()
    # Stop search at home directory
    sentinel_dir = Path.home().parent.resolve()
    while current != sentinel_dir:
        target = Path(current) / Path(filename)
        if target.exists():
            return target.resolve()
        parent = current.parent.resolve()
        if parent == current:
            # Reached the filesystem root; nothing further up to search.
            break
        current = parent
    return None
Find a file by walking up parent directories until the file is found . Return the absolute path of the file .
94
22
246,950
def parse_args(argv: typing.Optional[typing.Sequence] = None) -> typing.Dict[str, str]:
    """Expose the docopt command-line argument parser.

    Returns the dictionary of parsed arguments derived from the module
    docstring's usage section.
    """
    return docopt(__doc__, argv=argv, version=__version__)
Exposes the docopt command - line arguments parser . Return a dictionary of arguments .
50
17
246,951
def main ( argv : typing . Optional [ typing . Sequence ] = None ) -> typing . NoReturn : args = parse_args ( argv ) if args [ "--debug" ] : logging . basicConfig ( format = "%(levelname)s %(filename)s: %(message)s" , level = logging . DEBUG ) logger . debug ( args ) config_file : typing . Union [ Path , None ] if args [ "init" ] : config_file = Path ( args [ "<config_file>" ] or CONFIG_FILE ) init_config ( config_file ) else : config_file = Path ( args [ "<config_file>" ] ) if args [ "<config_file>" ] else None if args [ "edit" ] : edit_config ( config_file ) elif args [ "allow" ] : allow_config ( config_file ) elif args [ "deny" ] : deny_config ( config_file ) mod = use_file ( Path ( args [ "--file" ] ) if args [ "--file" ] else None ) if hasattr ( mod , "setup" ) : mod . setup ( ) # type: ignore if args [ "--name" ] : if args [ "--name" ] not in _config_registry : print_error ( f'Invalid --name: "{args["--name"]}"' ) sys . exit ( 1 ) config_dict = _config_registry [ args [ "--name" ] ] logger . debug ( f'Using named config: "{args["--name"]}"' ) logger . debug ( config_dict ) else : config_dict = _cfg # Allow default shell to be overriden by command-line argument shell_name = args [ "--shell" ] if shell_name : config_dict [ "shell" ] = SHELL_MAP . get ( shell_name . lower ( ) , AutoShell ) logger . debug ( f"Starting with config {config_dict}" ) start ( * * config_dict ) if hasattr ( mod , "teardown" ) : mod . teardown ( ) # type: ignore sys . exit ( 0 )
Main entry point for the konch CLI .
465
10
246,952
def init_autoreload ( mode : int ) -> None : from IPython . extensions import autoreload ip = get_ipython ( ) # type: ignore # noqa: F821 autoreload . load_ipython_extension ( ip ) ip . magics_manager . magics [ "line" ] [ "autoreload" ] ( str ( mode ) )
Load and initialize the IPython autoreload extension .
83
11
246,953
def read_tabular ( table_file , sheetname = 'Sheet1' ) : if isinstance ( table_file , str ) : extension = table_file . split ( '.' ) [ - 1 ] if extension in [ 'xls' , 'xlsx' ] : table = pd . read_excel ( table_file , sheetname = sheetname ) elif extension == 'csv' : table = pd . read_csv ( table_file , encoding = 'UTF-8' ) elif extension == 'tab' : table = pd . read_csv ( table_file , sep = '\t' , encoding = 'UTF-8' ) else : raise ValueError ( 'Unknown file or table type' ) else : raise ValueError ( 'Unknown file or table type' ) if not set ( table . columns ) . issuperset ( { 'Variable' , 'Equation' } ) : raise ValueError ( 'Table must contain at least columns "Variable" and "Equation"' ) if "Units" not in set ( table . columns ) : warnings . warn ( 'Column for "Units" not found' , RuntimeWarning , stacklevel = 2 ) table [ 'Units' ] = '' if "Min" not in set ( table . columns ) : warnings . warn ( 'Column for "Min" not found' , RuntimeWarning , stacklevel = 2 ) table [ 'Min' ] = '' if "Max" not in set ( table . columns ) : warnings . warn ( 'Column for "Max" not found' , RuntimeWarning , stacklevel = 2 ) table [ 'Max' ] = '' mdl_file = table_file . replace ( extension , 'mdl' ) with open ( mdl_file , 'w' , encoding = 'UTF-8' ) as outfile : for element in table . to_dict ( orient = 'records' ) : outfile . write ( "%(Variable)s = \n" "\t %(Equation)s \n" "\t~\t %(Units)s [%(Min)s, %(Max)s] \n" "\t~\t %(Comment)s \n\t|\n\n" % element ) outfile . write ( u'\\\---/// Sketch information - this is where sketch stuff would go.' ) return read_vensim ( mdl_file )
Reads a vensim syntax model which has been formatted as a table .
521
16
246,954
def read_xmile ( xmile_file ) : from . import py_backend from . py_backend . xmile . xmile2py import translate_xmile py_model_file = translate_xmile ( xmile_file ) model = load ( py_model_file ) model . xmile_file = xmile_file return model
Construct a model object from . xmile file .
77
10
246,955
def read_vensim ( mdl_file ) : from . py_backend . vensim . vensim2py import translate_vensim from . py_backend import functions py_model_file = translate_vensim ( mdl_file ) model = functions . Model ( py_model_file ) model . mdl_file = mdl_file return model
Construct a model from Vensim . mdl file .
82
12
246,956
def cache ( horizon ) : def cache_step ( func ) : """ Decorator for caching at a step level""" @ wraps ( func ) def cached ( * args ) : """Step wise cache function""" try : # fails if cache is out of date or not instantiated data = func . __globals__ [ '__data' ] assert cached . cache_t == data [ 'time' ] ( ) assert hasattr ( cached , 'cache_val' ) assert cached . cache_val is not None except ( AssertionError , AttributeError ) : cached . cache_val = func ( * args ) data = func . __globals__ [ '__data' ] cached . cache_t = data [ 'time' ] ( ) return cached . cache_val return cached def cache_run ( func ) : """ Decorator for caching at the run level""" @ wraps ( func ) def cached ( * args ) : """Run wise cache function""" try : # fails if cache is not instantiated return cached . cache_val except AttributeError : cached . cache_val = func ( * args ) return cached . cache_val return cached if horizon == 'step' : return cache_step elif horizon == 'run' : return cache_run else : raise ( AttributeError ( 'Bad horizon for cache decorator' ) )
Put a wrapper around a model function
286
7
246,957
def ramp ( time , slope , start , finish = 0 ) : t = time ( ) if t < start : return 0 else : if finish <= 0 : return slope * ( t - start ) elif t > finish : return slope * ( finish - start ) else : return slope * ( t - start )
Implements vensim s and xmile s RAMP function
65
14
246,958
def pulse ( time , start , duration ) : t = time ( ) return 1 if start <= t < start + duration else 0
Implements vensim s PULSE function
27
11
246,959
def pulse_train ( time , start , duration , repeat_time , end ) : t = time ( ) if start <= t < end : return 1 if ( t - start ) % repeat_time < duration else 0 else : return 0
Implements vensim s PULSE TRAIN function
50
13
246,960
def lookup_extrapolation ( x , xs , ys ) : length = len ( xs ) if x < xs [ 0 ] : dx = xs [ 1 ] - xs [ 0 ] dy = ys [ 1 ] - ys [ 0 ] k = dy / dx return ys [ 0 ] + ( x - xs [ 0 ] ) * k if x > xs [ length - 1 ] : dx = xs [ length - 1 ] - xs [ length - 2 ] dy = ys [ length - 1 ] - ys [ length - 2 ] k = dy / dx return ys [ length - 1 ] + ( x - xs [ length - 1 ] ) * k return np . interp ( x , xs , ys )
Intermediate values are calculated with linear interpolation between the intermediate points . Out - of - range values are calculated with linear extrapolation from the last two values at either end .
167
35
246,961
def xidz ( numerator , denominator , value_if_denom_is_zero ) : small = 1e-6 # What is considered zero according to Vensim Help if abs ( denominator ) < small : return value_if_denom_is_zero else : return numerator * 1.0 / denominator
Implements Vensim s XIDZ function . This function executes a division robust to denominator being zero . In the case of zero denominator the final argument is returned .
72
37
246,962
def initialize ( self , initialization_order = None ) : # Initialize time if self . time is None : if self . time_initialization is None : self . time = Time ( ) else : self . time = self . time_initialization ( ) # if self.time is None: # self.time = time # self.components.time = self.time # self.components.functions.time = self.time # rewrite functions so we don't need this self . components . _init_outer_references ( { 'scope' : self , 'time' : self . time } ) remaining = set ( self . _stateful_elements ) while remaining : progress = set ( ) for element in remaining : try : element . initialize ( ) progress . add ( element ) except ( KeyError , TypeError , AttributeError ) : pass if progress : remaining . difference_update ( progress ) else : raise KeyError ( 'Unresolvable Reference: Probable circular initialization' + '\n' . join ( [ repr ( e ) for e in remaining ] ) )
This function tries to initialize the stateful objects .
232
10
246,963
def set_components ( self , params ) : # It might make sense to allow the params argument to take a pandas series, where # the indices of the series are variable names. This would make it easier to # do a Pandas apply on a DataFrame of parameter values. However, this may conflict # with a pandas series being passed in as a dictionary element. for key , value in params . items ( ) : if isinstance ( value , pd . Series ) : new_function = self . _timeseries_component ( value ) elif callable ( value ) : new_function = value else : new_function = self . _constant_component ( value ) func_name = utils . get_value_by_insensitive_key_or_value ( key , self . components . _namespace ) if func_name is None : raise NameError ( '%s is not recognized as a model component' % key ) if '_integ_' + func_name in dir ( self . components ) : # this won't handle other statefuls... warnings . warn ( "Replacing the equation of stock {} with params" . format ( key ) , stacklevel = 2 ) setattr ( self . components , func_name , new_function )
Set the value of exogenous model elements . Element values can be passed as keyword = value pairs in the function call . Values can be numeric type or pandas Series . Series will be interpolated by integrator .
270
43
246,964
def _timeseries_component ( self , series ) : # this is only called if the set_component function recognizes a pandas series # Todo: raise a warning if extrapolating from the end of the series. return lambda : np . interp ( self . time ( ) , series . index , series . values )
Internal function for creating a timeseries model element
68
9
246,965
def set_state ( self , t , state ) : self . time . update ( t ) for key , value in state . items ( ) : # TODO Implement map with reference between component and stateful element? component_name = utils . get_value_by_insensitive_key_or_value ( key , self . components . _namespace ) if component_name is not None : stateful_name = '_integ_%s' % component_name else : component_name = key stateful_name = key # Try to update stateful component if hasattr ( self . components , stateful_name ) : try : element = getattr ( self . components , stateful_name ) element . update ( value ) except AttributeError : print ( "'%s' has no state elements, assignment failed" ) raise else : # Try to override component try : setattr ( self . components , component_name , self . _constant_component ( value ) ) except AttributeError : print ( "'%s' has no component, assignment failed" ) raise
Set the system state .
229
5
246,966
def clear_caches ( self ) : for element_name in dir ( self . components ) : element = getattr ( self . components , element_name ) if hasattr ( element , 'cache_val' ) : delattr ( element , 'cache_val' )
Clears the Caches for all model elements
58
9
246,967
def doc ( self ) : collector = [ ] for name , varname in self . components . _namespace . items ( ) : try : docstring = getattr ( self . components , varname ) . __doc__ lines = docstring . split ( '\n' ) collector . append ( { 'Real Name' : name , 'Py Name' : varname , 'Eqn' : lines [ 2 ] . replace ( "Original Eqn:" , "" ) . strip ( ) , 'Unit' : lines [ 3 ] . replace ( "Units:" , "" ) . strip ( ) , 'Lims' : lines [ 4 ] . replace ( "Limits:" , "" ) . strip ( ) , 'Type' : lines [ 5 ] . replace ( "Type:" , "" ) . strip ( ) , 'Comment' : '\n' . join ( lines [ 7 : ] ) . strip ( ) } ) except : pass docs_df = _pd . DataFrame ( collector ) docs_df . fillna ( 'None' , inplace = True ) order = [ 'Real Name' , 'Py Name' , 'Unit' , 'Lims' , 'Type' , 'Eqn' , 'Comment' ] return docs_df [ order ] . sort_values ( by = 'Real Name' ) . reset_index ( drop = True )
Formats a table of documentation strings to help users remember variable names and understand how they are translated into python safe names .
293
24
246,968
def initialize ( self ) : self . time . update ( self . components . initial_time ( ) ) self . time . stage = 'Initialization' super ( Model , self ) . initialize ( )
Initializes the simulation model
42
5
246,969
def _format_return_timestamps ( self , return_timestamps = None ) : if return_timestamps is None : # Build based upon model file Start, Stop times and Saveper # Vensim's standard is to expect that the data set includes the `final time`, # so we have to add an extra period to make sure we get that value in what # numpy's `arange` gives us. return_timestamps_array = np . arange ( self . components . initial_time ( ) , self . components . final_time ( ) + self . components . saveper ( ) , self . components . saveper ( ) , dtype = np . float64 ) elif inspect . isclass ( range ) and isinstance ( return_timestamps , range ) : return_timestamps_array = np . array ( return_timestamps , ndmin = 1 ) elif isinstance ( return_timestamps , ( list , int , float , np . ndarray ) ) : return_timestamps_array = np . array ( return_timestamps , ndmin = 1 ) elif isinstance ( return_timestamps , _pd . Series ) : return_timestamps_array = return_timestamps . as_matrix ( ) else : raise TypeError ( '`return_timestamps` expects a list, array, pandas Series, ' 'or numeric value' ) return return_timestamps_array
Format the passed in return timestamps value as a numpy array . If no value is passed build up array of timestamps based upon model start and end times and the saveper value .
322
40
246,970
def run ( self , params = None , return_columns = None , return_timestamps = None , initial_condition = 'original' , reload = False ) : if reload : self . reload ( ) if params : self . set_components ( params ) self . set_initial_condition ( initial_condition ) return_timestamps = self . _format_return_timestamps ( return_timestamps ) t_series = self . _build_euler_timeseries ( return_timestamps ) if return_columns is None : return_columns = self . _default_return_columns ( ) self . time . stage = 'Run' self . clear_caches ( ) capture_elements , return_addresses = utils . get_return_elements ( return_columns , self . components . _namespace , self . components . _subscript_dict ) res = self . _integrate ( t_series , capture_elements , return_timestamps ) return_df = utils . make_flat_df ( res , return_addresses ) return_df . index = return_timestamps return return_df
Simulate the model s behavior over time . Return a pandas dataframe with timestamps as rows model elements as columns .
255
26
246,971
def _default_return_columns ( self ) : return_columns = [ ] parsed_expr = [ ] for key , value in self . components . _namespace . items ( ) : if hasattr ( self . components , value ) : sig = signature ( getattr ( self . components , value ) ) # The `*args` reference handles the py2.7 decorator. if len ( set ( sig . parameters ) - { 'args' } ) == 0 : expr = self . components . _namespace [ key ] if not expr in parsed_expr : return_columns . append ( key ) parsed_expr . append ( expr ) return return_columns
Return a list of the model elements that does not include lookup functions or other functions that take parameters .
144
20
246,972
def set_initial_condition ( self , initial_condition ) : if isinstance ( initial_condition , tuple ) : # Todo: check the values more than just seeing if they are a tuple. self . set_state ( * initial_condition ) elif isinstance ( initial_condition , str ) : if initial_condition . lower ( ) in [ 'original' , 'o' ] : self . initialize ( ) elif initial_condition . lower ( ) in [ 'current' , 'c' ] : pass else : raise ValueError ( 'Valid initial condition strings include: \n' + ' "original"/"o", \n' + ' "current"/"c"' ) else : raise TypeError ( 'Check documentation for valid entries' )
Set the initial conditions of the integration .
160
8
246,973
def _euler_step ( self , dt ) : self . state = self . state + self . ddt ( ) * dt
Performs a single step in the euler integration updating stateful components
30
14
246,974
def _integrate ( self , time_steps , capture_elements , return_timestamps ) : # Todo: consider adding the timestamp to the return elements, and using that as the index outputs = [ ] for t2 in time_steps [ 1 : ] : if self . time ( ) in return_timestamps : outputs . append ( { key : getattr ( self . components , key ) ( ) for key in capture_elements } ) self . _euler_step ( t2 - self . time ( ) ) self . time . update ( t2 ) # this will clear the stepwise caches # need to add one more time step, because we run only the state updates in the previous # loop and thus may be one short. if self . time ( ) in return_timestamps : outputs . append ( { key : getattr ( self . components , key ) ( ) for key in capture_elements } ) return outputs
Performs euler integration
202
5
246,975
def merge_partial_elements ( element_list ) : outs = dict ( ) # output data structure for element in element_list : if element [ 'py_expr' ] != "None" : # for name = element [ 'py_name' ] if name not in outs : # Use 'expr' for Vensim models, and 'eqn' for Xmile (This makes the Vensim equation prettier.) eqn = element [ 'expr' ] if 'expr' in element else element [ 'eqn' ] outs [ name ] = { 'py_name' : element [ 'py_name' ] , 'real_name' : element [ 'real_name' ] , 'doc' : element [ 'doc' ] , 'py_expr' : [ element [ 'py_expr' ] ] , # in a list 'unit' : element [ 'unit' ] , 'subs' : [ element [ 'subs' ] ] , 'lims' : element [ 'lims' ] , 'eqn' : eqn , 'kind' : element [ 'kind' ] , 'arguments' : element [ 'arguments' ] } else : outs [ name ] [ 'doc' ] = outs [ name ] [ 'doc' ] or element [ 'doc' ] outs [ name ] [ 'unit' ] = outs [ name ] [ 'unit' ] or element [ 'unit' ] outs [ name ] [ 'lims' ] = outs [ name ] [ 'lims' ] or element [ 'lims' ] outs [ name ] [ 'eqn' ] = outs [ name ] [ 'eqn' ] or element [ 'eqn' ] outs [ name ] [ 'py_expr' ] += [ element [ 'py_expr' ] ] outs [ name ] [ 'subs' ] += [ element [ 'subs' ] ] outs [ name ] [ 'arguments' ] = element [ 'arguments' ] return list ( outs . values ( ) )
merges model elements which collectively all define the model component mostly for multidimensional subscripts
434
18
246,976
def add_n_delay ( delay_input , delay_time , initial_value , order , subs , subscript_dict ) : # the py name has to be unique to all the passed parameters, or if there are two things # that delay the output by different amounts, they'll overwrite the original function... stateful = { 'py_name' : utils . make_python_identifier ( '_delay_%s_%s_%s_%s' % ( delay_input , delay_time , initial_value , order ) ) [ 0 ] , 'real_name' : 'Delay of %s' % delay_input , 'doc' : 'Delay time: %s \n Delay initial value %s \n Delay order %s' % ( delay_time , initial_value , order ) , 'py_expr' : 'functions.Delay(lambda: %s, lambda: %s, lambda: %s, lambda: %s)' % ( delay_input , delay_time , initial_value , order ) , 'unit' : 'None' , 'lims' : 'None' , 'eqn' : 'None' , 'subs' : '' , 'kind' : 'stateful' , 'arguments' : '' } return "%s()" % stateful [ 'py_name' ] , [ stateful ]
Creates code to instantiate a stateful Delay object and provides reference to that object s output .
296
20
246,977
def add_n_smooth ( smooth_input , smooth_time , initial_value , order , subs , subscript_dict ) : stateful = { 'py_name' : utils . make_python_identifier ( '_smooth_%s_%s_%s_%s' % ( smooth_input , smooth_time , initial_value , order ) ) [ 0 ] , 'real_name' : 'Smooth of %s' % smooth_input , 'doc' : 'Smooth time: %s \n Smooth initial value %s \n Smooth order %s' % ( smooth_time , initial_value , order ) , 'py_expr' : 'functions.Smooth(lambda: %s, lambda: %s, lambda: %s, lambda: %s)' % ( smooth_input , smooth_time , initial_value , order ) , 'unit' : 'None' , 'lims' : 'None' , 'eqn' : 'None' , 'subs' : '' , 'kind' : 'stateful' , 'arguments' : '' } return "%s()" % stateful [ 'py_name' ] , [ stateful ]
Constructs stock and flow chains that implement the calculation of a smoothing function .
262
16
246,978
def add_initial ( initial_input ) : stateful = { 'py_name' : utils . make_python_identifier ( '_initial_%s' % initial_input ) [ 0 ] , 'real_name' : 'Smooth of %s' % initial_input , 'doc' : 'Returns the value taken on during the initialization phase' , 'py_expr' : 'functions.Initial(lambda: %s)' % ( initial_input ) , 'unit' : 'None' , 'lims' : 'None' , 'eqn' : 'None' , 'subs' : '' , 'kind' : 'stateful' , 'arguments' : '' } return "%s()" % stateful [ 'py_name' ] , [ stateful ]
Constructs a stateful object for handling vensim s Initial functionality
173
14
246,979
def add_macro ( macro_name , filename , arg_names , arg_vals ) : func_args = '{ %s }' % ', ' . join ( [ "'%s': lambda: %s" % ( key , val ) for key , val in zip ( arg_names , arg_vals ) ] ) stateful = { 'py_name' : '_macro_' + macro_name + '_' + '_' . join ( [ utils . make_python_identifier ( f ) [ 0 ] for f in arg_vals ] ) , 'real_name' : 'Macro Instantiation of ' + macro_name , 'doc' : 'Instantiates the Macro' , 'py_expr' : "functions.Macro('%s', %s, '%s', time_initialization=lambda: __data['time'])" % ( filename , func_args , macro_name ) , 'unit' : 'None' , 'lims' : 'None' , 'eqn' : 'None' , 'subs' : '' , 'kind' : 'stateful' , 'arguments' : '' } return "%s()" % stateful [ 'py_name' ] , [ stateful ]
Constructs a stateful object instantiating a Macro
275
10
246,980
def add_incomplete ( var_name , dependencies ) : warnings . warn ( '%s has no equation specified' % var_name , SyntaxWarning , stacklevel = 2 ) # first arg is `self` reference return "functions.incomplete(%s)" % ', ' . join ( dependencies [ 1 : ] ) , [ ]
Incomplete functions don t really need to be builders as they add no new real structure but it s helpful to have a function in which we can raise a warning about the incomplete equation at translate time .
73
40
246,981
def get_model_elements ( model_str ) : model_structure_grammar = _include_common_grammar ( r""" model = (entry / section)+ sketch? entry = element "~" element "~" element ("~" element)? "|" section = element "~" element "|" sketch = ~r".*" #anything # Either an escape group, or a character that is not tilde or pipe element = (escape_group / ~r"[^~|]")* """ ) parser = parsimonious . Grammar ( model_structure_grammar ) tree = parser . parse ( model_str ) class ModelParser ( parsimonious . NodeVisitor ) : def __init__ ( self , ast ) : self . entries = [ ] self . visit ( ast ) def visit_entry ( self , n , vc ) : units , lims = parse_units ( vc [ 2 ] . strip ( ) ) self . entries . append ( { 'eqn' : vc [ 0 ] . strip ( ) , 'unit' : units , 'lims' : str ( lims ) , 'doc' : vc [ 4 ] . strip ( ) , 'kind' : 'entry' } ) def visit_section ( self , n , vc ) : if vc [ 2 ] . strip ( ) != "Simulation Control Parameters" : self . entries . append ( { 'eqn' : '' , 'unit' : '' , 'lims' : '' , 'doc' : vc [ 2 ] . strip ( ) , 'kind' : 'section' } ) def generic_visit ( self , n , vc ) : return '' . join ( filter ( None , vc ) ) or n . text or '' return ModelParser ( tree ) . entries
Takes in a string representing model text and splits it into elements
390
13
246,982
def get_equation_components ( equation_str ) : component_structure_grammar = _include_common_grammar ( r""" entry = component / subscript_definition / lookup_definition component = name _ subscriptlist? _ "=" _ expression subscript_definition = name _ ":" _ subscript _ ("," _ subscript)* lookup_definition = name _ &"(" _ expression # uses lookahead assertion to capture whole group name = basic_id / escape_group subscriptlist = '[' _ subscript _ ("," _ subscript)* _ ']' expression = ~r".*" # expression could be anything, at this point. subscript = basic_id / escape_group """ ) # replace any amount of whitespace with a single space equation_str = equation_str . replace ( '\\t' , ' ' ) equation_str = re . sub ( r"\s+" , ' ' , equation_str ) parser = parsimonious . Grammar ( component_structure_grammar ) tree = parser . parse ( equation_str ) class ComponentParser ( parsimonious . NodeVisitor ) : def __init__ ( self , ast ) : self . subscripts = [ ] self . real_name = None self . expression = None self . kind = None self . visit ( ast ) def visit_subscript_definition ( self , n , vc ) : self . kind = 'subdef' def visit_lookup_definition ( self , n , vc ) : self . kind = 'lookup' def visit_component ( self , n , vc ) : self . kind = 'component' def visit_name ( self , n , vc ) : ( name , ) = vc self . real_name = name . strip ( ) def visit_subscript ( self , n , vc ) : ( subscript , ) = vc self . subscripts . append ( subscript . strip ( ) ) def visit_expression ( self , n , vc ) : self . expression = n . text . strip ( ) def generic_visit ( self , n , vc ) : return '' . join ( filter ( None , vc ) ) or n . text def visit__ ( self , n , vc ) : return ' ' parse_object = ComponentParser ( tree ) return { 'real_name' : parse_object . real_name , 'subs' : parse_object . subscripts , 'expr' : parse_object . expression , 'kind' : parse_object . kind }
Breaks down a string representing only the equation part of a model element . Recognizes the various types of model elements that may exist and identifies them .
535
30
246,983
def parse_units ( units_str ) : if not len ( units_str ) : return units_str , ( None , None ) if units_str [ - 1 ] == ']' : units , lims = units_str . rsplit ( '[' ) # type: str, str else : units = units_str lims = '?, ?]' lims = tuple ( [ float ( x ) if x . strip ( ) != '?' else None for x in lims . strip ( ']' ) . split ( ',' ) ] ) return units . strip ( ) , lims
Extract and parse the units Extract the bounds over which the expression is assumed to apply .
125
18
246,984
def parse_lookup_expression ( element ) : lookup_grammar = r""" lookup = _ "(" range? _ ( "(" _ number _ "," _ number _ ")" _ ","? _ )+ ")" number = ("+"/"-")? ~r"\d+\.?\d*(e[+-]\d+)?" _ = ~r"[\s\\]*" # whitespace character range = _ "[" ~r"[^\]]*" "]" _ "," """ parser = parsimonious . Grammar ( lookup_grammar ) tree = parser . parse ( element [ 'expr' ] ) class LookupParser ( parsimonious . NodeVisitor ) : def __init__ ( self , ast ) : self . translation = "" self . new_structure = [ ] self . visit ( ast ) def visit__ ( self , n , vc ) : # remove whitespace return '' def visit_lookup ( self , n , vc ) : pairs = max ( vc , key = len ) mixed_list = pairs . replace ( '(' , '' ) . replace ( ')' , '' ) . split ( ',' ) xs = mixed_list [ : : 2 ] ys = mixed_list [ 1 : : 2 ] string = "functions.lookup(x, [%(xs)s], [%(ys)s])" % { 'xs' : ',' . join ( xs ) , 'ys' : ',' . join ( ys ) } self . translation = string def generic_visit ( self , n , vc ) : return '' . join ( filter ( None , vc ) ) or n . text parse_object = LookupParser ( tree ) return { 'py_expr' : parse_object . translation , 'arguments' : 'x' }
This syntax parses lookups that are defined with their own element
399
13
246,985
def dict_find ( in_dict , value ) : # Todo: make this robust to repeated values # Todo: make this robust to missing values return list ( in_dict . keys ( ) ) [ list ( in_dict . values ( ) ) . index ( value ) ]
Helper function for looking up directory keys by their values . This isn t robust to repeated values
60
18
246,986
def find_subscript_name ( subscript_dict , element ) : if element in subscript_dict . keys ( ) : return element for name , elements in subscript_dict . items ( ) : if element in elements : return name
Given a subscript dictionary and a member of a subscript family return the first key of which the member is within the value list . If element is already a subscript name return that
48
34
246,987
def make_coord_dict ( subs , subscript_dict , terse = True ) : sub_elems_list = [ y for x in subscript_dict . values ( ) for y in x ] coordinates = { } for sub in subs : if sub in sub_elems_list : name = find_subscript_name ( subscript_dict , sub ) coordinates [ name ] = [ sub ] elif not terse : coordinates [ sub ] = subscript_dict [ sub ] return coordinates
This is for assisting with the lookup of a particular element such that the output of this function would take the place of %s in this expression
104
28
246,988
def make_python_identifier ( string , namespace = None , reserved_words = None , convert = 'drop' , handle = 'force' ) : if namespace is None : namespace = dict ( ) if reserved_words is None : reserved_words = list ( ) if string in namespace : return namespace [ string ] , namespace # create a working copy (and make it lowercase, while we're at it) s = string . lower ( ) # remove leading and trailing whitespace s = s . strip ( ) # Make spaces into underscores s = re . sub ( '[\\s\\t\\n]+' , '_' , s ) if convert == 'hex' : # Convert invalid characters to hex. Note: \p{l} designates all Unicode letter characters (any language), # \p{m} designates all mark symbols (e.g., vowel marks in Indian scrips, such as the final) # and \p{n} designates all numbers. We allow any of these to be present in the regex. s = '' . join ( [ c . encode ( "hex" ) if re . findall ( '[^\p{l}\p{m}\p{n}_]' , c ) else c for c in s ] ) elif convert == 'drop' : # Remove invalid characters s = re . sub ( '[^\p{l}\p{m}\p{n}_]' , '' , s ) # Remove leading characters until we find a letter or underscore. Only letters can be leading characters. s = re . sub ( '^[^\p{l}_]+' , '' , s ) # Check that the string is not a python identifier while ( s in keyword . kwlist or s in namespace . values ( ) or s in reserved_words ) : if handle == 'throw' : raise NameError ( s + ' already exists in namespace or is a reserved word' ) if handle == 'force' : if re . match ( ".*?_\d+$" , s ) : i = re . match ( ".*?_(\d+)$" , s ) . groups ( ) [ 0 ] s = s . strip ( '_' + i ) + '_' + str ( int ( i ) + 1 ) else : s += '_1' namespace [ string ] = s return s , namespace
Takes an arbitrary string and creates a valid Python identifier .
504
12
246,989
def make_flat_df ( frames , return_addresses ) : # Todo: could also try a list comprehension here, or parallel apply visited = list ( map ( lambda x : visit_addresses ( x , return_addresses ) , frames ) ) return pd . DataFrame ( visited )
Takes a list of dictionaries each representing what is returned from the model at a particular time and creates a dataframe whose columns correspond to the keys of return addresses
64
33
246,990
def visit_addresses ( frame , return_addresses ) : outdict = dict ( ) for real_name , ( pyname , address ) in return_addresses . items ( ) : if address : xrval = frame [ pyname ] . loc [ address ] if xrval . size > 1 : outdict [ real_name ] = xrval else : outdict [ real_name ] = float ( np . squeeze ( xrval . values ) ) else : outdict [ real_name ] = frame [ pyname ] return outdict
Visits all of the addresses returns a new dict which contains just the addressed elements
122
16
246,991
def validate_request ( request ) : if getattr ( settings , 'BASICAUTH_DISABLE' , False ) : # Not to use this env return True if 'HTTP_AUTHORIZATION' not in request . META : return False authorization_header = request . META [ 'HTTP_AUTHORIZATION' ] ret = extract_basicauth ( authorization_header ) if not ret : return False username , password = ret raw_pass = settings . BASICAUTH_USERS . get ( username ) if raw_pass is None : return False # To avoid timing atacks # https://security.stackexchange.com/questions/83660/simple-string-comparisons-not-secure-against-timing-attacks if not constant_time_compare ( raw_pass , password ) : return False request . META [ 'REMOTE_USER' ] = username return True
Check an incoming request .
200
5
246,992
def _find_address_range ( addresses ) : first = last = addresses [ 0 ] last_index = 0 for ip in addresses [ 1 : ] : if ip . _ip == last . _ip + 1 : last = ip last_index += 1 else : break return ( first , last , last_index )
Find a sequence of addresses .
67
6
246,993
def _prefix_from_prefix_int ( self , prefixlen ) : if not isinstance ( prefixlen , ( int , long ) ) : raise NetmaskValueError ( '%r is not an integer' % prefixlen ) prefixlen = int ( prefixlen ) if not ( 0 <= prefixlen <= self . _max_prefixlen ) : raise NetmaskValueError ( '%d is not a valid prefix length' % prefixlen ) return prefixlen
Validate and return a prefix length integer .
97
9
246,994
def output_colored ( code , text , is_bold = False ) : if is_bold : code = '1;%s' % code return '\033[%sm%s\033[0m' % ( code , text )
Create function to output with color sequence
52
7
246,995
def _set_asset_paths ( self , app ) : webpack_stats = app . config [ 'WEBPACK_MANIFEST_PATH' ] try : with app . open_resource ( webpack_stats , 'r' ) as stats_json : stats = json . load ( stats_json ) if app . config [ 'WEBPACK_ASSETS_URL' ] : self . assets_url = app . config [ 'WEBPACK_ASSETS_URL' ] else : self . assets_url = stats [ 'publicPath' ] self . assets = stats [ 'assets' ] except IOError : raise RuntimeError ( "Flask-Webpack requires 'WEBPACK_MANIFEST_PATH' to be set and " "it must point to a valid json file." )
Read in the manifest json file which acts as a manifest for assets . This allows us to get the asset path as well as hashed names .
174
29
246,996
def javascript_tag(self, *args):
    """Render ``<script>`` tags for one or more javascript bundle names.

    Names that cannot be resolved to an asset path are silently skipped;
    the resulting tags are joined with newlines.
    """
    urls = (self.asset_url_for('{0}.js'.format(name)) for name in args)
    return '\n'.join('<script src="{0}"></script>'.format(url)
                     for url in urls if url)
Convenience tag to output 1 or more javascript tags .
83
12
246,997
def asset_url_for(self, asset):
    """Resolve *asset* to its hashed asset path.

    Anything that resembles a web address (contains ``//``) is returned
    unchanged; unknown asset names resolve to ``None``.
    """
    if '//' in asset:
        # Already a full URL -- nothing to look up.
        return asset
    try:
        hashed = self.assets[asset]
    except KeyError:
        return None
    return '{0}{1}'.format(self.assets_url, hashed)
Lookup the hashed asset path of a file name unless it starts with something that resembles a web address then take it as is .
55
27
246,998
def pre_change_receiver(self, instance: Model, action: Action):
    """Entry point for triggering the old_binding from save signals.

    Records the group names *instance* currently belongs to (keyed by this
    observer) so a later post-change hook can diff them against the new
    groups.
    """
    if action == Action.CREATE:
        # A brand-new instance belongs to no groups yet.
        group_names = set()
    else:
        group_names = set(self.group_names(instance))

    # use a thread local dict to be safe...
    # NOTE(review): ``instance.__instance_groups`` is name-mangled when this
    # runs inside a class body, while ``hasattr(instance, '__instance_groups')``
    # checks the literal unmangled name -- confirm this mismatch is intended.
    if not hasattr(instance, '__instance_groups'):
        instance.__instance_groups = threading.local()
        instance.__instance_groups.observers = {}
    if not hasattr(instance.__instance_groups, 'observers'):
        instance.__instance_groups.observers = {}
    # Key by this observer so multiple observers on one model don't collide.
    instance.__instance_groups.observers[self] = group_names
Entry point for triggering the old_binding from save signals .
145
12
246,999
def post_change_receiver(self, instance: Model, action: Action, **kwargs):
    """Triggers the old_binding to possibly send to its group.

    Diffs the group names captured before the change against the instance's
    current groups and dispatches DELETE messages for groups left and UPDATE
    messages for groups retained.
    """
    try:
        old_group_names = instance.__instance_groups.observers[self]
    except (ValueError, KeyError):
        # The pre-change hook did not record groups for this observer.
        old_group_names = set()

    if action == Action.DELETE:
        new_group_names = set()
    else:
        new_group_names = set(self.group_names(instance))

    # if post delete, new_group_names should be []

    # Django DDP had used the ordering of DELETE, UPDATE then CREATE for good reasons.

    self.send_messages(instance, old_group_names - new_group_names, Action.DELETE, **kwargs)
    # the object has been updated so that its groups are not the same.
    self.send_messages(instance, old_group_names & new_group_names, Action.UPDATE, **kwargs)
    # self.send_messages(instance, new_group_names - old_group_names, Action.CREATE, **kwargs)
Triggers the old_binding to possibly send to its group .
245
14