idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
45,000
def get_object_perms(obj, user=None):
    """Return permissions for given object in Resolwe-specific format.

    If ``user`` is given, only that user's (and their groups')
    permissions are returned; otherwise the permissions of all users,
    groups and the anonymous (public) user are collected.
    """
    def format_permissions(perms):
        # Strip the "_<model name>" suffix from permission codenames,
        # e.g. "view_data" -> "view".
        ctype = ContentType.objects.get_for_model(obj)
        return [perm.replace('_{}'.format(ctype.name), '') for perm in perms]

    perms_list = []
    if user:
        if user.is_authenticated:
            user_perms, group_perms = get_user_group_perms(user, obj)
        else:
            # Anonymous users have no direct/group permissions here.
            user_perms, group_perms = [], []
        if user_perms != []:
            perms_list.append({
                'type': 'user',
                'id': user.pk,
                'name': user.get_full_name() or user.username,
                'permissions': format_permissions(user_perms),
            })
        if group_perms != []:
            for group_id, group_name, perms in group_perms:
                perms_list.append({
                    'type': 'group',
                    'id': group_id,
                    'name': group_name,
                    'permissions': format_permissions(perms),
                })
    else:
        user_options = {'attach_perms': True, 'with_group_users': False}
        for user, perms in get_users_with_perms(obj, **user_options).items():
            # The anonymous user is reported separately as "public" below.
            if user.username == settings.ANONYMOUS_USER_NAME:
                continue
            perms_list.append({
                'type': 'user',
                'id': user.pk,
                'name': user.get_full_name() or user.username,
                'permissions': format_permissions(perms),
            })
        group_options = {
            'attach_perms': True,
        }
        for group, perms in get_groups_with_perms(obj, **group_options).items():
            perms_list.append({
                'type': 'group',
                'id': group.pk,
                'name': group.name,
                'permissions': format_permissions(perms),
            })
    # Public (anonymous) permissions are always reported.
    public_perms = get_perms(AnonymousUser(), obj)
    if public_perms != []:
        perms_list.append({
            'type': 'public',
            'permissions': format_permissions(public_perms),
        })
    return perms_list
Return permissions for the given object in the Resolwe-specific format.
45,001
def get_users_with_permission(obj, permission):
    """Return users that hold the given permission on ``obj``."""
    lookup = {
        'userobjectpermission__object_pk': obj.pk,
        'userobjectpermission__permission__codename': permission,
    }
    return get_user_model().objects.filter(**lookup).distinct()
Return users with specific permission on object .
45,002
def purge_run(self, event):
    """Run purge for the object with ``location_id`` specified in ``event``.

    Any error is logged and swallowed so that a failed purge does not
    crash the caller.
    """
    location_id = event['location_id']
    verbosity = event['verbosity']
    try:
        logger.info(__("Running purge for location id {}.", location_id))
        location_purge(location_id=location_id, delete=True, verbosity=verbosity)
    except Exception:
        logger.exception("Error while purging location.", extra={'location_id': location_id})
Run purge for the object with location_id specified in event argument .
45,003
def get_resource_limits(self):
    """Get the core count and memory usage limits for this process.

    Returns a dict with keys ``cores`` and ``memory`` (memory
    presumably in megabytes -- confirm against settings docs).
    """
    limit_defaults = getattr(settings, 'FLOW_PROCESS_RESOURCE_DEFAULTS', {})
    limit_overrides = getattr(settings, 'FLOW_PROCESS_RESOURCE_OVERRIDES', {})
    limits = {}
    resources = self.requirements.get('resources', {})
    limits['cores'] = int(resources.get('cores', 1))
    # Site-wide cap on cores, when configured.
    max_cores = getattr(settings, 'FLOW_PROCESS_MAX_CORES', None)
    if max_cores:
        limits['cores'] = min(limits['cores'], max_cores)
    # Per-slug override wins over the process' own request and the
    # site-wide default (4096).
    memory = limit_overrides.get('memory', {}).get(self.slug, None)
    if memory is None:
        memory = int(resources.get('memory', limit_defaults.get('memory', 4096)))
    limits['memory'] = memory
    return limits
Get the core count and memory usage limits for this process .
45,004
def validate_query_params(self):
    """Ensure no unsupported query params were used."""
    allowed_params = set(self.get_filters().keys())
    allowed_params.update(self.get_always_allowed_arguments())
    unallowed = set(self.request.query_params.keys()) - allowed_params
    if unallowed:
        msg = 'Unsupported parameter(s): {}. Please use a combination of: {}.'.format(
            ', '.join(unallowed),
            ', '.join(allowed_params),
        )
        # Attach as a non-field form error instead of raising directly.
        self.form.add_error(field=None, error=ParseError(msg))
Ensure no unsupported query params were used .
45,005
def from_wc(cls, wc):
    """Return a Wilson instance initialized from a wcxf.WC instance."""
    init_kwargs = dict(wcdict=wc.dict, scale=wc.scale, eft=wc.eft, basis=wc.basis)
    return cls(**init_kwargs)
Return a Wilson instance initialized by a wcxf . WC instance
45,006
def load_wc(cls, stream):
    """Return a Wilson instance initialized from a WCxf file-like object."""
    parsed = wcxf.WC.load(stream)
    return cls.from_wc(parsed)
Return a Wilson instance initialized by a WCxf file - like object
45,007
def _get_from_cache ( self , sector , scale , eft , basis ) : try : return self . _cache [ eft ] [ scale ] [ basis ] [ sector ] except KeyError : return None
Try to load a set of Wilson coefficients from the cache else return None .
45,008
def plotdata(self, key, part='re', scale='log', steps=50):
    """Return a tuple of arrays ``x, y`` that can be fed to ``plt.plot``.

    ``x`` is the scale in GeV; ``y`` is the real or imaginary part of
    the parameter ``key`` at each scale.

    :param key: parameter name to extract from each solution dict.
    :param part: 're' or 'im'.
    :param scale: 'log' or 'linear' spacing of the sample points.
    :param steps: number of sample points.
    :raises ValueError: on an invalid ``part`` or ``scale``
        (previously an invalid ``scale`` caused an UnboundLocalError
        and an invalid ``part`` silently returned None).
    """
    if scale == 'log':
        x = np.logspace(log(self.scale_min), log(self.scale_max), steps, base=e)
    elif scale == 'linear':
        x = np.linspace(self.scale_min, self.scale_max, steps)
    else:
        raise ValueError("scale must be 'log' or 'linear', got {!r}".format(scale))
    y = self.fun(x)
    y = np.array([d[key] for d in y])
    if part == 're':
        return x, y.real
    elif part == 'im':
        return x, y.imag
    raise ValueError("part must be 're' or 'im', got {!r}".format(part))
Return a tuple of arrays x y that can be fed to plt . plot where x is the scale in GeV and y is the parameter of interest .
45,009
def plot(self, key, part='re', scale='log', steps=50, legend=True, plotargs={}):
    """Plot the RG evolution of parameter ``key``.

    NOTE: ``plotargs={}`` is a mutable default; it is only read here,
    never mutated, so this is safe.
    """
    try:
        # Imported lazily so matplotlib stays an optional dependency.
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("Please install matplotlib if you want to use the plot method")
    pdat = self.plotdata(key, part=part, scale=scale, steps=steps)
    plt.plot(*pdat, label=key, **plotargs)
    if scale == 'log':
        plt.xscale('log')
    if legend:
        plt.legend()
Plot the RG evolution of parameter key .
45,010
async def run_consumer(timeout=None, dry_run=False):
    """Run the consumer until it finishes processing.

    :param timeout: Maximum number of seconds to consume messages;
        ``None`` means consume until an explicit quit message arrives.
    :param dry_run: When True, receive messages but do not dispatch
        them to the consumer application.
    """
    channel = state.MANAGER_CONTROL_CHANNEL
    scope = {
        'type': 'control_event',
        'channel': channel,
    }
    app = ApplicationCommunicator(ManagerConsumer, scope)
    channel_layer = get_channel_layer()

    async def _consume_loop():
        # Forward messages to the consumer app until a quit message.
        while True:
            message = await channel_layer.receive(channel)
            if dry_run:
                continue
            if message.get('type') == '_resolwe_manager_quit':
                break
            message.update(scope)
            await app.send_input(message)

    if timeout is None:
        await _consume_loop()
    else:
        # BUG FIX: the untimed and timed branches were previously both
        # executed, so after the untimed loop finished the consume loop
        # ran a second time under a 1 s timeout.
        try:
            async with async_timeout.timeout(timeout or 1):
                await _consume_loop()
        except asyncio.TimeoutError:
            pass
    await app.wait()
Run the consumer until it finishes processing .
45,011
def discover_extensions(self):
    """Discover available extensions.

    Imports the ``<app>.extensions`` module of every installed Django
    app.  On unexpected failure the previously registered extensions
    are restored before the exception propagates.
    """
    if self._discovery_done:
        return
    try:
        previous_state = self._extensions.copy()
        for app_config in apps.get_app_configs():
            indexes_path = '{}.extensions'.format(app_config.name)
            try:
                import_module(indexes_path)
            except ImportError:
                # App has no extensions module.  NOTE(review): this also
                # hides ImportErrors raised inside an existing module.
                pass
        self._discovery_done = True
    except Exception:
        # Roll back to the state from before discovery started.
        self._extensions = previous_state
        raise
Discover available extensions .
45,012
def _get_class_path ( self , klass_or_instance ) : if inspect . isclass ( klass_or_instance ) : klass = '{}.{}' . format ( klass_or_instance . __module__ , klass_or_instance . __name__ ) elif not isinstance ( klass_or_instance , str ) : klass = klass_or_instance . __class__ klass = '{}.{}' . format ( klass . __module__ , klass . __name__ ) else : klass = klass_or_instance return klass
Return class path for a given class .
45,013
def add_extension(self, klass, extension):
    """Register an extension for a class."""
    path = self._get_class_path(klass)
    bucket = self._extensions.setdefault(path, [])
    bucket.append(extension)
Register an extension for a class .
45,014
def get_extensions(self, klass):
    """Return all registered extensions of a class."""
    self.discover_extensions()
    path = self._get_class_path(klass)
    return self._extensions.get(path, [])
Return all registered extensions of a class .
45,015
def run(self, scale_out, sectors='all'):
    """Evolve the Wilson coefficients to the scale ``scale_out``.

    Returns a ``wcxf.WC`` instance in the JMS basis containing only
    non-vanishing coefficients that exist in that basis.
    """
    C_out = self._run_dict(scale_out, sectors=sectors)
    all_wcs = set(wcxf.Basis[self.eft, 'JMS'].all_wcs)
    # Drop vanishing coefficients and keys not defined in the basis.
    C_out = {k: v for k, v in C_out.items() if v != 0 and k in all_wcs}
    return wcxf.WC(eft=self.eft, basis='JMS', scale=scale_out, values=wcxf.WC.dict2values(C_out))
Evolve the Wilson coefficients to the scale scale_out .
45,016
def sort_by(self, sf):
    """Return a new search with the given sort method applied.

    Available methods include sort.SCORE, sort.COMMENTS, sort.HEIGHT,
    sort.RELEVANCE, sort.CREATED_AT and sort.RANDOM; the default is
    sort.CREATED_AT.
    """
    merged = join_params(self.parameters, {"sf": sf})
    return self.__class__(**merged)
Determines how to sort search results . Available sorting methods are sort . SCORE sort . COMMENTS sort . HEIGHT sort . RELEVANCE sort . CREATED_AT and sort . RANDOM ; default is sort . CREATED_AT .
45,017
def limit(self, limit):
    """Return a new search capped at ``limit`` images.

    Pass None to return as many results as needed; the default is 50.
    """
    merged = join_params(self.parameters, {"limit": limit})
    return self.__class__(**merged)
Set absolute limit on number of images to return or set to None to return as many results as needed ; default 50 posts .
45,018
def faves(self, option):
    """Return a new search filtered by the user's faves list.

    Options are user.ONLY, user.NOT and None (default).
    """
    merged = join_params(self.parameters, {"faves": option})
    return self.__class__(**merged)
Set whether to filter by a user s faves list . Options available are user . ONLY user . NOT and None ; default is None .
45,019
def upvotes(self, option):
    """Return a new search filtered by the user's upvoted list.

    Options are user.ONLY, user.NOT and None (default).
    """
    merged = join_params(self.parameters, {"upvotes": option})
    return self.__class__(**merged)
Set whether to filter by a user s upvoted list . Options available are user . ONLY user . NOT and None ; default is None .
45,020
def uploads(self, option):
    """Return a new search filtered by the user's uploads list.

    Options are user.ONLY, user.NOT and None (default).
    """
    merged = join_params(self.parameters, {"uploads": option})
    return self.__class__(**merged)
Set whether to filter by a user s uploads list . Options available are user . ONLY user . NOT and None ; default is None .
45,021
def watched(self, option):
    """Return a new search filtered by the user's watchlist.

    Options are user.ONLY, user.NOT and None (default).
    """
    merged = join_params(self.parameters, {"watched": option})
    return self.__class__(**merged)
Set whether to filter by a user s watchlist . Options available are user . ONLY user . NOT and None ; default is None .
45,022
def define_contributor(self, request):
    """Set the contributor in ``request.data`` to the resolved user's pk."""
    contributor = self.resolve_user(request.user)
    request.data['contributor'] = contributor.pk
Define contributor by adding it to request . data .
45,023
def slug_exists(self, request):
    """Check if given url slug exists.

    Requires an authenticated user and a ``name`` query parameter;
    matching is case-insensitive.
    """
    if not request.user.is_authenticated:
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    if 'name' not in request.query_params:
        return Response({'error': 'Query parameter `name` must be given.'}, status=status.HTTP_400_BAD_REQUEST)
    queryset = self.get_queryset()
    slug_name = request.query_params['name']
    return Response(queryset.filter(slug__iexact=slug_name).exists())
Check if given url slug exists .
45,024
def get_ids(self, request_data, parameter_name='ids'):
    """Extract and validate a list of integers from request data.

    :raises ParseError: if the parameter is missing, not a list,
        empty, or contains non-integer entries.
    """
    if parameter_name not in request_data:
        raise ParseError("`{}` parameter is required".format(parameter_name))
    ids = request_data.get(parameter_name)
    if not isinstance(ids, list):
        raise ParseError("`{}` parameter not a list".format(parameter_name))
    if not ids:
        raise ParseError("`{}` parameter is empty".format(parameter_name))
    # BUG FIX: bool is a subclass of int, so True/False previously
    # passed the integer check; reject them explicitly.  Also avoid
    # shadowing the builtin `id`.
    if any(not isinstance(item, int) or isinstance(item, bool) for item in ids):
        raise ParseError("`{}` parameter contains non-integers".format(parameter_name))
    return ids
Extract a list of integers from request data .
45,025
def get_id(self, request_data, parameter_name='id'):
    """Extract a single integer from request data.

    :raises ParseError: if the parameter is missing or not an integer.
    """
    if parameter_name not in request_data:
        raise ParseError("`{}` parameter is required".format(parameter_name))
    id_parameter = request_data.get(parameter_name, None)
    # BUG FIX: bool is a subclass of int, so True/False previously
    # passed the integer check; reject them explicitly.
    if not isinstance(id_parameter, int) or isinstance(id_parameter, bool):
        raise ParseError("`{}` parameter not an integer".format(parameter_name))
    return id_parameter
Extract an integer from request data .
45,026
def deserialize(dataone_exception_xml):
    """Deserialize a DataONE Exception XML doc to an exception object.

    Raises ServiceFailure when the document cannot be parsed.
    """
    try:
        dataone_exception_pyxb = d1_common.xml.deserialize_d1_exception(dataone_exception_xml)
    except ValueError as e:
        raise ServiceFailure(
            detailCode='0',
            description='Deserialization failed. error="{}" doc="{}"'.format(
                str(e),
                '<empty response>' if not dataone_exception_xml else dataone_exception_xml,
            ),
            traceInformation=traceback.format_exc(),
        )
    else:
        # Map the parsed name back onto a concrete exception class.
        x = create_exception_by_name(
            dataone_exception_pyxb.name,
            dataone_exception_pyxb.detailCode,
            dataone_exception_pyxb.description,
            _get_trace_information_content(dataone_exception_pyxb),
            dataone_exception_pyxb.identifier,
            dataone_exception_pyxb.nodeId,
        )
        return x
Deserialize a DataONE Exception XML doc .
45,027
def create_exception_by_name(
    name, detailCode='0', description='', traceInformation=None,
    identifier=None, nodeId=None,
):
    """Create a DataONEException-based object by class name.

    Unknown names fall back to ServiceFailure.
    """
    exception_class = globals().get(name, ServiceFailure)
    return exception_class(detailCode, description, traceInformation, identifier, nodeId)
Create a DataONEException based object by name .
45,028
def create_exception_by_error_code(
    errorCode, detailCode='0', description='', traceInformation=None,
    identifier=None, nodeId=None,
):
    """Create a DataONEException-based object by HTTP errorCode.

    Unknown codes fall back to ServiceFailure.
    """
    exception_class = ERROR_CODE_TO_EXCEPTION_DICT.get(errorCode, ServiceFailure)
    return exception_class(detailCode, description, traceInformation, identifier, nodeId)
Create a DataONE Exception object by errorCode .
45,029
def _fmt ( self , tag , msg ) : msg = msg or '<unset>' msg = str ( msg ) msg = msg . strip ( ) if not msg : return if len ( msg ) > 2048 : msg = msg [ : 1024 ] + '...' if msg . count ( '\n' ) <= 1 : return '{}: {}\n' . format ( tag , msg . strip ( ) ) else : return '{}:\n {}\n' . format ( tag , msg . replace ( '\n' , '\n ' ) . strip ( ) )
Format a string for inclusion in the exception s string representation .
45,030
def friendly_format(self):
    """Serialize to a format more suitable for displaying to end users."""
    if self.description is not None:
        msg = self.description
    else:
        # Fall back to the numeric codes when no description is set.
        msg = 'errorCode: {} / detailCode: {}'.format(self.errorCode, self.detailCode)
    return self._fmt(self.name, msg)
Serialize to a format more suitable for displaying to end users .
45,031
def serialize_to_transport(self, encoding='utf-8', xslt_url=None):
    """Serialize to XML bytes with prolog.

    Only UTF-8 encoding is supported.
    """
    assert encoding in ('utf-8', 'UTF-8')
    dataone_exception_pyxb = self.get_pyxb()
    return d1_common.xml.serialize_for_transport(dataone_exception_pyxb, xslt_url=xslt_url)
Serialize to XML bytes with prolog .
45,032
def serialize_to_display(self, xslt_url=None):
    """Serialize to a pretty-printed Unicode str suitable for display."""
    return d1_common.xml.serialize_to_xml_str(self.get_pyxb(), pretty=True, xslt_url=xslt_url)
Serialize to a pretty printed Unicode str suitable for display .
45,033
def serialize_to_headers(self):
    """Serialize to a dict of HTTP headers."""
    return {
        'DataONE-Exception-Name': self.__class__.__name__,
        'DataONE-Exception-ErrorCode': self._format_header(self.errorCode),
        'DataONE-Exception-DetailCode': self._format_header(self.detailCode),
        'DataONE-Exception-Description': self._format_header(self.description),
        'DataONE-Exception-TraceInformation': self._format_header(self.traceInformation),
        'DataONE-Exception-Identifier': self._format_header(self.identifier),
        'DataONE-Exception-NodeID': self._format_header(self.nodeId),
    }
Serialize to a dict of HTTP headers .
45,034
def get_pyxb(self):
    """Generate a DataONE Exception PyXB object from this exception."""
    dataone_exception_pyxb = dataoneErrors.error()
    dataone_exception_pyxb.name = self.__class__.__name__
    dataone_exception_pyxb.errorCode = self.errorCode
    dataone_exception_pyxb.detailCode = self.detailCode
    # Optional fields are only set when present.
    if self.description is not None:
        dataone_exception_pyxb.description = self.description
    dataone_exception_pyxb.traceInformation = self.traceInformation
    if self.identifier is not None:
        dataone_exception_pyxb.identifier = self.identifier
    if self.nodeId is not None:
        dataone_exception_pyxb.nodeId = self.nodeId
    return dataone_exception_pyxb
Generate a DataONE Exception PyXB object .
45,035
def recreate_parent_dependencies(apps, schema_editor):
    """Create empty dependency relation if parent has been deleted.

    Data-migration helper: walks all Data objects' inputs and, for each
    referenced parent that no longer exists, records an 'io' dependency
    with a null parent.
    """
    Data = apps.get_model('flow', 'Data')
    DataDependency = apps.get_model('flow', 'DataDependency')

    def process_dependency(data, parent):
        # Only add a placeholder when the referenced parent is gone.
        if not Data.objects.filter(pk=parent).exists():
            DataDependency.objects.create(child=data, parent=None, kind='io')

    for data in Data.objects.all():
        for field_schema, fields in iterate_fields(data.input, data.process.input_schema):
            name = field_schema['name']
            value = fields[name]
            if field_schema.get('type', '').startswith('data:'):
                process_dependency(data, value)
            elif field_schema.get('type', '').startswith('list:data:'):
                for parent in value:
                    process_dependency(data, parent)
Create empty dependency relation if parent has been deleted .
45,036
def add_access_policy_filter(request, query, column_name):
    """Filter out records the active subjects cannot read.

    Keeps only records where at least one of the request's subjects has
    read-or-better access.
    """
    q = d1_gmn.app.models.Subject.objects.filter(subject__in=request.all_subjects_set).values('permission__sciobj')
    # Build the "<column>__in" lookup dynamically.
    filter_arg = '{}__in'.format(column_name)
    return query.filter(**{filter_arg: q})
Filter out records for which none of the active subjects has read-or-better access.
45,037
def add_redact_annotation(request, query):
    """Annotate records that must be redacted before return to the client.

    ``redact`` is True when none of the active subjects has write-level
    permission on the record's science object.
    """
    return query.annotate(
        redact=django.db.models.Exists(
            d1_gmn.app.models.Permission.objects.filter(
                sciobj=django.db.models.OuterRef('sciobj'),
                subject__subject__in=request.all_subjects_set,
                level__gte=d1_gmn.app.auth.WRITE_LEVEL,
            ),
            # negated: the annotation is True when no such permission exists.
            negated=True,
        )
    )
Flag LogEntry records that require ipAddress and subject fields to be redacted before being returned to the client .
45,038
def post_has_mime_parts(request, parts):
    """Validate that a MIME multipart POST contains all required sections.

    ``parts`` is a sequence of ``(part_type, part_name)`` tuples, where
    part_type is 'header', 'file' or 'field'.  Raises InvalidRequest
    when any part is missing and ServiceFailure on an unknown type.
    """
    missing = []
    for part_type, part_name in parts:
        if part_type == 'header':
            present = 'HTTP_' + part_name.upper() in request.META
        elif part_type == 'file':
            present = part_name in list(request.FILES.keys())
        elif part_type == 'field':
            present = part_name in list(request.POST.keys())
        else:
            raise d1_common.types.exceptions.ServiceFailure(
                0, 'Invalid part_type. part_type="{}"'.format(part_type)
            )
        if not present:
            missing.append('{}: {}'.format(part_type, part_name))
    if len(missing) > 0:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Missing part(s) in MIME Multipart document. missing="{}"'.format(', '.join(missing)),
        )
Validate that a MMP POST contains all required sections .
45,039
def _decade_ranges_in_date_range ( self , begin_date , end_date ) : begin_dated = begin_date . year / 10 end_dated = end_date . year / 10 decades = [ ] for d in range ( begin_dated , end_dated + 1 ) : decades . append ( '{}-{}' . format ( d * 10 , d * 10 + 9 ) ) return decades
Return a list of decades which is covered by date range .
45,040
def _years_in_date_range_within_decade ( self , decade , begin_date , end_date ) : begin_year = begin_date . year end_year = end_date . year if begin_year < decade : begin_year = decade if end_year > decade + 9 : end_year = decade + 9 return list ( range ( begin_year , end_year + 1 ) )
Return a list of years in one decade which is covered by date range .
45,041
def smeft_evolve_leadinglog(C_in, scale_in, scale_out, newphys=True):
    """Solve the SMEFT RGEs in the leading log approximation.

    Input and output are dictionaries of Wilson coefficient values.
    """
    C_out = deepcopy(C_in)
    b = beta.beta(C_out, newphys=newphys)
    for k, C in C_out.items():
        # C(out) = C(in) + beta / (16 pi^2) * log(mu_out / mu_in)
        C_out[k] = C + b[k] / (16 * pi ** 2) * log(scale_out / scale_in)
    return C_out
Solve the SMEFT RGEs in the leading log approximation .
45,042
def _smeft_evolve(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Auxiliary function used in smeft_evolve and smeft_evolve_continuous.

    Integrates the RGEs in t = log(scale).  Complex coefficient arrays
    are viewed as float arrays for the ODE solver.
    """
    def fun(t0, y):
        return beta.beta_array(C=C_array2dict(y.view(complex)), newphys=newphys).view(float) / (16 * pi ** 2)
    y0 = C_dict2array(C_in).view(float)
    sol = solve_ivp(fun=fun, t_span=(log(scale_in), log(scale_out)), y0=y0, **kwargs)
    return sol
Auxiliary function used in smeft_evolve and smeft_evolve_continuous.
45,043
def smeft_evolve(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Solve the SMEFT RGEs by numeric integration.

    Extra keyword arguments are passed to ``solve_ivp``.
    """
    sol = _smeft_evolve(C_in, scale_in, scale_out, newphys=newphys, **kwargs)
    # The last column of the solution corresponds to scale_out.
    return C_array2dict(sol.y[:, -1].view(complex))
Solve the SMEFT RGEs by numeric integration .
45,044
def smeft_evolve_continuous(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Solve the SMEFT RGEs returning an interpolating solution.

    The returned function maps a scale (or an array of scales) to the
    Wilson coefficient values at that scale.
    """
    sol = _smeft_evolve(C_in, scale_in, scale_out, newphys=newphys, dense_output=True, **kwargs)

    @np.vectorize
    def _rge_solution(scale):
        t = log(scale)
        y = sol.sol(t).view(complex)
        yd = C_array2dict(y)
        yw = arrays2wcxf_nonred(yd)
        return yw

    def rge_solution(scale):
        # Unwrap the 0-d array np.vectorize returns for scalar input.
        return _rge_solution(scale)[()]

    return rge_solution
Solve the SMEFT RGEs by numeric integration returning a function that allows to compute an interpolated solution at arbitrary intermediate scales .
45,045
def invalid_index(self, name):
    """Print an error message listing the supported indices."""
    self.stderr.write("Unknown index: {}".format(name))
    self.stderr.write("Supported indices are:")
    for registered in index_builder.indexes:
        self.stderr.write(" * {}".format(registered.__class__.__name__))
Show an invalid index error message .
45,046
def filter_indices(self, options, verbosity, *args, **kwargs):
    """Filter indices and execute an action for each index.

    Index names come from the --index/--exclude options; an unknown
    name aborts after printing an error.
    """
    index_name_map = {index.__class__.__name__: index for index in index_builder.indexes}
    if options['index']:
        indices = set(options['index'])
    else:
        # No explicit selection: process all known indices.
        indices = set(index_name_map.keys())
    for index_name in options['exclude']:
        if index_name not in index_name_map:
            self.invalid_index(index_name)
            return
        indices.discard(index_name)
    for index_name in indices:
        try:
            index = index_name_map[index_name]
        except KeyError:
            self.invalid_index(index_name)
            return
        if verbosity > 0:
            self.stdout.write("Processing index '{}'...".format(index_name))
        self.handle_index(index, *args, **kwargs)
Filter indices and execute an action for each index .
45,047
def get_contributor_sort_value(self, obj):
    """Generate a normalized (trimmed, lowercase) display name for the contributor."""
    user = obj.contributor
    has_real_name = bool(user.first_name or user.last_name)
    display = user.get_full_name() if has_real_name else user.username
    return display.strip().lower()
Generate display name for contributor .
45,048
def _get_user ( self , user ) : return ' ' . join ( [ user . username , user . first_name , user . last_name ] )
Generate user filtering tokens .
45,049
def get_owner_ids_value(self, obj):
    """Extract the primary keys of users with owner permission on ``obj``."""
    return [user.pk for user in get_users_with_permission(obj, get_full_perm('owner', obj))]
Extract owners ids .
45,050
def get_owner_names_value(self, obj):
    """Extract filtering tokens for users with owner permission on ``obj``."""
    return [self._get_user(user) for user in get_users_with_permission(obj, get_full_perm('owner', obj))]
Extract owners names .
45,051
def _get_and_assert_slice_param ( url_dict , param_name , default_int ) : param_str = url_dict [ 'query' ] . get ( param_name , default_int ) try : n = int ( param_str ) except ValueError : raise d1_common . types . exceptions . InvalidRequest ( 0 , 'Slice parameter is not a valid integer. {}="{}"' . format ( param_name , param_str ) , ) if n < 0 : raise d1_common . types . exceptions . InvalidRequest ( 0 , 'Slice parameter cannot be a negative number. {}="{}"' . format ( param_name , param_str ) , ) return n
Return param_str converted to an int .
45,052
def _assert_valid_start ( start_int , count_int , total_int ) : if total_int and start_int >= total_int : raise d1_common . types . exceptions . InvalidRequest ( 0 , 'Requested a non-existing slice. start={} count={} total={}' . format ( start_int , count_int , total_int ) , )
Assert that the number of objects visible to the active subject is higher than the requested start position for the slice .
45,053
def _adjust_count_if_required(start_int, count_int, total_int):
    """Clamp the requested count to the available items and the site maximum."""
    available = total_int - start_int
    if count_int > available:
        count_int = available
    return min(count_int, django.conf.settings.MAX_SLICE_ITEMS)
Adjust requested object count down if there are not enough objects visible to the active subjects to cover the requested slice start and count .
45,054
def _add_fallback_slice_filter(query, start_int, count_int, total_int):
    """Create a slice of a query based on request start and count parameters.

    Used when no cached slice position is available.
    """
    logging.debug(
        'Adding fallback slice filter. start={} count={} total={} '.format(start_int, count_int, total_int)
    )
    if not count_int:
        # An empty slice was requested.
        return query.none()
    else:
        return query[start_int:start_int + count_int]
Create a slice of a query based on request start and count parameters .
45,055
def _cache_get_last_in_slice(url_dict, start_int, total_int, authn_subj_list):
    """Return the cached last-in-slice value, or None if the entry does not exist."""
    key_str = _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list)
    try:
        last_ts_tup = django.core.cache.cache.get(key_str)
    except KeyError:
        # NOTE(review): Django's cache.get returns a default instead of
        # raising KeyError, so this handler looks unreachable -- confirm.
        last_ts_tup = None
    logging.debug('Cache get. key="{}" -> last_ts_tup={}'.format(key_str, last_ts_tup))
    return last_ts_tup
Return None if cache entry does not exist .
45,056
def _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list):
    """Generate the cache key for the slice starting at ``start_int``.

    The 'start' and 'count' query parameters are excluded so that the
    key stays stable across adjacent slice requests for the same query.
    """
    key_url_dict = copy.deepcopy(url_dict)
    key_url_dict['query'].pop('start', None)
    key_url_dict['query'].pop('count', None)
    key_json = d1_common.util.serialize_to_normalized_compact_json({
        'url_dict': key_url_dict,
        'start': start_int,
        'total': total_int,
        'subject': authn_subj_list,
    })
    logging.debug('key_json={}'.format(key_json))
    return hashlib.sha256(key_json.encode('utf-8')).hexdigest()
Generate cache key for the REST URL the client is currently accessing or is expected to access in order to get the slice starting at the given start_int of a multi - slice result set .
45,057
def smeft_toarray(wc_name, wc_dict):
    """Construct a numpy array of Wilson coefficient values from a dict.

    ``wc_dict`` maps labels like "<name>_1123" to the values of the
    non-redundant elements.  Missing entries default to zero and the
    result is symmetrized.
    """
    shape = smeftutil.C_keys_shape[wc_name]
    C = np.zeros(shape, dtype=complex)
    for k, v in wc_dict.items():
        if k.split('_')[0] != wc_name:
            continue
        # The trailing label digits are 1-based flavor indices.
        indices = k.split('_')[-1]
        indices = tuple(int(s) - 1 for s in indices)
        C[indices] = v
    C = smeftutil.symmetrize({wc_name: C})[wc_name]
    return C
Construct a numpy array with Wilson coefficient values from a dictionary of label - value pairs corresponding to the non - redundant elements .
45,058
def warsaw_to_warsawmass(C, parameters=None, sectors=None):
    """Translate from the Warsaw basis to the Warsaw mass basis.

    ``parameters`` may override the default CKM parameters.
    """
    p = default_parameters.copy()
    if parameters is not None:
        p.update(parameters)
    C_out = C.copy()
    # Rotate the up-type operators to the mass basis.
    C_rotate_u = ['uphi', 'uG', 'uW', 'uB']
    for name in C_rotate_u:
        _array = smeft_toarray(name, C)
        V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
        UuL = V.conj().T
        _array = UuL.conj().T @ _array
        _dict = smeft_fromarray(name, _array)
        C_out.update(_dict)
    # Diagonalize the llphiphi (neutrino mass) operator.
    _array = smeft_toarray('llphiphi', C)
    _array = np.diag(ckmutil.diag.msvd(_array)[1])
    _dict = smeft_fromarray('llphiphi', _array)
    C_out.update(_dict)
    return C_out
Translate from the Warsaw basis to the Warsaw mass basis .
45,059
def warsaw_up_to_warsaw(C, parameters=None, sectors=None):
    """Translate from the 'Warsaw up' basis to the Warsaw basis.

    Rotates the left-handed quark doublets by the tree-level CKM
    matrix; all other rotations are the identity.
    """
    C_in = smeftutil.wcxf2arrays_symmetrized(C)
    p = default_parameters.copy()
    if parameters is not None:
        p.update(parameters)
    Uu = Ud = Ul = Ue = np.eye(3)
    V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
    Uq = V
    C_out = smeftutil.flavor_rotation(C_in, Uq, Uu, Ud, Ul, Ue)
    C_out = smeftutil.arrays2wcxf_nonred(C_out)
    # Keep only coefficients defined in the Warsaw basis.
    warsaw = wcxf.Basis['SMEFT', 'Warsaw']
    all_wcs = set(warsaw.all_wcs)
    return {k: v for k, v in C_out.items() if k in all_wcs}
Translate from the Warsaw up basis to the Warsaw basis .
45,060
def sysmeta_add_preferred(sysmeta_pyxb, node_urn):
    """Add a remote Member Node to the preferred replication targets.

    Ensures a replication policy exists, and removes the node from the
    blocked list so the two lists stay disjoint.
    """
    if not has_replication_policy(sysmeta_pyxb):
        sysmeta_set_default_rp(sysmeta_pyxb)
    rp_pyxb = sysmeta_pyxb.replicationPolicy
    _add_node(rp_pyxb, 'pref', node_urn)
    _remove_node(rp_pyxb, 'block', node_urn)
Add a remote Member Node to the list of preferred replication targets to this System Metadata object .
45,061
def normalize(rp_pyxb):
    """Normalize a ReplicationPolicy PyXB type in place.

    Removes blocked nodes from the preferred list, then sorts both lists.
    """
    def sort(r, a):
        d1_common.xml.sort_value_list_pyxb(_get_attr_or_list(r, a))
    # NOTE(review): a Python set is assigned here; presumably PyXB
    # coerces it to its list-like value type -- confirm.
    rp_pyxb.preferredMemberNode = set(_get_attr_or_list(rp_pyxb, 'pref')) - set(_get_attr_or_list(rp_pyxb, 'block'))
    sort(rp_pyxb, 'block')
    sort(rp_pyxb, 'pref')
Normalize a ReplicationPolicy PyXB type in place .
45,062
def are_equivalent_xml(a_xml, b_xml):
    """Check if two ReplicationPolicy XML docs are semantically equivalent."""
    a_pyxb = d1_common.xml.deserialize(a_xml)
    b_pyxb = d1_common.xml.deserialize(b_xml)
    return are_equivalent_pyxb(a_pyxb, b_pyxb)
Check if two ReplicationPolicy XML docs are semantically equivalent .
45,063
def pyxb_to_dict(rp_pyxb):
    """Convert ReplicationPolicy PyXB object to a normalized dict."""
    return {
        'allowed': bool(_get_attr_or_list(rp_pyxb, 'allowed')),
        'num': _get_as_int(rp_pyxb),
        'block': _get_as_set(rp_pyxb, 'block'),
        'pref': _get_as_set(rp_pyxb, 'pref'),
    }
Convert ReplicationPolicy PyXB object to a normalized dict .
45,064
def dict_to_pyxb(rp_dict):
    """Convert dict to ReplicationPolicy PyXB object.

    The resulting object is normalized before being returned.
    """
    rp_pyxb = d1_common.types.dataoneTypes.replicationPolicy()
    rp_pyxb.replicationAllowed = rp_dict['allowed']
    rp_pyxb.numberReplicas = rp_dict['num']
    rp_pyxb.blockedMemberNode = rp_dict['block']
    rp_pyxb.preferredMemberNode = rp_dict['pref']
    normalize(rp_pyxb)
    return rp_pyxb
Convert dict to ReplicationPolicy PyXB object .
45,065
def _ensure_allow_rp ( rp_pyxb ) : if not rp_pyxb . replicationAllowed : rp_pyxb . replicationAllowed = True if not rp_pyxb . numberReplicas : rp_pyxb . numberReplicas = 3
Ensure that RP allows replication .
45,066
def _display_interval ( i ) : sigils = [ "d" , "h" , "m" , "s" ] factors = [ 24 * 60 * 60 , 60 * 60 , 60 , 1 ] remain = int ( i ) result = "" for fac , sig in zip ( factors , sigils ) : if remain < fac : continue result += "{}{}" . format ( remain // fac , sig ) remain = remain % fac return result
Convert a time interval into a human - readable string .
45,067
def update(self, num):
    """Update running min/max/mean/deviation statistics with a new number.

    Uses Welford's online algorithm for the sample deviation.
    """
    value = float(num)
    self.count += 1
    self.low = min(self.low, value)
    self.high = max(self.high, value)
    delta_before = value - self.mean
    self.mean = self.mean + delta_before / self.count
    delta_after = value - self.mean
    self._rolling_variance = self._rolling_variance + delta_before * delta_after
    # Sample deviation is undefined for a single observation.
    self.deviation = math.sqrt(self._rolling_variance / (self.count - 1)) if self.count > 1 else 0.0
Update metrics with the new number .
45,068
def to_dict(self):
    """Pack the computed stats into a dictionary."""
    stat_keys = ('high', 'low', 'mean', 'count', 'deviation')
    return {key: getattr(self, key) for key in stat_keys}
Pack the stats computed into a dictionary .
45,069
def add(self, count, timestamp=None):
    """Add a value at the specified time to the series.

    Raises ValueError when the timestamp does not move forward.
    """
    ts = time.time() if timestamp is None else timestamp
    if self.last_data >= ts:
        raise ValueError("Time {} >= {} in load average calculation".format(self.last_data, ts))
    self.last_data = ts
    # Propagate the sample to every tracked interval.
    for meta in self.intervals.values():
        meta.push(count, ts)
Add a value at the specified time to the series .
45,070
def to_dict(self):
    """Pack the load averages into a dictionary keyed by display name."""
    return {meta.display: meta.value for meta in self.intervals.values()}
Pack the load averages into a nicely - keyed dictionary .
45,071
def valid(self, instance, schema):
    """Validate schema.

    Returns True when ``instance`` matches ``schema`` and every
    declared field default validates against its field schema.
    """
    try:
        jsonschema.validate(instance, schema)
    except jsonschema.exceptions.ValidationError as ex:
        self.stderr.write(" VALIDATION ERROR: {}".format(instance['name'] if 'name' in instance else ''))
        self.stderr.write(" path: {}".format(ex.path))
        self.stderr.write(" message: {}".format(ex.message))
        self.stderr.write(" validator: {}".format(ex.validator))
        self.stderr.write(" val. value: {}".format(ex.validator_value))
        return False
    try:
        # Check that every field's declared default is itself valid.
        for field in ['input', 'output', 'schema']:
            for schema, _, path in iterate_schema({}, instance.get(field, {})):
                if 'default' in schema:
                    validate_schema({schema['name']: schema['default']}, [schema])
    except ValidationError:
        self.stderr.write(" VALIDATION ERROR: {}".format(instance['name']))
        self.stderr.write(" Default value of field '{}' is not valid.".format(path))
        return False
    return True
Validate schema .
45,072
def find_descriptor_schemas(self, schema_file):
    """Extract descriptor schemas from one YAML file.

    Non-YAML files and unreadable/empty files yield an empty list.
    """
    if not schema_file.lower().endswith(('.yml', '.yaml')):
        return []
    with open(schema_file) as handle:
        schemas = yaml.load(handle, Loader=yaml.FullLoader)
    if not schemas:
        self.stderr.write("Could not read YAML file {}".format(schema_file))
        return []
    # Only entries that define a 'schema' section are descriptor schemas.
    return [schema for schema in schemas if 'schema' in schema]
Find descriptor schemas in given path .
45,073
def find_schemas(self, schema_path, schema_type=SCHEMA_TYPE_PROCESS, verbosity=1):
    """Walk *schema_path* and collect schemas of the requested type.

    Returns None when *schema_path* is not a directory (matching the
    original contract); raises ValueError on an unknown *schema_type*.
    """
    if not os.path.isdir(schema_path):
        if verbosity > 0:
            self.stdout.write("Invalid path {}".format(schema_path))
        return
    if schema_type not in [SCHEMA_TYPE_PROCESS, SCHEMA_TYPE_DESCRIPTOR]:
        raise ValueError('Invalid schema type')
    schema_matches = []
    for root, _, files in os.walk(schema_path):
        for file_name in files:
            schema_file = os.path.join(root, file_name)
            if schema_type == SCHEMA_TYPE_DESCRIPTOR:
                found = self.find_descriptor_schemas(schema_file)
            else:
                # Process schemas are discovered by the execution engines.
                found = []
                for execution_engine in manager.execution_engines.values():
                    found.extend(execution_engine.discover_process(schema_file))
            schema_matches.extend(found)
    return schema_matches
Find schemas in packages that match filters .
45,074
def register_descriptors(self, descriptor_schemas, user, force=False, verbosity=1):
    """Read and register descriptor schemas.

    For each schema: normalize field types, validate, then install it
    unless a newer version (or, without *force*, the same version) is
    already present. New schemas inherit permissions from the previous
    latest version of the same slug.
    """
    log_descriptors = []
    for descriptor_schema in descriptor_schemas:
        # Normalize field types so they always end with ':'.
        for schema, _, _ in iterate_schema({}, descriptor_schema.get('schema', {})):
            if not schema['type'][-1].endswith(':'):
                schema['type'] += ':'
        if 'schema' not in descriptor_schema:
            descriptor_schema['schema'] = []
        if not self.valid(descriptor_schema, DESCRIPTOR_SCHEMA):
            continue
        slug = descriptor_schema['slug']
        version = descriptor_schema.get('version', '0.0.0')
        int_version = convert_version_string_to_int(version, VERSION_NUMBER_BITS)
        # Never downgrade: skip when a newer version is already installed.
        latest_version = DescriptorSchema.objects.filter(slug=slug).aggregate(Max('version'))['version__max']
        if latest_version is not None and latest_version > int_version:
            self.stderr.write("Skip descriptor schema {}: newer version installed".format(slug))
            continue
        # Remember the previous latest version so its permissions can be copied.
        previous_descriptor_qs = DescriptorSchema.objects.filter(slug=slug)
        if previous_descriptor_qs.exists():
            previous_descriptor = previous_descriptor_qs.latest()
        else:
            previous_descriptor = None
        descriptor_query = DescriptorSchema.objects.filter(slug=slug, version=version)
        if descriptor_query.exists():
            # Same version already installed: only overwrite with --force.
            if not force:
                if verbosity > 0:
                    self.stdout.write("Skip descriptor schema {}: same version installed".format(slug))
                continue
            descriptor_query.update(**descriptor_schema)
            log_descriptors.append("Updated {}".format(slug))
        else:
            descriptor = DescriptorSchema.objects.create(contributor=user, **descriptor_schema)
            assign_contributor_permissions(descriptor)
            if previous_descriptor:
                copy_permissions(previous_descriptor, descriptor)
            log_descriptors.append("Inserted {}".format(slug))
    if log_descriptors and verbosity > 0:
        self.stdout.write("Descriptor schemas Updates:")
        for log in log_descriptors:
            self.stdout.write(" {}".format(log))
Read and register descriptors .
45,075
def retire(self, process_schemas):
    """Retire obsolete processes.

    - Delete processes whose slug no longer appears in *process_schemas*
      and that produced no data.
    - Delete data-less processes that are not the latest version of
      their slug.
    - Mark remaining obsolete processes inactive.
    """
    process_slugs = set(ps['slug'] for ps in process_schemas)
    # Processes no longer present in any discovered schema file.
    retired_processes = Process.objects.filter(~Q(slug__in=process_slugs))
    # Remove retired processes that were never used to produce data.
    retired_processes.filter(data__exact=None).delete()
    # Remove unused non-latest versions of every process.
    latest_version_processes = Process.objects.order_by('slug', '-version').distinct('slug')
    # NOTE(review): Django restricts .delete() on querysets combined with
    # .difference() in some versions — confirm this works on the project's
    # Django version.
    Process.objects.filter(data__exact=None).difference(latest_version_processes).delete()
    # Deactivate retired processes that still have data attached.
    retired_processes.update(is_active=False)
Retire obsolete processes .
45,076
def handle(self, *args, **options):
    """Register processes and descriptor schemas discovered by the finders."""
    force = options.get('force')
    retire = options.get('retire')
    verbosity = int(options.get('verbosity'))
    # The oldest superuser becomes the contributor for newly created objects.
    users = get_user_model().objects.filter(is_superuser=True).order_by('date_joined')
    if not users.exists():
        self.stderr.write("Admin does not exist: create a superuser")
        exit(1)
    process_paths, descriptor_paths = [], []
    process_schemas, descriptor_schemas = [], []
    # Collect candidate paths from all registered finders.
    for finder in get_finders():
        process_paths.extend(finder.find_processes())
        descriptor_paths.extend(finder.find_descriptors())
    for proc_path in process_paths:
        process_schemas.extend(self.find_schemas(proc_path, schema_type=SCHEMA_TYPE_PROCESS, verbosity=verbosity))
    for desc_path in descriptor_paths:
        descriptor_schemas.extend(self.find_schemas(desc_path, schema_type=SCHEMA_TYPE_DESCRIPTOR, verbosity=verbosity))
    user_admin = users.first()
    self.register_descriptors(descriptor_schemas, user_admin, force, verbosity=verbosity)
    self.register_processes(process_schemas, user_admin, force, verbosity=verbosity)
    if retire:
        self.retire(process_schemas)
    if verbosity > 0:
        self.stdout.write("Running executor post-registration hook...")
    # NOTE(review): the hook is assumed to run regardless of verbosity;
    # only the status message above is verbosity-guarded — confirm.
    manager.get_executor().post_register_hook(verbosity=verbosity)
Register processes and descriptor schemas discovered by the finders.
45,077
def is_valid_sid_for_chain(pid, sid):
    """Return True if *sid* may be assigned to *pid* or its revision chain.

    An unused identifier is always acceptable; a used one is acceptable
    only when it is already the SID of the chain *pid* belongs to.
    """
    if _is_unused_did(sid):
        return True
    existing_sid = d1_gmn.app.revision.get_sid_by_pid(pid)
    return existing_sid is not None and existing_sid == sid
Return True if sid can be assigned to the single object pid or to the chain to which pid belongs .
45,078
def is_existing_object(did):
    """Return True if *did* names an object whose science bytes are stored locally."""
    sciobj_qs = d1_gmn.app.models.ScienceObject.objects.filter(pid__did=did)
    return sciobj_qs.exists()
Return True if PID is for an object for which science bytes are stored locally .
45,079
def classify_identifier(did):
    """Return a text fragment classifying *did*.

    Predicates are evaluated in priority order; the first match wins.
    """
    checks = (
        (_is_unused_did, 'unused on this Member Node'),
        (is_sid, 'a Series ID (SID) of a revision chain'),
        (is_local_replica, 'a Persistent ID (PID) of a local replica'),
        (is_unprocessed_local_replica,
         'a Persistent ID (PID) of an accepted but not yet processed local replica'),
        (is_archived, 'a Persistent ID (PID) of a previously archived local object'),
        (is_obsoleted, 'a Persistent ID (PID) of a previously updated (obsoleted) local object'),
        (is_resource_map_db, 'a Persistent ID (PID) of a local resource map'),
        (is_existing_object, 'a Persistent ID (PID) of an existing local object'),
        (is_revision_chain_placeholder,
         'a Persistent ID (PID) of a remote or non-existing revision of a local replica'),
        (is_resource_map_member,
         'a Persistent ID (PID) of a remote or non-existing object aggregated in a local Resource Map'),
    )
    for predicate, description in checks:
        if predicate(did):
            return description
    logger.warning('Unable to classify known identifier. did="{}"'.format(did))
    return '<UNKNOWN>'
Return a text fragment classifying the did
45,080
def is_local_replica(pid):
    """Return True if *pid* is a local replica (includes unprocessed replication requests)."""
    replica_qs = d1_gmn.app.models.LocalReplica.objects.filter(pid__did=pid)
    return replica_qs.exists()
Includes unprocessed replication requests .
45,081
def is_unprocessed_local_replica(pid):
    """Return True if *pid* is a local replica whose request is still queued."""
    queued_qs = d1_gmn.app.models.LocalReplica.objects.filter(
        pid__did=pid,
        info__status__status='queued',
    )
    return queued_qs.exists()
Is local replica with status queued .
45,082
def is_revision_chain_placeholder(pid):
    """Return True if *pid* is reserved in a replica revision chain reference.

    For replicas, PIDs referenced in revision chains are reserved for
    use by other replicas.
    """
    reference_qs = d1_gmn.app.models.ReplicaRevisionChainReference.objects.filter(pid__did=pid)
    return reference_qs.exists()
For replicas the PIDs referenced in revision chains are reserved for use by other replicas .
45,083
def _is_did(did):
    """Return True if *did* is recorded in the local identifier namespace."""
    namespace_qs = d1_gmn.app.models.IdNamespace.objects.filter(did=did)
    return namespace_qs.exists()
Return True if did is recorded in a local context .
45,084
def prepare_connection():
    """Set the default Elasticsearch connection from Django settings.

    Falls back to localhost:9200 when the settings are absent.
    """
    host = getattr(settings, 'ELASTICSEARCH_HOST', 'localhost')
    port = getattr(settings, 'ELASTICSEARCH_PORT', 9200)
    connections.create_connection(hosts=['{}:{}'.format(host, port)])
Set default connection for Elasticsearch.
45,085
def _log(pid, request, event, timestamp=None):
    """Record an event-log entry for an operation performed on a sciobj.

    :raises ServiceFailure: when *pid* is given but no matching object exists.
    """
    sciobj_model = None
    if pid is not None:
        try:
            sciobj_model = d1_gmn.app.models.ScienceObject.objects.filter(pid__did=pid)[0]
        except IndexError:
            raise d1_common.types.exceptions.ServiceFailure(
                0,
                'Attempted to create event log for non-existing object. pid="{}"'.format(pid),
            )
    event_log_model = create_log_entry(
        sciobj_model,
        event,
        request.META['REMOTE_ADDR'],
        request.META.get('HTTP_USER_AGENT', '<not provided>'),
        request.primary_subject_str,
    )
    # Allow callers to backdate the entry (e.g. when importing historic logs).
    # NOTE(review): assumes create_log_entry() already persists the entry, so
    # the extra save() is only needed when overriding the timestamp — confirm.
    if timestamp is not None:
        event_log_model.timestamp = timestamp
        event_log_model.save()
Log an operation that was performed on a sciobj .
45,086
def action_to_level(action):
    """Map an action name to its numeric action level.

    :raises InvalidRequest: when the action name is unknown.
    """
    try:
        level = ACTION_LEVEL_MAP[action]
    except LookupError:
        raise d1_common.types.exceptions.InvalidRequest(
            0, 'Unknown action. action="{}"'.format(action)
        )
    return level
Map action name to action level .
45,087
def level_to_action(level):
    """Map a numeric action level to its action name.

    :raises InvalidRequest: when the level is unknown.
    """
    try:
        action = LEVEL_ACTION_MAP[level]
    except LookupError:
        raise d1_common.types.exceptions.InvalidRequest(
            0, 'Unknown action level. level="{}"'.format(level)
        )
    return action
Map action level to action name .
45,088
def get_trusted_subjects():
    """Get set of subjects that have unlimited access to all SciObj and APIs.

    The set is the union of the CN subjects, the configured
    DATAONE_TRUSTED_SUBJECTS, and the client side certificate subject
    (when one is configured).
    """
    trusted_set = (
        d1_gmn.app.node_registry.get_cn_subjects()
        | django.conf.settings.DATAONE_TRUSTED_SUBJECTS
    )
    cert_subj = _get_client_side_certificate_subject()
    # BUG FIX: the original wrote ``a | b | {cert} if cert is not None else
    # set()`` — the conditional bound the whole union, so when no client
    # cert was configured the CN and configured trusted subjects were
    # silently discarded and an empty set was returned.
    if cert_subj is not None:
        trusted_set = trusted_set | {cert_subj}
    return trusted_set
Get set of subjects that have unlimited access to all SciObj and APIs on this node .
45,089
def is_trusted_subject(request):
    """Determine if any of the calling subjects is fully trusted."""
    trusted_set = get_trusted_subjects()
    logging.debug('Active subjects: {}'.format(', '.join(request.all_subjects_set)))
    logging.debug('Trusted subjects: {}'.format(', '.join(trusted_set)))
    # Trusted when the active and trusted sets share at least one subject.
    return not request.all_subjects_set.isdisjoint(trusted_set)
Determine if calling subject is fully trusted .
45,090
def _get_client_side_certificate_subject():
    """Return the client side certificate DN as a D1 subject, or None.

    The extracted subject is memoized in the Django cache since parsing
    the PEM is comparatively expensive.
    """
    cache_key = 'client_side_certificate_subject'
    cached_subject = django.core.cache.cache.get(cache_key)
    if cached_subject is not None:
        return cached_subject
    cert_pem = _get_client_side_certificate_pem()
    if cert_pem is None:
        # No client side certificate configured.
        return None
    subject = _extract_subject_from_pem(cert_pem)
    django.core.cache.cache.set(cache_key, subject)
    return subject
Return the DN from the client side certificate as a D1 subject if a client side cert has been configured .
45,091
def is_allowed(request, level, pid):
    """Check if any active subject may act at *level* on object *pid*.

    Fully trusted subjects are always allowed.
    """
    if is_trusted_subject(request):
        return True
    permission_qs = d1_gmn.app.models.Permission.objects.filter(
        sciobj__pid__did=pid,
        subject__subject__in=request.all_subjects_set,
        level__gte=level,
    )
    return permission_qs.exists()
Check if one or more subjects are allowed to perform action level on object .
45,092
def assert_allowed(request, level, pid):
    """Assert that an active subject may act at *level* on object *pid*.

    :raises NotFound: when no object with *pid* exists.
    :raises NotAuthorized: when the operation is denied.
    """
    sciobj_qs = d1_gmn.app.models.ScienceObject.objects.filter(pid__did=pid)
    if not sciobj_qs.exists():
        raise d1_common.types.exceptions.NotFound(
            0,
            'Attempted to perform operation on non-existing object. pid="{}"'.format(pid),
        )
    if not is_allowed(request, level, pid):
        raise d1_common.types.exceptions.NotAuthorized(
            0,
            'Operation is denied. level="{}", pid="{}", active_subjects="{}"'.format(
                level_to_action(level), pid, format_active_subjects(request)
            ),
        )
Assert that one or more subjects are allowed to perform action on object .
45,093
def format_active_subjects(request):
    """Create a comma separated list of active subjects, primary first.

    Suitable for appending to authentication error messages.
    """
    others = [
        subject
        for subject in request.all_subjects_set
        if subject != request.primary_subject_str
    ]
    return ', '.join([request.primary_subject_str + ' (primary)'] + others)
Create a string listing active subjects for this connection suitable for appending to authentication error messages .
45,094
def get_atomtrailer_list(r):
    """Capture only the leading dotted name list of each atomtrailers node.

    Returns a sorted list of unique name tuples (each tuple is the run
    of leading "name" parts of one atomtrailers node).
    """
    dotted = set()
    for node in r.find_all(("atomtrailers",)):
        prefix = []
        for part in node.value:
            if part.type == "name":
                prefix.append(part.value)
            else:
                # Stop at the first non-name part (e.g. a call).
                break
        if prefix:
            dotted.add(tuple(prefix))
    return sorted(dotted)
Capture only the leading dotted name list .
45,095
def custom_filter_tags(self, value, search):
    """Support tags query: every given tag must match."""
    tags = value if isinstance(value, list) else value.split(',')
    must_clauses = [Q('match', **{'tags': tag}) for tag in tags]
    return search.query('bool', must=must_clauses)
Support tags query .
45,096
def custom_filter_text(self, value, search):
    """Support a general query using the text attribute.

    Builds a boosted OR query over slug, name, contributor and owner
    names (exact and ngram variants) plus descriptor data, and lets
    registered extensions append additional clauses.
    """
    if isinstance(value, list):
        value = ' '.join(value)
    # Boosts: exact slug/name matches rank highest, ngram variants lower.
    should = [
        Q('match', slug={'query': value, 'operator': 'and', 'boost': 10.0}),
        Q('match', **{'slug.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}),
        Q('match', name={'query': value, 'operator': 'and', 'boost': 10.0}),
        Q('match', **{'name.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}),
        Q('match', contributor_name={'query': value, 'operator': 'and', 'boost': 5.0}),
        Q('match', **{'contributor_name.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
        Q('match', owner_names={'query': value, 'operator': 'and', 'boost': 5.0}),
        Q('match', **{'owner_names.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
        Q('match', descriptor_data={'query': value, 'operator': 'and'}),
    ]
    # Allow registered extensions to contribute additional text queries.
    for extension in composer.get_extensions(self):
        if hasattr(extension, 'text_filter'):
            should += extension.text_filter(value)
    search = search.query('bool', should=should)
    return search
Support general query using the text attribute .
45,097
def set_content_permissions(self, user, obj, payload):
    """Apply permissions to data objects and entities in a Collection.

    Only content the user may share is updated; the 'add' permission is
    stripped before being applied to data objects.
    """
    for entity in obj.entity_set.all():
        if user.has_perm('share_entity', entity):
            update_permission(entity, payload)
    # Data objects do not take the 'add' permission.
    payload = remove_permission(payload, 'add')
    for data in obj.data.all():
        if user.has_perm('share_data', data):
            update_permission(data, payload)
Apply permissions to data objects and entities in Collection .
45,098
def create(self, request, *args, **kwargs):
    """Create a collection; only authenticated users may do so.

    Anonymous requests get NotFound (presumably to avoid revealing the
    endpoint's existence — confirm) rather than a permission error.
    """
    if request.user.is_authenticated:
        return super().create(request, *args, **kwargs)
    raise exceptions.NotFound
Only authenticated users can create new collections.
45,099
def add_data(self, request, pk=None):
    """Add data objects to a collection.

    Expects an ``ids`` list in the request payload; every id must refer
    to an existing Data object, otherwise nothing is added and a 400
    response lists the missing ids.
    """
    collection = self.get_object()
    if 'ids' not in request.data:
        # FIX: the original message read "`ids`parameter" (missing space).
        return Response(
            {"error": "`ids` parameter is required"},
            status=status.HTTP_400_BAD_REQUEST,
        )
    # Validate all ids up front so the operation is all-or-nothing.
    missing = [
        data_id
        for data_id in request.data['ids']
        if not Data.objects.filter(pk=data_id).exists()
    ]
    if missing:
        # FIX: ids may arrive as JSON numbers; str() keeps join() from
        # raising TypeError on non-string ids.
        return Response(
            {
                "error": "Data objects with following ids are missing: {}".format(
                    ', '.join(map(str, missing))
                )
            },
            status=status.HTTP_400_BAD_REQUEST,
        )
    for data_id in request.data['ids']:
        collection.data.add(data_id)
    return Response()
Add data to collection .