idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
45,300
def _assert_dirs_exist(self, setting_name):
    """Check that the dirs leading up to the given file path exist.

    Raise a config error when the parent directory does not exist or when
    the configured path is itself a directory (a file path is required).
    """
    file_path = self._get_setting(setting_name)
    parent_dir = os.path.split(file_path)[0]
    if not os.path.isdir(parent_dir) or os.path.isdir(file_path):
        self.raise_config_error(
            setting_name,
            file_path,
            str,
            'a file path in an existing directory',
            is_none_allowed=False,
        )
Check that the dirs leading up to the given file path exist .
45,301
def _warn_unsafe_for_prod ( self ) : safe_settings_list = [ ( 'DEBUG' , False ) , ( 'DEBUG_GMN' , False ) , ( 'STAND_ALONE' , False ) , ( 'DATABASES.default.ATOMIC_REQUESTS' , True ) , ( 'SECRET_KEY' , '<Do not modify this placeholder value>' ) , ( 'STATIC_SERVER' , False ) , ] for setting_str , setting_safe in safe_settings_list : setting_current = self . _get_setting ( setting_str ) if setting_current != setting_safe : logger . warning ( 'Setting is unsafe for use in production. setting="{}" current="{}" ' 'safe="{}"' . format ( setting_str , setting_current , setting_safe ) )
Warn on settings that are not safe for production .
45,302
def _get_setting(self, setting_dotted_name, default=None):
    """Return the value of a potentially nested dict setting.

    ``setting_dotted_name`` may be e.g. ``'DATABASES.default.NAME'``;
    ``default`` is returned for any missing level.
    """
    name_list = setting_dotted_name.split('.')
    value = getattr(django.conf.settings, name_list[0], default)
    for part in name_list[1:]:
        value = value.get(part, default)
    return value
Return the value of a potentially nested dict setting .
45,303
def _refresh_connection ( self ) : current_thread_id = threading . current_thread ( ) . ident if current_thread_id != self . connection_thread_id : prepare_connection ( ) self . connection_thread_id = current_thread_id
Refresh connection to Elasticsearch when worker is started .
45,304
def generate_id(self, obj):
    """Generate a unique Elasticsearch document id for ``obj``."""
    type_name = type(obj).__name__.lower()
    return '{}_{}'.format(type_name, self.get_object_id(obj))
Generate unique document id for ElasticSearch .
45,305
def process_object(self, obj):
    """Process the given object and push its document to the Elasticsearch queue.

    The value of each mapped field is resolved in priority order:
    1. a ``get_<field>_value`` method on the index,
    2. an explicit entry in ``self.mapping`` (callable or dotted path),
    3. an attribute of the same name on the object.
    Permission fields are filled separately at the end.
    """
    document = self.document_class(meta={'id': self.generate_id(obj)})
    for field in document._doc_type.mapping:
        # Permission fields are populated from get_permissions() below.
        if field in ['users_with_permissions', 'groups_with_permissions', 'public_permission']:
            continue
        try:
            # 1) A dedicated get_<field>_value method wins.
            get_value_function = getattr(self, 'get_{}_value'.format(field), None)
            if get_value_function:
                setattr(document, field, get_value_function(obj))
                continue
            # 2) An explicit mapping entry: either a callable or a dotted path.
            if field in self.mapping:
                if callable(self.mapping[field]):
                    setattr(document, field, self.mapping[field](obj))
                    continue
                try:
                    object_attr = dict_dot(obj, self.mapping[field])
                except (KeyError, AttributeError):
                    object_attr = None
                if callable(object_attr):
                    # Mapped attribute is callable; call it to get the value.
                    setattr(document, field, object_attr(obj))
                else:
                    setattr(document, field, object_attr)
                continue
            # 3) Fall back to an attribute of the same name on the object.
            try:
                object_value = dict_dot(obj, field)
                setattr(document, field, object_value)
                continue
            except KeyError:
                pass
            raise AttributeError("Cannot determine mapping for field {}".format(field))
        except Exception:
            # Log and keep indexing the remaining fields.
            logger.exception(
                "Error occurred while setting value of field '%s' in '%s' Elasticsearch index.",
                field,
                self.__class__.__name__,
                extra={'object_type': self.object_type, 'obj_id': obj.pk}
            )
    permissions = self.get_permissions(obj)
    document.users_with_permissions = permissions['users']
    document.groups_with_permissions = permissions['groups']
    document.public_permission = permissions['public']
    self.push_queue.append(document)
Process current object and push it to the ElasticSearch .
45,306
def create_mapping(self):
    """Create the mappings in Elasticsearch."""
    try:
        self.document_class.init()
    except IllegalOperation as error:
        # Analysis configuration cannot be changed on an existing index;
        # treat that specific error as "mapping already exists" and skip it.
        if error.args[0].startswith('You cannot update analysis configuration'):
            return
        raise
    self._mapping_created = True
Create the mappings in elasticsearch .
45,307
def destroy(self):
    """Destroy the Elasticsearch index and reset local indexing state."""
    self._refresh_connection()
    self.push_queue = []
    index_name = self.document_class()._get_index()
    # A missing index (404) is not an error when destroying.
    connections.get_connection().indices.delete(index_name, ignore=404)
    self._mapping_created = False
Destroy an index .
45,308
def get_permissions(self, obj):
    """Return users and groups with view permission on the given object."""
    view_filters = {
        'object_pk': obj.id,
        'content_type': ContentType.objects.get_for_model(obj),
        'permission__codename__startswith': 'view',
    }
    user_ids = (
        UserObjectPermission.objects
        .filter(**view_filters)
        .distinct('user')
        .values_list('user_id', flat=True)
    )
    group_ids = (
        GroupObjectPermission.objects
        .filter(**view_filters)
        .distinct('group')
        .values_list('group', flat=True)
    )
    is_public = UserObjectPermission.objects.filter(
        user__username=ANONYMOUS_USER_NAME, **view_filters
    ).exists()
    return {
        'users': list(user_ids),
        'groups': list(group_ids),
        'public': is_public,
    }
Return users and groups with view permission on the current object .
45,309
def remove_object(self, obj):
    """Remove the given object's document from Elasticsearch."""
    document = self.document_class.get(self.generate_id(obj), ignore=[404])
    if document:
        document.delete(refresh=True)
Remove current object from the ElasticSearch .
45,310
def generate_version_file(self, schema_filename, binding_filename):
    """Generate a file containing version information about a DataONE schema.

    When version info cannot be unpacked from the SVN query result, no file
    is written.
    """
    version_path = os.path.join(self.binding_dir, binding_filename + '_version.txt')
    schema_path = os.path.join(self.schema_dir, schema_filename)
    try:
        tstamp, svnpath, svnrev, version = self.get_version_info_from_svn(schema_path)
    except TypeError:
        # Unpacking failed -- presumably no SVN info was available; skip writing.
        pass
    else:
        self.write_version_file(version_path, tstamp, svnpath, svnrev, version)
Given a DataONE schema, generate a file that contains version information about the schema.
45,311
def set_data_location(apps, schema_editor):
    """Create a DataLocation for each Data object (data migration)."""
    Data = apps.get_model('flow', 'Data')
    DataLocation = apps.get_model('flow', 'DataLocation')
    for data in Data.objects.all():
        # Only data objects that actually have a directory on disk get a location.
        if os.path.isdir(os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], str(data.id))):
            with transaction.atomic():
                data_location = DataLocation.objects.create(id=data.id, subpath=str(data.id))
                data_location.data.add(data)
    if DataLocation.objects.exists():
        # Locations reuse Data ids, so bump the id sequence past the largest
        # assigned id to avoid collisions for locations created later.
        max_id = DataLocation.objects.order_by('id').last().id
        with connection.cursor() as cursor:
            cursor.execute(
                "ALTER SEQUENCE flow_datalocation_id_seq RESTART WITH {};".format(max_id + 1)
            )
Create DataLocation for each Data .
45,312
def _loadMore(self, start=0, trys=0, validation=True):
    """Retrieve the next page of listObjects results.

    Retries up to 3 times on bad HTTP status lines, and up to 3 times on
    ServiceFailure (the latter retried with schema validation disabled).
    """
    self._log.debug("Loading page starting from %d" % start)
    self._czero = start
    self._pageoffs = 0
    try:
        pyxb.RequireValidWhenParsing(validation)
        self._object_list = self._client.listObjects(
            start=start,
            count=self._pagesize,
            fromDate=self._fromDate,
            nodeId=self._nodeId,
        )
    except http.client.BadStatusLine as e:
        self._log.warning("Server responded with Bad Status Line. Retrying in 5sec")
        self._client.connection.close()
        if trys > 3:
            raise e
        trys += 1
        self._loadMore(start, trys)
    except d1_common.types.exceptions.ServiceFailure as e:
        self._log.error(e)
        if trys > 3:
            raise e
        trys += 1
        # Retry the same page without schema validation.
        self._loadMore(start, trys, validation=False)
Retrieves the next page of results .
45,313
def _delete_chunked(queryset, chunk_size=500):
    """Delete a queryset in chunks.

    Should be used instead of a plain delete when removing many objects, so
    each chunk is deleted in its own transaction.
    """
    while True:
        with transaction.atomic():
            chunk = queryset.order_by('pk')[:chunk_size]
            chunk_count = chunk.count()
            if not chunk_count:
                break
            boundary = queryset.order_by('pk')[chunk_count - 1]
            queryset.filter(pk__lte=boundary.pk).delete()
Chunked delete which should be used if deleting many objects .
45,314
def create_entity(self):
    """Create an entity if ``entity_type`` is defined on the process.

    The data object is attached to the single matching existing entity when
    exactly one exists, a new entity is created when none exists, and
    nothing is done when multiple candidates match.
    """
    entity_type = self.process.entity_type
    entity_descriptor_schema = self.process.entity_descriptor_schema
    entity_input = self.process.entity_input
    if entity_type:
        data_filter = {}
        if entity_input:
            input_id = dict_dot(self.input, entity_input, default=lambda: None)
            if input_id is None:
                logger.warning("Skipping creation of entity due to missing input.")
                return
            if isinstance(input_id, int):
                data_filter['data__pk'] = input_id
            elif isinstance(input_id, list):
                data_filter['data__pk__in'] = input_id
            else:
                raise ValueError(
                    "Cannot create entity due to invalid value of field {}.".format(entity_input)
                )
        else:
            # No explicit entity input; match entities through parent data objects.
            data_filter['data__in'] = self.parents.all()
        entity_query = Entity.objects.filter(type=entity_type, **data_filter).distinct()
        entity_count = entity_query.count()
        if entity_count == 0:
            descriptor_schema = DescriptorSchema.objects.filter(
                slug=entity_descriptor_schema
            ).latest()
            entity = Entity.objects.create(
                contributor=self.contributor,
                descriptor_schema=descriptor_schema,
                type=entity_type,
                name=self.name,
                tags=self.tags,
            )
            assign_contributor_permissions(entity)
        elif entity_count == 1:
            entity = entity_query.first()
            # Existing entity: propagate its permissions to this data object.
            copy_permissions(entity, self)
        else:
            logger.info("Skipping creation of entity due to multiple entities found.")
            entity = None
        if entity:
            entity.data.add(self)
            # Also attach the data object to all collections the entity belongs to.
            for collection in entity.collections.all():
                collection.data.add(self)
Create entity if flow_collection is defined in process .
45,315
def save(self, render_name=False, *args, **kwargs):
    """Save the data model.

    On first save the input is filled with defaults, the name is rendered
    (unless explicitly set by the user) and the checksum is computed.
    Input, descriptor and output are validated against their schemas before
    the object is persisted.
    """
    if self.name != self._original_name:
        # The name changed after loading; treat it as user-set.
        self.named_by_user = True
    create = self.pk is None
    if create:
        fill_with_defaults(self.input, self.process.input_schema)
        if not self.name:
            self._render_name()
        else:
            self.named_by_user = True
        self.checksum = get_data_checksum(self.input, self.process.slug, self.process.version)
    elif render_name:
        self._render_name()
    self.save_storage(self.output, self.process.output_schema)
    if self.status != Data.STATUS_ERROR:
        hydrate_size(self)
        if 'update_fields' in kwargs:
            # Ensure the recomputed size is included in a partial update.
            kwargs['update_fields'].append('size')
    # On updates, inputs referencing missing data objects are tolerated.
    skip_missing_data = not create
    validate_schema(self.input, self.process.input_schema, skip_missing_data=skip_missing_data)
    render_descriptor(self)
    if self.descriptor_schema:
        try:
            validate_schema(self.descriptor, self.descriptor_schema.schema)
            self.descriptor_dirty = False
        except DirtyError:
            self.descriptor_dirty = True
    elif self.descriptor and self.descriptor != {}:
        raise ValueError("`descriptor_schema` must be defined if `descriptor` is given")
    if self.status != Data.STATUS_ERROR:
        output_schema = self.process.output_schema
        if self.status == Data.STATUS_DONE:
            validate_schema(self.output, output_schema, data_location=self.location,
                            skip_missing_data=True)
        else:
            # Processing not finished: required output fields may still be missing.
            validate_schema(self.output, output_schema, data_location=self.location,
                            test_required=False)
    with transaction.atomic():
        self._perform_save(*args, **kwargs)
        if create:
            self.save_dependencies(self.input, self.process.input_schema)
            self.create_entity()
Save the data model .
45,316
def delete(self, *args, **kwargs):
    """Delete the data model and clean up storages left without any data."""
    # Remember potentially orphaned storages before the relation disappears.
    orphan_candidates = list(self.storages.values_list('pk', flat=True))
    super().delete(*args, **kwargs)
    Storage.objects.filter(pk__in=orphan_candidates, data=None).delete()
Delete the data model .
45,317
def _render_name(self):
    """Render the data name from the process ``data_name`` template."""
    if not self.process.data_name or self.named_by_user:
        return
    hydrated_inputs = copy.deepcopy(self.input)
    hydrate_input_references(hydrated_inputs, self.process.input_schema, hydrate_values=False)
    try:
        rendered = render_template(self.process, self.process.data_name, hydrated_inputs)
    except EvaluationError:
        # Fall back to a placeholder when the template cannot be evaluated.
        rendered = '?'
    self.name = rendered
Render data name .
45,318
def get_path(self, prefix=None, filename=None):
    """Compose data location path.

    ``prefix`` defaults to the configured data directory; ``filename`` is
    appended when given.
    """
    base = prefix or settings.FLOW_EXECUTOR['DATA_DIR']
    path = os.path.join(base, self.subpath)
    return os.path.join(path, filename) if filename else path
Compose data location path .
45,319
def get_runtime_path(self, filename=None):
    """Compose data runtime location path."""
    runtime_dir = settings.FLOW_EXECUTOR['RUNTIME_DIR']
    return self.get_path(prefix=runtime_dir, filename=filename)
Compose data runtime location path .
45,320
def get_data(self, entity):
    """Return serialized data objects on the entity the user may view."""
    viewable = self._filter_queryset('view_data', entity.data.all())
    return self._serialize_data(viewable)
Return serialized list of data objects on entity that user has view permission on .
45,321
def validate(self):
    """Validate the process descriptor metadata.

    Raises ValidationError when a required attribute is missing or the
    process type is malformed.
    """
    for field in ('slug', 'name', 'process_type', 'version'):
        if getattr(self.metadata, field, None) is None:
            raise ValidationError(
                "process '{}' is missing required meta attribute: {}".format(
                    self.metadata.slug or '<unknown>', field
                )
            )
    if not PROCESSOR_TYPE_RE.match(self.metadata.process_type):
        raise ValidationError(
            "process '{}' has invalid type: {}".format(
                self.metadata.slug, self.metadata.process_type
            )
        )
Validate process descriptor .
45,322
def to_schema(self):
    """Return process schema for this process."""
    process_type = self.metadata.process_type
    if not process_type.endswith(':'):
        process_type = '{}:'.format(process_type)
    schema = {
        'slug': self.metadata.slug,
        'name': self.metadata.name,
        'type': process_type,
        'version': self.metadata.version,
        'data_name': '',
        'requirements': {
            'executor': {
                'docker': {
                    'image': 'resolwe/base:ubuntu-18.04',
                },
            },
        },
    }
    # Optional metadata attributes override the defaults only when set.
    for attr in ('description', 'category', 'scheduling_class', 'persistence',
                 'requirements', 'data_name', 'entity'):
        value = getattr(self.metadata, attr)
        if value is not None:
            schema[attr] = value
    if self.inputs:
        schema['input'] = [field.to_schema() for field in self.inputs.values()]
    if self.outputs:
        schema['output'] = [field.to_schema() for field in self.outputs.values()]
    schema['run'] = {
        'language': 'python',
        'program': self.source or '',
    }
    return schema
Return process schema for this process .
45,323
def post_register_hook(self, verbosity=1):
    """Pull Docker images needed by processes after registering."""
    if getattr(settings, 'FLOW_DOCKER_DONT_PULL', False):
        # Pulling explicitly disabled in settings.
        return
    call_command('list_docker_images', pull=True, verbosity=verbosity)
Pull Docker images needed by processes after registering .
45,324
def are_equal(a_dt, b_dt, round_sec=1):
    """Determine if two datetimes are equal within a fuzz factor of ``round_sec``."""
    a_rounded = round_to_nearest(a_dt, round_sec)
    b_rounded = round_to_nearest(b_dt, round_sec)
    logger.debug('Rounded:')
    logger.debug('{} -> {}'.format(a_dt, a_rounded))
    logger.debug('{} -> {}'.format(b_dt, b_rounded))
    return normalize_datetime_to_utc(a_rounded) == normalize_datetime_to_utc(b_rounded)
Determine if two datetimes are equal with fuzz factor .
45,325
def http_datetime_str_from_dt(dt):
    """Format datetime to HTTP Full Date format."""
    return email.utils.formatdate(ts_from_dt(dt), localtime=False, usegmt=True)
Format datetime to HTTP Full Date format .
45,326
def dt_from_http_datetime_str(http_full_datetime):
    """Parse HTTP Full Date formats and return as datetime."""
    year, month, day, hour, minute, second = email.utils.parsedate(http_full_datetime)[:6]
    if year <= 99:
        # Two-digit years: 50-99 map to 19xx, 00-49 to 20xx.
        year += 2000 if year < 50 else 1900
    return create_utc_datetime(year, month, day, hour, minute, second)
Parse HTTP Full Date formats and return as datetime .
45,327
def normalize_datetime_to_utc(dt):
    """Adjust datetime to UTC, preserving microseconds."""
    year, month, day, hour, minute, second = dt.utctimetuple()[:6]
    return datetime.datetime(
        year, month, day, hour, minute, second,
        microsecond=dt.microsecond,
        tzinfo=datetime.timezone.utc,
    )
Adjust datetime to UTC .
45,328
def cast_naive_datetime_to_tz(dt, tz=UTC()):
    """If datetime is tz-naive, set it to ``tz``; if tz-aware, return unmodified."""
    return dt if has_tz(dt) else dt.replace(tzinfo=tz)
If datetime is tz-naive, set it to tz. If datetime is tz-aware, return it unmodified.
45,329
def round_to_nearest(dt, n_round_sec=1.0):
    """Round datetime up or down to the nearest multiple of ``n_round_sec``."""
    # Add half the divisor so truncation below rounds to nearest.
    half_up_ts = ts_from_dt(strip_timezone(dt)) + n_round_sec / 2.0
    rounded = dt_from_ts(half_up_ts - (half_up_ts % n_round_sec))
    return rounded.replace(tzinfo=dt.tzinfo)
Round datetime up or down to nearest divisor .
45,330
def submit(self, data, runtime_dir, argv):
    """Run process with SLURM by writing and submitting an sbatch script."""
    limits = data.process.get_resource_limits()
    logger.debug(__(
        "Connector '{}' running for Data with id {} ({}).",
        self.__class__.__module__, data.id, repr(argv)
    ))
    # Process-specific partition overrides take precedence over the default.
    partition = getattr(settings, 'FLOW_SLURM_PARTITION_DEFAULT', None)
    if data.process.slug in getattr(settings, 'FLOW_SLURM_PARTITION_OVERRIDES', {}):
        partition = settings.FLOW_SLURM_PARTITION_OVERRIDES[data.process.slug]
    try:
        script_path = os.path.join(runtime_dir, 'slurm.sh')
        # Created with mode 0o555 so the resulting script is executable.
        file_descriptor = os.open(script_path, os.O_WRONLY | os.O_CREAT, mode=0o555)
        with os.fdopen(file_descriptor, 'wt') as script:
            script.write('#!/bin/bash\n')
            script.write('#SBATCH --mem={}M\n'.format(
                limits['memory'] + EXECUTOR_MEMORY_OVERHEAD))
            script.write('#SBATCH --cpus-per-task={}\n'.format(limits['cores']))
            if partition:
                script.write('#SBATCH --partition={}\n'.format(partition))
            line = ' '.join(map(shlex.quote, argv))
            script.write(line + '\n')
        command = ['/usr/bin/env', 'sbatch', script_path]
        subprocess.Popen(command, cwd=runtime_dir, stdin=subprocess.DEVNULL).wait()
    except OSError as err:
        logger.error(__(
            "OSError occurred while preparing SLURM script for Data {}: {}",
            data.id, err
        ))
Run process with SLURM .
45,331
def get_purge_files(root, output, output_schema, descriptor, descriptor_schema):
    """Get files to purge.

    Walk ``root`` and return the set of absolute paths that are NOT
    referenced by any file/dir field of the output or descriptor (per their
    schemas).
    """
    def remove_file(fn, paths):
        # Remove the path and all of its ancestor directories from the candidates.
        while fn:
            for i in range(len(paths) - 1, -1, -1):
                if fn == paths[i]:
                    paths.pop(i)
            fn, _ = os.path.split(fn)

    def remove_tree(fn, paths):
        # Remove the directory, everything below it, and its ancestors.
        for i in range(len(paths) - 1, -1, -1):
            head = paths[i]
            while head:
                if fn == head:
                    paths.pop(i)
                    break
                head, _ = os.path.split(head)
        remove_file(fn, paths)

    def subfiles(root):
        # All files and directories below root, as root-relative paths.
        subs = []
        for path, dirs, files in os.walk(root, topdown=False):
            path = path[len(root) + 1:]
            subs.extend(os.path.join(path, f) for f in files)
            subs.extend(os.path.join(path, d) for d in dirs)
        return subs

    unreferenced_files = subfiles(root)
    # Standard executor artifacts are always kept.
    remove_file('jsonout.txt', unreferenced_files)
    remove_file('stderr.txt', unreferenced_files)
    remove_file('stdout.txt', unreferenced_files)
    meta_fields = [[output, output_schema], [descriptor, descriptor_schema]]
    for meta_field, meta_field_schema in meta_fields:
        for field_schema, fields in iterate_fields(meta_field, meta_field_schema):
            if 'type' in field_schema:
                field_type = field_schema['type']
                field_name = field_schema['name']
                # Keep files/dirs referenced directly by the field value.
                if field_type.startswith('basic:file:'):
                    remove_file(fields[field_name]['file'], unreferenced_files)
                elif field_type.startswith('list:basic:file:'):
                    for field in fields[field_name]:
                        remove_file(field['file'], unreferenced_files)
                elif field_type.startswith('basic:dir:'):
                    remove_tree(fields[field_name]['dir'], unreferenced_files)
                elif field_type.startswith('list:basic:dir:'):
                    for field in fields[field_name]:
                        remove_tree(field['dir'], unreferenced_files)
                # Keep any additional references ('refs') of file/dir fields.
                if field_type.startswith('basic:file:') or field_type.startswith('basic:dir:'):
                    for ref in fields[field_name].get('refs', []):
                        remove_tree(ref, unreferenced_files)
                elif field_type.startswith('list:basic:file:') or field_type.startswith('list:basic:dir:'):
                    for field in fields[field_name]:
                        for ref in field.get('refs', []):
                            remove_tree(ref, unreferenced_files)
    return set([os.path.join(root, filename) for filename in unreferenced_files])
Get files to purge .
45,332
def location_purge(location_id, delete=False, verbosity=0):
    """Print and conditionally delete files not referenced by meta data."""
    try:
        location = DataLocation.objects.get(id=location_id)
    except DataLocation.DoesNotExist:
        logger.warning("Data location does not exist", extra={'location_id': location_id})
        return
    unreferenced_files = set()
    purged_data = Data.objects.none()
    referenced_by_data = location.data.exists()
    if referenced_by_data:
        # Skip locations that still have data objects being processed.
        if location.data.exclude(status__in=[Data.STATUS_DONE, Data.STATUS_ERROR]).exists():
            return
        purge_files_sets = list()
        purged_data = location.data.all()
        for data in purged_data:
            purge_files_sets.append(get_purge_files(
                location.get_path(),
                data.output,
                data.process.output_schema,
                data.descriptor,
                getattr(data.descriptor_schema, 'schema', [])
            ))
        # Only files unreferenced by ALL data objects at this location may go.
        intersected_files = set.intersection(*purge_files_sets) if purge_files_sets else set()
        unreferenced_files.update(intersected_files)
    else:
        # Nothing references this location; purge it entirely.
        unreferenced_files.add(location.get_path())
        unreferenced_files.add(location.get_runtime_path())
    if verbosity >= 1:
        if unreferenced_files:
            logger.info(__(
                "Unreferenced files for location id {} ({}):",
                location_id, len(unreferenced_files)
            ))
            for name in unreferenced_files:
                logger.info(__("  {}", name))
        else:
            logger.info(__("No unreferenced files for location id {}", location_id))
    if delete:
        for name in unreferenced_files:
            if os.path.isfile(name) or os.path.islink(name):
                os.remove(name)
            elif os.path.isdir(name):
                shutil.rmtree(name)
        location.purged = True
        location.save()
        if not referenced_by_data:
            location.delete()
Print and conditionally delete files not referenced by meta data .
45,333
def _storage_purge_all(delete=False, verbosity=0):
    """Purge storages that are not referenced by any data object."""
    orphaned_storages = Storage.objects.filter(data=None)
    if verbosity >= 1:
        if orphaned_storages.exists():
            logger.info(__("Unreferenced storages ({}):", orphaned_storages.count()))
            for storage_id in orphaned_storages.values_list('id', flat=True):
                logger.info(__("  {}", storage_id))
        else:
            logger.info("No unreferenced storages")
    if delete:
        orphaned_storages.delete()
Purge unreferenced storages .
45,334
def visit_ClassDef(self, node):
    """Visit top-level classes and collect process descriptors.

    Only classes with a base resolvable to ``runtime.Process`` are parsed.
    """
    # Resolve each base name against the runtime module; skip non-process classes.
    for base in node.bases:
        if isinstance(base, ast.Name) and isinstance(base.ctx, ast.Load):
            base = getattr(runtime, base.id, None)
        elif isinstance(base, ast.Attribute) and isinstance(base.ctx, ast.Load):
            base = getattr(runtime, base.attr, None)
        else:
            continue
        if issubclass(base, runtime.Process):
            break
    else:
        # No Process base class found; not a process definition.
        return
    descriptor = ProcessDescriptor(source=self.source)
    # Nested classes holding the input/output field definitions.
    embedded_class_fields = {
        runtime.PROCESS_INPUTS_NAME: descriptor.inputs,
        runtime.PROCESS_OUTPUTS_NAME: descriptor.outputs,
    }
    for item in node.body:
        if isinstance(item, ast.Assign):
            # Class-level assignments to known metadata names (slug, name, ...).
            if (len(item.targets) == 1
                    and isinstance(item.targets[0], ast.Name)
                    and isinstance(item.targets[0].ctx, ast.Store)
                    and item.targets[0].id in PROCESS_METADATA):
                value = PROCESS_METADATA[item.targets[0].id].get_value(item.value)
                setattr(descriptor.metadata, item.targets[0].id, value)
        elif (isinstance(item, ast.Expr)
                and isinstance(item.value, ast.Str)
                and descriptor.metadata.description is None):
            # First bare string in the class body becomes the description.
            descriptor.metadata.description = item.value.s
        elif isinstance(item, ast.ClassDef) and item.name in embedded_class_fields.keys():
            self.visit_field_class(item, descriptor, embedded_class_fields[item.name])
    descriptor.validate()
    self.processes.append(descriptor)
Visit top - level classes .
45,335
def parse(self):
    """Parse the process source and return the discovered process descriptors."""
    tree = ast.parse(self._source)
    visitor = ProcessVisitor(source=self._source)
    visitor.visit(tree)
    return visitor.processes
Parse process .
45,336
def get_permissions_class(permissions_name=None):
    """Load and cache the permissions class named by ``permissions_name``.

    Defaults to ``settings.FLOW_API['PERMISSIONS']`` when no name is given.
    """
    def load_permissions(permissions_name):
        # Import the module and pull out its ResolwePermissions class.
        try:
            return import_module('{}'.format(permissions_name)).ResolwePermissions
        except AttributeError:
            raise AttributeError(
                "'ResolwePermissions' class not found in {} module.".format(permissions_name)
            )
        except ImportError as ex:
            # Build a helpful error listing the built-in permissions modules.
            permissions_dir = os.path.join(os.path.dirname(upath(__file__)), '..', 'perms')
            permissions_dir = os.path.normpath(permissions_dir)
            try:
                builtin_permissions = [
                    name for _, name, _ in pkgutil.iter_modules([permissions_dir])
                    if name not in ['tests']
                ]
            except EnvironmentError:
                builtin_permissions = []
            if permissions_name not in ['resolwe.auth.{}'.format(p) for p in builtin_permissions]:
                permissions_reprs = map(repr, sorted(builtin_permissions))
                err_msg = (
                    "{} isn't an available flow permissions class.\n"
                    "Try using 'resolwe.auth.XXX', where XXX is one of:\n"
                    "    {}\n"
                    "Error was: {}".format(
                        permissions_name, ", ".join(permissions_reprs), ex
                    )
                )
                raise ImproperlyConfigured(err_msg)
            else:
                # The module exists but failed to import for another reason.
                raise
    if permissions_name is None:
        permissions_name = settings.FLOW_API['PERMISSIONS']
    if permissions_name not in permissions_classes:
        # Cache loaded classes in the module-level ``permissions_classes`` dict.
        permissions_classes[permissions_name] = load_permissions(permissions_name)
    return permissions_classes[permissions_name]
Load and cache permissions class .
45,337
def get_identifiers(sysmeta_pyxb):
    """Get the identifiers that provide revision context for a SciObj.

    Returns a tuple of (pid, sid, obsoletes_pid, obsoleted_by_pid); each may
    be absent in the system metadata.
    """
    get_opt = d1_common.xml.get_opt_val
    return (
        get_opt(sysmeta_pyxb, 'identifier'),
        get_opt(sysmeta_pyxb, 'seriesId'),
        get_opt(sysmeta_pyxb, 'obsoletes'),
        get_opt(sysmeta_pyxb, 'obsoletedBy'),
    )
Get set of identifiers that provide revision context for SciObj .
45,338
def topological_sort(unsorted_dict):
    """Sort objects by obsolescence dependency.

    ``unsorted_dict`` maps each pid to the pid it obsoletes (or None).
    Returns (ordered pid list, dict of pids whose dependency was never
    resolved).
    """
    ordered = []
    resolved = set()
    remaining = unsorted_dict.copy()
    progress = True
    while progress:
        progress = False
        for pid, obsoletes_pid in list(remaining.items()):
            # A pid is ready once its predecessor is placed (or it has none).
            if obsoletes_pid is None or obsoletes_pid in resolved:
                progress = True
                ordered.append(pid)
                resolved.add(pid)
                del remaining[pid]
    return ordered, remaining
Sort objects by dependency .
45,339
def wrap(indent_int, unwrap_str):
    """Wrap a single line to one or more lines that start at ``indent_int``
    and end at the last word that fits before ``WRAP_MARGIN_INT``."""
    line_list = []
    is_rest_block = unwrap_str.startswith(("- ", "* "))
    while unwrap_str:
        cut_pos = (unwrap_str + " ").rfind(" ", 0, WRAP_MARGIN_INT - indent_int)
        if cut_pos == -1:
            # No space found; hard-cut at the margin.
            cut_pos = WRAP_MARGIN_INT
        line_list.append("{}{}\n".format(" " * indent_int, unwrap_str[:cut_pos]))
        unwrap_str = unwrap_str[cut_pos + 1:]
        if is_rest_block:
            # Continuation lines of a ReST list item get two extra spaces.
            is_rest_block = False
            indent_int += 2
    return "".join(line_list)
Wrap a single line to one or more lines that start at indent_int and end at the last word that will fit before WRAP_MARGIN_INT .
45,340
def createSimpleResourceMap(ore_pid, scimeta_pid, sciobj_pid_list):
    """Create a simple OAI-ORE Resource Map with one Science Metadata document
    and any number of Science Data objects."""
    resource_map = ResourceMap()
    resource_map.initialize(ore_pid)
    resource_map.addMetadataDocument(scimeta_pid)
    resource_map.addDataDocuments(sciobj_pid_list, scimeta_pid)
    return resource_map
Create a simple OAI - ORE Resource Map with one Science Metadata document and any number of Science Data objects .
45,341
def createResourceMapFromStream(in_stream, base_url=d1_common.const.URL_DATAONE_ROOT):
    """Create a simple OAI-ORE Resource Map with one Science Metadata document
    and any number of Science Data objects, using a stream of PIDs.

    The stream holds one PID per line: the first is the Resource Map PID, the
    second the Science Metadata PID, and the rest the Science Data PIDs.
    Blank lines and comment lines ("#" or "# ...") are ignored.

    Raises ValueError when fewer than two usable PIDs are found.
    """
    pids = []
    for line in in_stream:
        pid = line.strip()
        # Skip blank and comment lines.
        if not pid or pid == "#" or pid.startswith("# "):
            continue
        # Bug fix: PIDs were previously never collected, and the length check
        # ran inside the loop, so the function always raised immediately.
        pids.append(pid)
    if len(pids) < 2:
        raise ValueError("Insufficient numbers of identifiers provided.")
    logging.info("Read {} identifiers".format(len(pids)))
    ore = ResourceMap(base_url=base_url)
    logging.info("ORE PID = {}".format(pids[0]))
    ore.initialize(pids[0])
    logging.info("Metadata PID = {}".format(pids[1]))
    ore.addMetadataDocument(pids[1])
    ore.addDataDocuments(pids[2:], pids[1])
    return ore
Create a simple OAI - ORE Resource Map with one Science Metadata document and any number of Science Data objects using a stream of PIDs .
45,342
def initialize(self, pid, ore_software_id=d1_common.const.ORE_SOFTWARE_ID):
    """Create the basic ORE document structure.

    Binds the ORE namespaces, then creates the ResourceMap node identified
    by ``pid`` together with its associated Aggregation node.
    """
    for k in list(d1_common.const.ORE_NAMESPACE_DICT.keys()):
        self.bind(k, d1_common.const.ORE_NAMESPACE_DICT[k])
    oid = self._pid_to_id(pid)
    ore = rdflib.URIRef(oid)
    self.add((ore, rdflib.RDF.type, ORE.ResourceMap))
    self.add((ore, DCTERMS.identifier, rdflib.term.Literal(pid)))
    self.add((ore, DCTERMS.creator, rdflib.term.Literal(ore_software_id)))
    # The aggregation is identified by a fragment on the map's URI.
    ag = rdflib.URIRef(oid + "#aggregation")
    self.add((ore, ORE.describes, ag))
    self.add((ag, rdflib.RDF.type, ORE.Aggregation))
    self.add((ORE.Aggregation, rdflib.RDFS.isDefinedBy, ORE.term("")))
    self.add((ORE.Aggregation, rdflib.RDFS.label, rdflib.term.Literal("Aggregation")))
    self._ore_initialized = True
Create the basic ORE document structure .
45,343
def serialize_to_transport(self, doc_format="xml", *args, **kwargs):
    """Serialize the ResourceMap to a UTF-8 encoded XML document."""
    # Delegate to the rdflib Graph serializer with a fixed encoding.
    return super(ResourceMap, self).serialize(
        format=doc_format, encoding="utf-8", *args, **kwargs
    )
Serialize ResourceMap to UTF - 8 encoded XML document .
45,344
def serialize_to_display(self, doc_format="pretty-xml", *args, **kwargs):
    """Serialize the ResourceMap to an XML doc pretty-printed for display."""
    serialized = super(ResourceMap, self).serialize(
        format=doc_format, encoding=None, *args, **kwargs
    )
    return serialized.decode("utf-8")
Serialize ResourceMap to an XML doc that is pretty printed for display .
45,345
def deserialize(self, *args, **kwargs):
    """Deserialize a Resource Map XML document into this graph."""
    self.parse(*args, **kwargs)
    # Mark the map as initialized so other operations are allowed.
    self._ore_initialized = True
Deserialize Resource Map XML doc .
45,346
def addResource(self, pid):
    """Add a resource to the Resource Map aggregation."""
    self._check_initialized()
    try:
        # Already present; nothing to do.
        self.getObjectByPid(pid)
        return
    except IndexError:
        pass
    resource = rdflib.URIRef(self._pid_to_id(pid))
    aggregation = self.getAggregation()
    self.add((aggregation, ORE.aggregates, resource))
    self.add((resource, ORE.isAggregatedBy, aggregation))
    self.add((resource, DCTERMS.identifier, rdflib.term.Literal(pid)))
Add a resource to the Resource Map .
45,347
def setDocuments(self, documenting_pid, documented_pid):
    """Add a CiTO (Citation Typing Ontology) triple asserting that
    ``documenting_pid`` documents ``documented_pid``."""
    self._check_initialized()
    documented_id = self.getObjectByPid(documented_pid)
    documenting_id = self.getObjectByPid(documenting_pid)
    self.add((documenting_id, CITO.documents, documented_id))
Add a CiTO (Citation Typing Ontology) triple asserting that documenting_pid documents documented_pid.
45,348
def setDocumentedBy(self, documented_pid, documenting_pid):
    """Add a CiTO (Citation Typing Ontology) triple asserting that
    ``documented_pid`` isDocumentedBy ``documenting_pid``."""
    self._check_initialized()
    documenting_id = self.getObjectByPid(documenting_pid)
    documented_id = self.getObjectByPid(documented_pid)
    self.add((documented_id, CITO.isDocumentedBy, documenting_id))
Add a CiTO (Citation Typing Ontology) triple asserting that documented_pid isDocumentedBy documenting_pid.
45,349
def parseDoc(self, doc_str, format="xml"):
    """Parse an OAI-ORE Resource Map document and return this instance."""
    self.parse(data=doc_str, format=format)
    self._ore_initialized = True
    return self
Parse a OAI - ORE Resource Maps document .
45,350
def _pid_to_id(self, pid):
    """Convert a PID to a resolve URI usable as an OAI-ORE identifier."""
    encoded_pid = d1_common.url.encodePathElement(pid)
    return d1_common.url.joinPathElements(
        self._base_url, self._version_tag, "resolve", encoded_pid
    )
Converts a pid to a URI that can be used as an OAI - ORE identifier .
45,351
def make_checksum_validation_script(stats_list):
    """Write batch files for validating object checksums from another machine.

    Creates ./hash_check/curl.sh (downloads each object), md5.txt / sha1.txt
    (expected checksums), and check.sh (driver script).

    :param stats_list: list of dicts with 'largest_sysmeta_xml' (iterable of
        System Metadata XML docs) and 'gmn_dict' (holding 'base_url').
    """
    if not os.path.exists('./hash_check'):
        os.mkdir('./hash_check')
    with open('./hash_check/curl.sh', 'w') as curl_f, open(
        './hash_check/md5.txt', 'w'
    ) as md5_f, open('./hash_check/sha1.txt', 'w') as sha1_f:
        curl_f.write('#!/usr/bin/env bash\n\n')
        for stats_dict in stats_list:
            for sysmeta_xml in stats_dict['largest_sysmeta_xml']:
                print(sysmeta_xml)
                sysmeta_pyxb = d1_common.types.dataoneTypes_v1_2.CreateFromDocument(
                    sysmeta_xml
                )
                # Bug fix: keep the PID as str. Encoding it to bytes broke
                # re.sub() (str pattern, bytes input) and rendered b'...' in
                # the generated scripts under Python 3.
                pid = sysmeta_pyxb.identifier.value()
                file_name = re.sub(r'\W+', '_', pid)
                size = sysmeta_pyxb.size
                base_url = stats_dict['gmn_dict']['base_url']
                if size > 100 * 1024 * 1024:
                    # Bug fix: the log message had unfilled {} placeholders,
                    # and "ignored" objects were still written to the scripts.
                    logging.info(
                        'Ignored large object. size={} pid={}'.format(size, pid)
                    )
                    continue
                curl_f.write('# {} {}\n'.format(size, pid))
                curl_f.write(
                    'curl -o obj/{} {}/v1/object/{}\n'.format(
                        file_name, base_url, d1_common.url.encodePathElement(pid)
                    )
                )
                if sysmeta_pyxb.checksum.algorithm == 'MD5':
                    md5_f.write(
                        '{} obj/{}\n'.format(sysmeta_pyxb.checksum.value(), file_name)
                    )
                else:
                    sha1_f.write(
                        '{} obj/{}\n'.format(sysmeta_pyxb.checksum.value(), file_name)
                    )
    with open('./hash_check/check.sh', 'w') as f:
        f.write('#!/usr/bin/env bash\n\n')
        f.write('mkdir -p obj\n')
        f.write('./curl.sh\n')
        f.write('sha1sum -c sha1.txt\n')
        f.write('md5sum -c md5.txt\n')
Make batch files required for checking checksums from another machine .
45,352
def update_dependency_kinds(apps, schema_editor):
    """Data migration: recompute historical dependency kinds, which may be wrong.

    A dependency is 'io' when the parent's pk appears in one of the child's
    data inputs, otherwise 'subprocess'.
    """
    DataDependency = apps.get_model('flow', 'DataDependency')
    for dependency in DataDependency.objects.all():
        dependency.kind = 'subprocess'
        child = dependency.child
        parent = dependency.parent
        for field_schema, fields in iterate_fields(
            child.input, child.process.input_schema
        ):
            name = field_schema['name']
            value = fields[name]
            field_type = field_schema.get('type', '')
            if field_type.startswith('data:'):
                if value == parent.pk:
                    dependency.kind = 'io'
                    break
            elif field_type.startswith('list:data:'):
                # Bug fix: the original compared the whole list ('value') to
                # parent.pk instead of each element, so list inputs never
                # produced an 'io' kind.
                if parent.pk in value:
                    dependency.kind = 'io'
                    break
        dependency.save()
Update historical dependency kinds as they may be wrong .
45,353
def escape(self, value):
    """Escape the given value using the engine's escape handler.

    The value is coerced to text first; when the engine has no escape
    handler configured, the value is returned unescaped.
    """
    value = soft_unicode(value)
    if self._engine._escape is None:
        return value
    return self._engine._escape(value)
Escape given value .
45,354
def _wrap_jinja_filter(self, function):
    """Wrap a Jinja filter so any exception it raises becomes an undefined
    value instead of propagating.
    """
    def wrapper(*args, **kwargs):
        # Broad catch is deliberate: a failing filter must degrade to
        # undefined rather than abort template evaluation.
        try:
            return function(*args, **kwargs)
        except Exception:
            return NestedUndefined()

    # Copy Jinja's filter marker attributes (names ending in 'filter', e.g.
    # contextfilter) so the wrapper is treated like the original filter.
    for attribute in dir(function):
        if attribute.endswith('filter'):
            setattr(wrapper, attribute, getattr(function, attribute))
    return wrapper
Propagate exceptions as undefined values filter .
45,355
def _register_custom_filters(self):
    """Load and register filter modules listed in the CUSTOM_FILTERS setting.

    Each listed module must expose a ``filters`` dict mapping filter names
    to callables; these are merged into the template environment.

    :raises KeyError: when CUSTOM_FILTERS is not a list.
    :raises ImproperlyConfigured: when a module cannot be imported or does
        not define a valid ``filters`` dict.
    """
    custom_filters = self.settings.get('CUSTOM_FILTERS', [])
    if not isinstance(custom_filters, list):
        raise KeyError("`CUSTOM_FILTERS` setting must be a list.")
    for filter_module_name in custom_filters:
        try:
            filter_module = import_module(filter_module_name)
        except ImportError as error:
            raise ImproperlyConfigured(
                "Failed to load custom filter module '{}'.\n"
                "Error was: {}".format(filter_module_name, error)
            )
        try:
            filter_map = getattr(filter_module, 'filters')
            if not isinstance(filter_map, dict):
                raise TypeError
        except (AttributeError, TypeError):
            raise ImproperlyConfigured(
                "Filter module '{}' does not define a 'filters' dictionary".format(
                    filter_module_name
                )
            )
        self._environment.filters.update(filter_map)
Register any custom filter modules .
45,356
def _evaluation_context(self, escape, safe_wrapper):
    """Temporarily install the escape and safe-wrapper callables on the
    engine for the duration of an evaluation.

    NOTE(review): generator-based; presumably decorated with
    @contextlib.contextmanager at the definition site -- confirm.
    """
    self._escape = escape
    self._safe_wrapper = safe_wrapper
    try:
        yield
    finally:
        # Always clear, even if evaluation raised.
        self._escape = None
        self._safe_wrapper = None
Configure the evaluation context .
45,357
def evaluate_block(self, template, context=None, escape=None, safe_wrapper=None):
    """Evaluate a template block and return the rendered string.

    :param template: template source string.
    :param context: mapping of variables available to the template.
    :raises EvaluationError: when the template fails to compile or render.
    """
    if context is None:
        context = {}
    try:
        with self._evaluation_context(escape, safe_wrapper):
            template = self._environment.from_string(template)
            return template.render(**context)
    except jinja2.TemplateError as error:
        raise EvaluationError(error.args[0])
    # Consistency fix: the redundant 'finally: self._escape = None' was
    # removed -- _evaluation_context() already resets both _escape and
    # _safe_wrapper in its own finally clause, and evaluate_inline() has no
    # such extra clause.
Evaluate a template block .
45,358
def evaluate_inline(self, expression, context=None, escape=None, safe_wrapper=None):
    """Evaluate an inline template expression and return its value.

    :param expression: expression source string.
    :param context: mapping of variables available to the expression.
    :raises EvaluationError: when the expression fails to compile.
    """
    if context is None:
        context = {}
    try:
        with self._evaluation_context(escape, safe_wrapper):
            compiled = self._environment.compile_expression(expression)
            return compiled(**context)
    except jinja2.TemplateError as error:
        raise EvaluationError(error.args[0])
Evaluate an inline expression .
45,359
def update_module_file(redbaron_tree, module_path, show_diff=False, dry_run=False):
    """Regenerate ``module_path`` from ``redbaron_tree``.

    :param show_diff: when True, show a diff instead of silently overwriting.
    :param dry_run: when True, do not actually write the file.
    :returns: False when the generated source is identical to the file on
        disk; True when the file was (or, with dry_run, would have been)
        modified.
    """
    with tempfile.NamedTemporaryFile() as tmp_file:
        # NOTE(review): the temp file is opened in binary mode, so this
        # assumes redbaron_tree_to_module_str() returns bytes -- confirm.
        tmp_file.write(redbaron_tree_to_module_str(redbaron_tree))
        tmp_file.seek(0)
        if are_files_equal(module_path, tmp_file.name):
            logging.debug('Source unchanged')
            return False
        logging.debug('Source modified')
        tmp_file.seek(0)
        diff_update_file(module_path, tmp_file.read(), show_diff, dry_run)
        # Bug fix: previously fell off the end and returned None on the
        # modified path while the unchanged path returned False.
        return True
Set show_diff to False to overwrite module_path with a new file generated from redbaron_tree .
45,360
def find_repo_root_by_path(path):
    """Return the root directory of the git repository that contains ``path``.

    :param path: path to any item inside the repository.
    :returns: str: absolute path of the repository's top-level directory.
    """
    repo = git.Repo(path, search_parent_directories=True)
    repo_path = repo.git.rev_parse('--show-toplevel')
    # Idiom fix: lazy %-style args avoid formatting when INFO is disabled.
    logging.info('Repository: %s', repo_path)
    return repo_path
Given a path to an item in a git repository find the root of the repository .
45,361
def _format_value(self, operation, key, indent):
    """Format an operation value for aligned, indented display.

    Returns a list of display lines; empty when the key is absent. A key
    that exists with value None is still rendered (as 'None'). The key is
    printed only on the first line of a multi-value entry.
    """
    v = self._find_value(operation, key)
    # Sentinel distinguishes "key absent" from a legitimate None value.
    if v == "NOT_FOUND":
        return []
    if not isinstance(v, list):
        v = [v]
    if not len(v):
        v = [None]
    key = key + ":"
    lines = []
    for s in v:
        if isinstance(s, tuple):
            s = "{}: {}".format(*s)
        # Pad so the value column starts at column TAB.
        lines.append(
            "{}{}{}{}".format(" " * indent, key, " " * (TAB - indent - len(key) - 1), s)
        )
        # Only the first line carries the key.
        key = ""
    return lines
A value that exists in the operation but has value None is displayed .
45,362
def run_process(self, slug, inputs):
    """Spawn a new process from a running process.

    Emits an ``export`` line for every input value that is an existing file
    path, then a ``run`` line carrying the process slug and inputs as
    compact JSON.
    """
    def _emit_exports(node):
        # Walk nested dicts/lists; announce every existing file path.
        if isinstance(node, str) and os.path.isfile(node):
            print("export {}".format(node))
        elif isinstance(node, dict):
            for child in node.values():
                _emit_exports(child)
        elif isinstance(node, list):
            for child in node:
                _emit_exports(child)

    _emit_exports(inputs)
    payload = json.dumps({'process': slug, 'input': inputs}, separators=(',', ':'))
    print('run {}'.format(payload))
Run a new process from a running process .
45,363
def info(self, *args):
    """Log an informational message composed of the string representations
    of all arguments, joined by spaces.
    """
    report = resolwe_runtime_utils.info(' '.join([str(x) for x in args]))
    print(report)
Log informational message .
45,364
def get_data_id_by_slug(self, slug):
    """Resolve the data object ID for the given slug via the Resolwe API.

    :raises ValueError: when no object, or more than one object, matches.
    """
    resolwe_host = os.environ.get('RESOLWE_HOST_URL')
    url = urllib.parse.urljoin(
        resolwe_host, '/api/data?slug={}&fields=id'.format(slug)
    )
    with urllib.request.urlopen(url, timeout=60) as response:
        matches = json.loads(response.read().decode('utf-8'))
    if not matches:
        raise ValueError('Data not found for slug {}'.format(slug))
    if len(matches) > 1:
        raise ValueError('More than one data object returned for slug {}'.format(slug))
    return matches[0]['id']
Find data object ID for given slug .
45,365
def requirements(self):
    """Return process requirements wrapped for attribute-style access.

    Nested dicts are wrapped recursively; missing keys yield None.
    """
    class _AttrDict(dict):
        def __getattr__(self, name):
            item = self.get(name)
            if isinstance(item, dict):
                return _AttrDict(item)
            return item

    return _AttrDict(self._meta.metadata.requirements)
Process requirements .
45,366
def prepare_runtime(self, runtime_dir, data):
    """Prepare the runtime directory for executing a Python process.

    Copies the resolwe.process runtime package, writes the process program
    source and a JSON dump of the hydrated inputs into ``runtime_dir``.

    :param runtime_dir: directory that will be mounted into the executor.
    :param data: the Data object being processed.
    :returns: dict mapping names of created files/dirs in runtime_dir to
        the volume paths where they should be mounted.
    """
    # Copy the Python process runtime support package.
    import resolwe.process as runtime_package

    src_dir = os.path.dirname(inspect.getsourcefile(runtime_package))
    dest_package_dir = os.path.join(
        runtime_dir, PYTHON_RUNTIME_DIRNAME, 'resolwe', 'process'
    )
    shutil.copytree(src_dir, dest_package_dir)
    os.chmod(dest_package_dir, 0o755)

    # Write the process program source.
    source = data.process.run.get('program', '')
    program_path = os.path.join(runtime_dir, PYTHON_PROGRAM_FILENAME)
    with open(program_path, 'w') as file:
        file.write(source)
    os.chmod(program_path, 0o755)

    # Dump hydrated inputs so the program can read them as JSON.
    inputs = copy.deepcopy(data.input)
    hydrate_input_references(inputs, data.process.input_schema)
    hydrate_input_uploads(inputs, data.process.input_schema)
    inputs_path = os.path.join(runtime_dir, PYTHON_INPUTS_FILENAME)

    def default(obj):
        """JSON fallback: LazyStorageJSON objects serialize as an empty string."""
        class_name = obj.__class__.__name__
        if class_name == 'LazyStorageJSON':
            return ''
        raise TypeError(f'Object of type {class_name} is not JSON serializable')

    with open(inputs_path, 'w') as file:
        json.dump(inputs, file, default=default)

    volume_maps = {
        PYTHON_RUNTIME_DIRNAME: PYTHON_RUNTIME_VOLUME,
        PYTHON_PROGRAM_FILENAME: PYTHON_PROGRAM_VOLUME,
        PYTHON_INPUTS_FILENAME: PYTHON_INPUTS_VOLUME,
    }
    return volume_maps
Prepare runtime directory .
45,367
def get_subject_with_local_validation(jwt_bu64, cert_obj):
    """Validate the JWT against the certificate and return its "sub" claim.

    Returns None (after logging the reason) when validation fails or the
    claim is missing.
    """
    try:
        jwt_dict = validate_and_decode(jwt_bu64, cert_obj)
    except JwtException as e:
        # log_jwt_bu64_info() presumably returns None; the return value
        # communicates failure to the caller -- confirm.
        return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
    try:
        return jwt_dict['sub']
    except LookupError:
        log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict)
Validate the JWT and return the subject it contains .
45,368
def get_subject_without_validation(jwt_bu64):
    """Extract the "sub" claim from the JWT without verifying its signature.

    Returns None (after logging the reason) on any parse failure or when
    the claim is missing.
    """
    try:
        jwt_dict = get_jwt_dict(jwt_bu64)
    except JwtException as e:
        return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
    if 'sub' in jwt_dict:
        return jwt_dict['sub']
    log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict)
Extract subject from the JWT without validating the JWT .
45,369
def get_jwt_dict(jwt_bu64):
    """Parse a Base64 encoded JWT and return its combined claims as a dict.

    Header and payload claims are merged; a SHA-1 fingerprint of the
    signature segment is added under '_sig_sha1' for logging/debugging.

    :raises JwtException: when the JWT cannot be decoded.
    """
    jwt_tup = get_jwt_tup(jwt_bu64)
    try:
        jwt_dict = json.loads(jwt_tup[0].decode('utf-8'))
        jwt_dict.update(json.loads(jwt_tup[1].decode('utf-8')))
        jwt_dict['_sig_sha1'] = hashlib.sha1(jwt_tup[2]).hexdigest()
    except (TypeError, ValueError) as e:
        # Bug fix: json.loads() raises ValueError (JSONDecodeError) and
        # .decode() raises UnicodeDecodeError (a ValueError subclass) on
        # malformed input; the original caught only TypeError, letting
        # those escape as raw exceptions instead of JwtException.
        raise JwtException('Decode failed. error="{}"'.format(e))
    return jwt_dict
Parse Base64 encoded JWT and return as a dict .
45,370
def validate_and_decode(jwt_bu64, cert_obj):
    """Validate the JWT's RS256 signature with the certificate's public key
    and return the decoded claims as a dict.

    :raises JwtException: when the token is invalid or does not verify.
    """
    try:
        return jwt.decode(
            jwt_bu64.strip(), cert_obj.public_key(), algorithms=['RS256'], verify=True
        )
    except jwt.InvalidTokenError as e:
        raise JwtException('Signature is invalid. error="{}"'.format(str(e)))
Validate the JWT and return as a dict .
45,371
def log_jwt_dict_info(log, msg_str, jwt_dict):
    """Dump the claims of a decoded JWT to the given log function.

    Known claims (from CLAIM_LIST) are listed first under their display
    names, followed by any remaining claims in sorted key order.
    """
    d = ts_to_str(jwt_dict)
    # CLAIM_LIST entries are (claim key, display name, ...); pop() removes
    # the known claims so the second list holds only the leftovers.
    log_list = [(b, d.pop(a)) for a, b, c in CLAIM_LIST if a in d] + [
        (k, d[k]) for k in sorted(d)
    ]
    list(
        map(
            log,
            ['{}:'.format(msg_str)] + [' {}: {}'.format(k, v) for k, v in log_list],
        )
    )
Dump JWT to log .
45,372
def ts_to_str(jwt_dict):
    """Return a copy of the JWT dict with timestamp claims rendered as
    human readable 'YYYY-MM-DD HH:MM:SS' strings.
    """
    result = ts_to_dt(jwt_dict)
    for key in list(result):
        value = result[key]
        if isinstance(value, datetime.datetime):
            result[key] = value.isoformat().replace('T', ' ')
    return result
Convert timestamps in JWT to human readable dates .
45,373
def ts_to_dt(jwt_dict):
    """Return a copy of the JWT claim dict with timestamp claims converted
    to datetime objects.

    Only the CLAIM_LIST entries whose third element is truthy (timestamp
    claims) are converted.
    """
    d = jwt_dict.copy()
    for k, v in [v[:2] for v in CLAIM_LIST if v[2]]:
        if k in jwt_dict:
            d[k] = d1_common.date_time.dt_from_ts(jwt_dict[k])
    return d
Convert timestamps in JWT to datetime objects .
45,374
def _escape(self, value):
    """Shell-escape the given value unless it is already marked safe."""
    if isinstance(value, SafeString):
        return value
    return shellescape.quote(value)
Escape given value unless it is safe .
45,375
def open_sciobj_file_by_pid_ctx(pid, write=False):
    """Context manager: open the file holding the Science Object bytes for
    ``pid`` at its default location in the local SciObj store tree.

    NOTE(review): generator-based; presumably decorated with
    @contextlib.contextmanager at the definition site -- confirm.
    """
    abs_path = get_abs_sciobj_file_path_by_pid(pid)
    with open_sciobj_file_by_path_ctx(abs_path, write) as sciobj_file:
        yield sciobj_file
Open the file containing the Science Object bytes of pid in the default location within the tree of the local SciObj store .
45,376
def open_sciobj_file_by_path_ctx(abs_path, write=False):
    """Context manager: open the SciObj file at ``abs_path`` for binary read
    or write, creating missing parent directories when writing.

    If the file is empty when the context exits (e.g. nothing was written),
    it is removed.

    NOTE(review): generator-based; presumably decorated with
    @contextlib.contextmanager at the definition site -- confirm.
    """
    if write:
        d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
    try:
        with open(abs_path, 'wb' if write else 'rb') as sciobj_file:
            yield sciobj_file
    finally:
        # Never leave a zero-byte object file behind.
        if os.path.exists(abs_path) and not os.path.getsize(abs_path):
            os.unlink(abs_path)
Open the file containing the Science Object bytes at the custom location abs_path in the local filesystem .
45,377
def open_sciobj_file_by_pid(pid, write=False):
    """Open the file holding the Science Object bytes for ``pid`` at its
    default location in the local SciObj store, for binary read or write.

    When opening for write, missing parent directories are created first.
    """
    abs_path = get_abs_sciobj_file_path_by_pid(pid)
    if write:
        d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
    return open_sciobj_file_by_path(abs_path, write)
Open the file containing the Science Object bytes of pid, in the default location within the local SciObj store, for read or write. When opening for write, missing directories are created.
45,378
def open_sciobj_file_by_path(abs_path, write=False):
    """Open a SciObj file for binary read or write.

    When opening for write, any missing parent directories are created
    first.
    """
    if write:
        d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
        return open(abs_path, 'wb')
    return open(abs_path, 'rb')
Open a SciObj file for read or write . If opened for write create any missing directories . For a SciObj stored in the default SciObj store the path includes the PID hash based directory levels .
45,379
def get_rel_sciobj_file_path(pid):
    """Map a PID to the relative local path of the file holding the
    object's bytes.

    The path is derived from the SHA-1 hex digest of the PID, with two
    two-character directory levels to keep directory sizes manageable.
    """
    digest = hashlib.sha1(pid.encode('utf-8')).hexdigest()
    return os.path.join(digest[0:2], digest[2:4], digest)
Get the relative local path to the file holding an object s bytes .
45,380
def get_abs_sciobj_file_path_by_url(file_url):
    """Resolve a file:// URL to the absolute path of the file holding an
    object's bytes.

    URLs whose host part is the relative-path magic string are resolved
    relative to the SciObj store root; otherwise the URL path must already
    be absolute.
    """
    assert_sciobj_store_exists()
    m = re.match(r'file://(.*?)/(.*)', file_url, re.IGNORECASE)
    if m.group(1) == RELATIVE_PATH_MAGIC_HOST_STR:
        return os.path.join(get_abs_sciobj_store_path(), m.group(2))
    assert os.path.isabs(m.group(2))
    return m.group(2)
Get the absolute path to the file holding an object s bytes .
45,381
def get_gmn_version(base_url):
    """Determine the GMN version currently running on a GMN instance by
    scraping its /home page.

    :returns: (True, version_str) on success, (False, error_str) on failure.
    """
    home_url = d1_common.url.joinPathElements(base_url, 'home')
    try:
        # NOTE(review): verify=False disables TLS certificate validation;
        # acceptable for a best-effort version probe but worth confirming.
        response = requests.get(home_url, verify=False)
    except requests.exceptions.ConnectionError as e:
        return False, str(e)
    if not response.ok:
        return False, 'invalid /home. status={}'.format(response.status_code)
    soup = bs4.BeautifulSoup(response.content, 'html.parser')
    try:
        # Bug fix: find() returns None when the label is absent, which made
        # the original raise AttributeError instead of reporting a parse
        # failure.
        version_str = soup.find(string='GMN version:').find_next('td').string
    except AttributeError:
        return False, 'Parse failed'
    if version_str is None:
        return False, 'Parse failed'
    return True, version_str
Return the version currently running on a GMN instance .
45,382
def extract_subjects(subject_info_xml, primary_str):
    """Extract the set of authenticated subjects from a DataONE SubjectInfo
    XML document, starting from the primary (authenticated) subject.
    """
    subject_info_pyxb = deserialize_subject_info(subject_info_xml)
    subject_info_tree = gen_subject_info_tree(subject_info_pyxb, primary_str)
    return subject_info_tree.get_subject_set()
Extract a set of authenticated subjects from a DataONE SubjectInfo .
45,383
def deserialize_subject_info(subject_info_xml):
    """Deserialize a SubjectInfo XML document to a native object.

    :raises d1_common.types.exceptions.InvalidToken: when the document
        cannot be deserialized.
    """
    try:
        return d1_common.xml.deserialize(subject_info_xml)
    except ValueError as e:
        raise d1_common.types.exceptions.InvalidToken(
            0,
            'Could not deserialize SubjectInfo. subject_info="{}", error="{}"'.format(
                subject_info_xml, str(e)
            ),
        )
Deserialize SubjectInfo XML doc to native object .
45,384
def gen_subject_info_tree(subject_info_pyxb, authn_subj, include_duplicates=False):
    """Convert the flat, self-referential lists in a SubjectInfo document
    to a tree structure rooted at the authenticated subject.

    A 'Symbolic' branch holding the implicit authenticated symbolic subject
    is added, and empty branches are trimmed before returning.
    """
    # Mutable bag of state threaded through the recursive helpers.
    class State:
        pass

    state = State()
    state.subject_info_pyxb = subject_info_pyxb
    state.include_duplicates = include_duplicates
    # visited_set presumably guards against cycles in _add_subject -- confirm.
    state.visited_set = set()
    state.tree = SubjectInfoNode("Root", TYPE_NODE_TAG)
    _add_subject(state, state.tree, authn_subj)
    symbolic_node = state.tree.add_child("Symbolic", TYPE_NODE_TAG)
    _add_subject(state, symbolic_node, d1_common.const.SUBJECT_AUTHENTICATED)
    _trim_tree(state)
    return state.tree
Convert the flat self referential lists in the SubjectInfo to a tree structure .
45,385
def _trim_tree(state):
    """Trim empty non-subject leaf nodes from the tree.

    Removing a leaf can expose its parent as a new empty leaf, so passes
    are repeated until a full pass removes nothing.
    """
    # Bug fix: the original recursed once per removed node, which is O(n^2)
    # and can exhaust the recursion limit on large trees. An iterative
    # fixpoint loop reaches the same final tree.
    trimmed = True
    while trimmed:
        trimmed = False
        for node in list(state.tree.leaf_node_gen):
            if node.type_str == TYPE_NODE_TAG:
                node.parent.child_list.remove(node)
                trimmed = True
Trim empty leaf nodes from the tree .
45,386
def get_path_str(self, sep=os.path.sep, type_str=None):
    """Return the path from the root to this node as a string, optionally
    including only nodes of the given type.
    """
    labels = [
        node.label_str
        for node in self.parent_gen
        if type_str in (None, node.type_str)
    ]
    labels.reverse()
    return sep.join(labels)
Get path from root to this node .
45,387
def get_leaf_node_path_list(self, sep=os.path.sep, type_str=None):
    """Return path strings for every leaf node in the tree rooted at this
    node, optionally including only nodes of the given type.
    """
    return [v.get_path_str(sep, type_str) for v in self.leaf_node_gen]
Get paths for all leaf nodes for the tree rooted at this node .
45,388
def get_path_list(self, type_str=None):
    """Return the labels of the nodes leading from the root down to this
    node, optionally including only nodes of the given type.
    """
    labels = [
        node.label_str
        for node in self.parent_gen
        if type_str in (None, node.type_str)
    ]
    return labels[::-1]
Get list of the labels of the nodes leading up to this node from the root .
45,389
def get_label_set(self, type_str=None):
    """Return the set of labels in the tree rooted at this node, optionally
    restricted to nodes of the given type.
    """
    label_set = set()
    for node in self.node_gen:
        if type_str in (None, node.type_str):
            label_set.add(node.label_str)
    return label_set
Get a set of label_str for the tree rooted at this node .
45,390
def start_task_type(self, task_type_str, total_task_count):
    """Begin tracking a new type of task.

    Call just before entering a loop that processes many tasks of the
    given type. Each type may be started only once at a time.
    """
    assert (
        task_type_str not in self._task_dict
    ), "Task type has already been started"
    task_info = {
        "start_time": time.time(),
        "total_task_count": total_task_count,
        "task_idx": 0,
    }
    self._task_dict[task_type_str] = task_info
Call when about to start processing a new type of task typically just before entering a loop that processes many task of the given type .
45,391
def end_task_type(self, task_type_str):
    """Finish tracking a type of task.

    Logs final progress, then discards the tracking state for the type.
    """
    assert (
        task_type_str in self._task_dict
    ), "Task type has not been started yet: {}".format(task_type_str)
    self._log_progress()
    self._task_dict.pop(task_type_str)
Call when processing of all tasks of the given type is completed typically just after exiting a loop that processes many tasks of the given type .
45,392
def start_task(self, task_type_str, current_task_index=None):
    """Record that one task of the given type is starting.

    With no explicit index, the task counter is advanced by one; otherwise
    it is set to the given index. Progress is logged periodically.
    """
    assert (
        task_type_str in self._task_dict
    ), "Task type has not been started yet: {}".format(task_type_str)
    task_info = self._task_dict[task_type_str]
    if current_task_index is None:
        task_info["task_idx"] += 1
    else:
        task_info["task_idx"] = current_task_index
    self._log_progress_if_interval_elapsed()
Call when processing is about to start on a single task of the given task type typically at the top inside of the loop that processes the tasks .
45,393
def event(self, event_name):
    """Count one occurrence of a named event during task processing.

    Progress is logged periodically.
    """
    self._event_dict[event_name] = self._event_dict.get(event_name, 0) + 1
    self._log_progress_if_interval_elapsed()
Register an event that occurred during processing of a task of the given type .
45,394
def admeig(classname, f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau):
    """Compute eigenvalues and eigenvectors of the transposed QCD anomalous
    dimension matrix for a sector, defined in adm.adm_s_<classname>.

    :param f: number of active flavors.
    :param m_*: quark and lepton masses, passed through to the ADM function.
    :returns: (w, v) as returned by numpy.linalg.eig applied to A.T.
    """
    args = f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau
    A = getattr(adm, 'adm_s_' + classname)(*args)
    # Restrict the ADM to the Wilson coefficients permissible for f flavors.
    perm_keys = get_permissible_wcs(classname, f)
    if perm_keys != 'all':
        A = A[perm_keys][:, perm_keys]
    w, v = np.linalg.eig(A.T)
    return w, v
Compute the eigenvalues and eigenvectors for a QCD anomalous dimension matrix that is defined in adm . adm_s_X where X is the name of the sector .
45,395
def run_sector(sector, C_in, eta_s, f, p_in, p_out, qed_order=1, qcd_order=1):
    r"""Solve the WET RGE for a specific sector.

    :param sector: sector name, used as key into ``sectors`` and ``coeffs``.
    :param C_in: dict of input Wilson coefficients.
    :param eta_s: QCD evolution variable passed to getUs/getUe.
    :param f: number of active flavors.
    :param p_in: parameter dict at the input scale.
    :param p_out: parameter dict at the output scale.
    :param qed_order: 0 disables, 1 enables the QED running.
    :param qcd_order: 0 disables, 1 enables the QCD running.
    :returns: OrderedDict mapping coefficient keys to evolved values.
    """
    Cdictout = OrderedDict()
    classname = sectors[sector]
    keylist = coeffs[sector]
    # 'dF=0' uses a different key in the permissible-WC lookup.
    if sector == 'dF=0':
        perm_keys = get_permissible_wcs('dF0', f)
    else:
        perm_keys = get_permissible_wcs(sector, f)
    if perm_keys != 'all':
        keylist = np.asarray(keylist)[perm_keys]
    C_input = np.array([C_in.get(key, 0) for key in keylist])
    # Nothing to evolve for all-zero input or scale-invariant sectors.
    if np.count_nonzero(C_input) == 0 or classname == 'inv':
        C_result = C_input
    else:
        # Rescale coefficients at the input scale, evolve, then undo the
        # scaling at the output scale.
        C_scaled = np.asarray(
            [C_input[i] * scale_C(key, p_in) for i, key in enumerate(keylist)]
        )
        if qcd_order == 0:
            Us = np.eye(len(C_scaled))
        elif qcd_order == 1:
            Us = getUs(classname, eta_s, f, **p_in)
        if qed_order == 0:
            Ue = np.zeros(C_scaled.shape)
        elif qed_order == 1:
            if qcd_order == 0:
                Ue = getUe(classname, 1, f, **p_in)
            else:
                Ue = getUe(classname, eta_s, f, **p_in)
        C_out = (Us + Ue) @ C_scaled
        C_result = [C_out[i] / scale_C(key, p_out) for i, key in enumerate(keylist)]
    for j in range(len(C_result)):
        Cdictout[keylist[j]] = C_result[j]
    return Cdictout
Solve the WET RGE for a specific sector.
45,396
def _merge_region_trees ( self , dst_tree , src_tree , pid ) : for k , v in list ( src_tree . items ( ) ) : if k not in dst_tree or dst_tree [ k ] is None : dst_tree [ k ] = { } dst_tree [ k ] [ pid ] = None if v is not None : self . _merge_region_trees ( dst_tree [ k ] , v , pid )
Merge conflicts occur if a folder in one tree is a file in the other .
45,397
def save(self, **kwargs):
    """Save, repackaging slug validation errors as 400 (ParseError)
    instead of letting them surface as server errors.
    """
    try:
        return super().save(**kwargs)
    except SlugError as error:
        raise ParseError(error)
Override save method to catch handled errors and repackage them as 400 errors .
45,398
def get_api_major_by_base_url(base_url, *client_arg_list, **client_arg_dict):
    """Read the Node document from a Member Node and return the highest
    major DataONE API version it advertises as available.

    Returns 0 when no service is marked available.
    """
    api_major = 0
    client = d1_client.mnclient.MemberNodeClient(
        base_url, *client_arg_list, **client_arg_dict
    )
    node_pyxb = client.getCapabilities()
    for service_pyxb in node_pyxb.services.service:
        if service_pyxb.available:
            # Version strings end in the major digit (e.g. "v1", "v2").
            api_major = max(api_major, int(service_pyxb.version[-1]))
    return api_major
Read the Node document from a node and return an int containing the latest D1 API version supported by the node .
45,399
def delete_unused_subjects():
    """Delete subjects that are no longer referenced by any science object,
    event log entry, permission, or create/update/delete whitelist entry.
    """
    query = d1_gmn.app.models.Subject.objects.all()
    query = query.filter(scienceobject_submitter__isnull=True)
    query = query.filter(scienceobject_rights_holder__isnull=True)
    query = query.filter(eventlog__isnull=True)
    query = query.filter(permission__isnull=True)
    query = query.filter(whitelistforcreateupdatedelete__isnull=True)
    # Consistency fix: use the module-level 'logger' throughout (the
    # original mixed in the root 'logging' logger for the per-subject
    # lines) and lazy %-style args.
    logger.debug('Deleting %s unused subjects:', query.count())
    for subject_model in query.all():
        logger.debug(' %s', subject_model.subject)
    query.delete()
Delete any unused subjects from the database .