idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
246,200
def add_badge ( self , kind ) : badge = self . get_badge ( kind ) if badge : return badge if kind not in getattr ( self , '__badges__' , { } ) : msg = 'Unknown badge type for {model}: {kind}' raise db . ValidationError ( msg . format ( model = self . __class__ . __name__ , kind = kind ) ) badge = Badge ( kind = kind ) if current_user . is_authenticated : badge . created_by = current_user . id self . update ( __raw__ = { '$push' : { 'badges' : { '$each' : [ badge . to_mongo ( ) ] , '$position' : 0 } } } ) self . reload ( ) post_save . send ( self . __class__ , document = self ) on_badge_added . send ( self , kind = kind ) return self . get_badge ( kind )
Perform an atomic prepend for a new badge
211
10
246,201
def remove_badge ( self , kind ) : self . update ( __raw__ = { '$pull' : { 'badges' : { 'kind' : kind } } } ) self . reload ( ) on_badge_removed . send ( self , kind = kind ) post_save . send ( self . __class__ , document = self )
Perform an atomic removal for a given badge
78
9
246,202
def toggle_badge ( self , kind ) : badge = self . get_badge ( kind ) if badge : return self . remove_badge ( kind ) else : return self . add_badge ( kind )
Toggle a bdage given its kind
47
9
246,203
def badge_label ( self , badge ) : kind = badge . kind if isinstance ( badge , Badge ) else badge return self . __badges__ [ kind ]
Display the badge label for a given kind
35
8
246,204
def discussions_for ( user , only_open = True ) : # Only fetch required fields for discussion filtering (id and slug) # Greatly improve performances and memory usage datasets = Dataset . objects . owned_by ( user . id , * user . organizations ) . only ( 'id' , 'slug' ) reuses = Reuse . objects . owned_by ( user . id , * user . organizations ) . only ( 'id' , 'slug' ) qs = Discussion . objects ( subject__in = list ( datasets ) + list ( reuses ) ) if only_open : qs = qs ( closed__exists = False ) return qs
Build a queryset to query discussions related to a given user s assets .
144
16
246,205
def nofollow_callback ( attrs , new = False ) : parsed_url = urlparse ( attrs [ ( None , 'href' ) ] ) if parsed_url . netloc in ( '' , current_app . config [ 'SERVER_NAME' ] ) : attrs [ ( None , 'href' ) ] = '{scheme}://{netloc}{path}' . format ( scheme = 'https' if request . is_secure else 'http' , netloc = current_app . config [ 'SERVER_NAME' ] , path = parsed_url . path ) return attrs else : rel = [ x for x in attrs . get ( ( None , 'rel' ) , '' ) . split ( ' ' ) if x ] if 'nofollow' not in [ x . lower ( ) for x in rel ] : rel . append ( 'nofollow' ) attrs [ ( None , 'rel' ) ] = ' ' . join ( rel ) return attrs
Turn relative links into external ones and avoid nofollow for us
216
12
246,206
def bleach_clean ( stream ) : return bleach . clean ( stream , tags = current_app . config [ 'MD_ALLOWED_TAGS' ] , attributes = current_app . config [ 'MD_ALLOWED_ATTRIBUTES' ] , styles = current_app . config [ 'MD_ALLOWED_STYLES' ] , strip_comments = False )
Sanitize malicious attempts but keep the EXCERPT_TOKEN . By default only keeps bleach . ALLOWED_TAGS .
85
29
246,207
def toggle ( path_or_id , badge_kind ) : if exists ( path_or_id ) : with open ( path_or_id ) as open_file : for id_or_slug in open_file . readlines ( ) : toggle_badge ( id_or_slug . strip ( ) , badge_kind ) else : toggle_badge ( path_or_id , badge_kind )
Toggle a badge_kind for a given path_or_id
92
14
246,208
def upload ( name ) : storage = fs . by_name ( name ) return jsonify ( success = True , * * handle_upload ( storage ) )
Handle upload on POST if authorized .
33
7
246,209
def unindex_model_on_delete ( sender , document , * * kwargs ) : if current_app . config . get ( 'AUTO_INDEX' ) : unindex . delay ( document )
Unindex Mongo document on post_delete
46
8
246,210
def register ( adapter ) : # register the class in the catalog if adapter . model and adapter . model not in adapter_catalog : adapter_catalog [ adapter . model ] = adapter # Automatically (re|un)index objects on save/delete post_save . connect ( reindex_model_on_save , sender = adapter . model ) post_delete . connect ( unindex_model_on_delete , sender = adapter . model ) return adapter
Register a search adapter
97
4
246,211
def process ( self , formdata = None , obj = None , data = None , * * kwargs ) : self . _obj = obj super ( CommonFormMixin , self ) . process ( formdata , obj , data , * * kwargs )
Wrap the process method to store the current object instance
56
11
246,212
def get ( name ) : linkcheckers = get_enabled ( ENTRYPOINT , current_app ) linkcheckers . update ( no_check = NoCheckLinkchecker ) # no_check always enabled selected_linkchecker = linkcheckers . get ( name ) if not selected_linkchecker : default_linkchecker = current_app . config . get ( 'LINKCHECKING_DEFAULT_LINKCHECKER' ) selected_linkchecker = linkcheckers . get ( default_linkchecker ) if not selected_linkchecker : log . error ( 'No linkchecker found ({} requested and no fallback)' . format ( name ) ) return selected_linkchecker
Get a linkchecker given its name or fallback on default
152
13
246,213
def get_notifications ( user ) : notifications = [ ] for name , func in _providers . items ( ) : notifications . extend ( [ { 'type' : name , 'created_on' : dt , 'details' : details } for dt , details in func ( user ) ] ) return notifications
List notification for a given user
67
6
246,214
def count_tags ( self ) : for key , model in TAGGED . items ( ) : collection = '{0}_tags' . format ( key ) results = ( model . objects ( tags__exists = True ) . map_reduce ( map_tags , reduce_tags , collection ) ) for result in results : tag , created = Tag . objects . get_or_create ( name = result . key , auto_save = False ) tag . counts [ key ] = int ( result . value ) if result . value else 0 tag . save ( )
Count tag occurences by type and update the tag collection
121
12
246,215
def from_model ( cls , document ) : return cls ( meta = { 'id' : document . id } , * * cls . serialize ( document ) )
By default use the to_dict method
38
8
246,216
def completer_tokenize ( cls , value , min_length = 3 ) : tokens = list ( itertools . chain ( * [ [ m for m in n . split ( "'" ) if len ( m ) > min_length ] for n in value . split ( ' ' ) ] ) ) return list ( set ( [ value ] + tokens + [ ' ' . join ( tokens ) ] ) )
Quick and dirty tokenizer for completion suggester
88
9
246,217
def facet_search ( cls , * facets ) : f = dict ( ( k , v ) for k , v in cls . facets . items ( ) if k in facets ) class TempSearch ( SearchQuery ) : adapter = cls analyzer = cls . analyzer boosters = cls . boosters doc_types = cls facets = f fields = cls . fields fuzzy = cls . fuzzy match_type = cls . match_type model = cls . model return TempSearch
Build a FacetSearch for a given list of facets
105
11
246,218
def populate_slug ( instance , field ) : value = getattr ( instance , field . db_field ) try : previous = instance . __class__ . objects . get ( id = instance . id ) except Exception : previous = None # Field value has changed changed = field . db_field in instance . _get_changed_fields ( ) # Field initial value has been manually set manual = not previous and value or changed if not manual and field . populate_from : # value to slugify is extracted from populate_from parameter value = getattr ( instance , field . populate_from ) if previous and value == getattr ( previous , field . populate_from ) : return value if previous and getattr ( previous , field . db_field ) == value : # value is unchanged from DB return value if previous and not changed and not field . update : # Field is not manually set and slug should not update on change return value slug = field . slugify ( value ) # This can happen when serializing an object which does not contain # the properties used to generate the slug. Typically, when such # an object is passed to one of the Celery workers (see issue #20). if slug is None : return old_slug = getattr ( previous , field . db_field , None ) if slug == old_slug : return slug # Ensure uniqueness if field . unique : base_slug = slug index = 1 qs = instance . __class__ . objects if previous : qs = qs ( id__ne = previous . id ) def exists ( s ) : return qs ( class_check = False , * * { field . db_field : s } ) . limit ( 1 ) . count ( True ) > 0 while exists ( slug ) : slug = '{0}-{1}' . format ( base_slug , index ) index += 1 # Track old slugs for this class if field . follow and old_slug != slug : ns = instance . __class__ . __name__ # Destroy redirections from this new slug SlugFollow . objects ( namespace = ns , old_slug = slug ) . delete ( ) if old_slug : # Create a redirect for previous slug slug_follower , created = SlugFollow . objects . get_or_create ( namespace = ns , old_slug = old_slug , auto_save = False , ) slug_follower . new_slug = slug slug_follower . save ( ) # Maintain previous redirects SlugFollow . objects ( namespace = ns , new_slug = old_slug ) . update ( new_slug = slug ) setattr ( instance , field . db_field , slug ) return slug
Populate a slug field if needed .
575
8
246,219
def slugify ( self , value ) : if value is None : return return slugify . slugify ( value , max_length = self . max_length , separator = self . separator , to_lower = self . lower_case )
Apply slugification according to specified field rules
52
8
246,220
def cleanup_on_delete ( self , sender , document , * * kwargs ) : if not self . follow or sender is not self . owner_document : return slug = getattr ( document , self . db_field ) namespace = self . owner_document . __name__ SlugFollow . objects ( namespace = namespace , new_slug = slug ) . delete ( )
Clean up slug redirections on object deletion
80
8
246,221
def badge_form ( model ) : class BadgeForm ( ModelForm ) : model_class = Badge kind = fields . RadioField ( _ ( 'Kind' ) , [ validators . DataRequired ( ) ] , choices = model . __badges__ . items ( ) , description = _ ( 'Kind of badge (certified, etc)' ) ) return BadgeForm
A form factory for a given model badges
77
8
246,222
def delay ( name , args , kwargs ) : args = args or [ ] kwargs = dict ( k . split ( ) for k in kwargs ) if kwargs else { } if name not in celery . tasks : log . error ( 'Job %s not found' , name ) job = celery . tasks [ name ] log . info ( 'Sending job %s' , name ) async_result = job . delay ( * args , * * kwargs ) log . info ( 'Job %s sended to workers' , async_result . id )
Run a job asynchronously
126
6
246,223
def is_url ( default_scheme = 'http' , * * kwargs ) : def converter ( value ) : if value is None : return value if '://' not in value and default_scheme : value = '://' . join ( ( default_scheme , value . strip ( ) ) ) try : return uris . validate ( value ) except uris . ValidationError as e : raise Invalid ( e . message ) return converter
Return a converter that converts a clean string to an URL .
97
12
246,224
def hash ( value ) : if not value : return elif len ( value ) == 32 : type = 'md5' elif len ( value ) == 40 : type = 'sha1' elif len ( value ) == 64 : type = 'sha256' else : return None return { 'type' : type , 'value' : value }
Detect an hash type
74
4
246,225
def iter_adapters ( ) : adapters = adapter_catalog . values ( ) return sorted ( adapters , key = lambda a : a . model . __name__ )
Iter over adapter in predictable way
36
6
246,226
def iter_qs ( qs , adapter ) : for obj in qs . no_cache ( ) . no_dereference ( ) . timeout ( False ) : if adapter . is_indexable ( obj ) : try : doc = adapter . from_model ( obj ) . to_dict ( include_meta = True ) yield doc except Exception as e : model = adapter . model . __name__ log . error ( 'Unable to index %s "%s": %s' , model , str ( obj . id ) , str ( e ) , exc_info = True )
Safely iterate over a DB QuerySet yielding ES documents
124
12
246,227
def index_model ( index_name , adapter ) : model = adapter . model log . info ( 'Indexing {0} objects' . format ( model . __name__ ) ) qs = model . objects if hasattr ( model . objects , 'visible' ) : qs = qs . visible ( ) if adapter . exclude_fields : qs = qs . exclude ( * adapter . exclude_fields ) docs = iter_qs ( qs , adapter ) docs = iter_for_index ( docs , index_name ) for ok , info in streaming_bulk ( es . client , docs , raise_on_error = False ) : if not ok : log . error ( 'Unable to index %s "%s": %s' , model . __name__ , info [ 'index' ] [ '_id' ] , info [ 'index' ] [ 'error' ] )
Indel all objects given a model
192
7
246,228
def enable_refresh ( index_name ) : # noqa refresh_interval = current_app . config [ 'ELASTICSEARCH_REFRESH_INTERVAL' ] es . indices . put_settings ( index = index_name , body = { 'index' : { 'refresh_interval' : refresh_interval } } ) es . indices . forcemerge ( index = index_name , request_timeout = 30 )
Enable refresh and force merge . To be used after indexing .
97
13
246,229
def set_alias ( index_name , delete = True ) : log . info ( 'Creating alias "{0}" on index "{1}"' . format ( es . index_name , index_name ) ) if es . indices . exists_alias ( name = es . index_name ) : alias = es . indices . get_alias ( name = es . index_name ) previous_indices = alias . keys ( ) if index_name not in previous_indices : es . indices . put_alias ( index = index_name , name = es . index_name ) for index in previous_indices : if index != index_name : es . indices . delete_alias ( index = index , name = es . index_name ) if delete : es . indices . delete ( index = index ) else : es . indices . put_alias ( index = index_name , name = es . index_name )
Properly end an indexation by creating an alias . Previous alias is deleted if needed .
195
19
246,230
def handle_error ( index_name , keep = False ) : # Handle keyboard interrupt signal . signal ( signal . SIGINT , signal . default_int_handler ) signal . signal ( signal . SIGTERM , signal . default_int_handler ) has_error = False try : yield except KeyboardInterrupt : print ( '' ) # Proper warning message under the "^C" display log . warning ( 'Interrupted by signal' ) has_error = True except Exception as e : log . error ( e ) has_error = True if has_error : if not keep : log . info ( 'Removing index %s' , index_name ) es . indices . delete ( index = index_name ) sys . exit ( - 1 )
Handle errors while indexing . In case of error properly log it remove the index and exit . If keep is True index is not deleted .
157
28
246,231
def index ( models = None , name = None , force = False , keep = False ) : index_name = name or default_index_name ( ) doc_types_names = [ m . __name__ . lower ( ) for m in adapter_catalog . keys ( ) ] models = [ model . lower ( ) . rstrip ( 's' ) for model in ( models or [ ] ) ] for model in models : if model not in doc_types_names : log . error ( 'Unknown model %s' , model ) sys . exit ( - 1 ) log . info ( 'Initiliazing index "{0}"' . format ( index_name ) ) if es . indices . exists ( index_name ) : if IS_TTY and not force : msg = 'Index {0} will be deleted, are you sure?' click . confirm ( msg . format ( index_name ) , abort = True ) es . indices . delete ( index_name ) es . initialize ( index_name ) with handle_error ( index_name , keep ) : disable_refresh ( index_name ) for adapter in iter_adapters ( ) : if not models or adapter . doc_type ( ) . lower ( ) in models : index_model ( index_name , adapter ) else : log . info ( 'Copying {0} objects to the new index' . format ( adapter . model . __name__ ) ) # Need upgrade to Elasticsearch-py 5.0.0 to write: # es.reindex({ # 'source': {'index': es.index_name, 'type': adapter.doc_type()}, # 'dest': {'index': index_name} # }) # # http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex # This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0) # triggers a server-side documents copy. # Instead we use this helper for meant for backward compatibility # but with poor performance as copy is client-side (scan+bulk) es_reindex ( es . client , es . index_name , index_name , scan_kwargs = { 'doc_type' : adapter . doc_type ( ) } ) enable_refresh ( index_name ) # At this step, we don't want error handler to delete the index # in case of error set_alias ( index_name , delete = not keep )
Initialize or rebuild the search index
543
7
246,232
def create_app ( config = 'udata.settings.Defaults' , override = None , init_logging = init_logging ) : app = UDataApp ( APP_NAME ) app . config . from_object ( config ) settings = os . environ . get ( 'UDATA_SETTINGS' , join ( os . getcwd ( ) , 'udata.cfg' ) ) if exists ( settings ) : app . settings_file = settings # Keep track of loaded settings for diagnostic app . config . from_pyfile ( settings ) if override : app . config . from_object ( override ) # Loads defaults from plugins for pkg in entrypoints . get_roots ( app ) : if pkg == 'udata' : continue # Defaults are already loaded module = '{}.settings' . format ( pkg ) if pkgutil . find_loader ( module ) : settings = pkgutil . get_loader ( module ) for key , default in settings . __dict__ . items ( ) : app . config . setdefault ( key , default ) app . json_encoder = UDataJsonEncoder app . debug = app . config [ 'DEBUG' ] and not app . config [ 'TESTING' ] app . wsgi_app = ProxyFix ( app . wsgi_app ) init_logging ( app ) register_extensions ( app ) return app
Factory for a minimal application
304
5
246,233
def standalone ( app ) : from udata import api , core , frontend core . init_app ( app ) frontend . init_app ( app ) api . init_app ( app ) register_features ( app ) return app
Factory for an all in one application
49
7
246,234
def get_migration ( plugin , filename ) : db = get_db ( ) return db . migrations . find_one ( { 'plugin' : plugin , 'filename' : filename } )
Get an existing migration record if exists
42
7
246,235
def record_migration ( plugin , filename , script , * * kwargs ) : db = get_db ( ) db . eval ( RECORD_WRAPPER , plugin , filename , script ) return True
Only record a migration without applying it
45
7
246,236
def available_migrations ( ) : migrations = [ ] for filename in resource_listdir ( 'udata' , 'migrations' ) : if filename . endswith ( '.js' ) : migrations . append ( ( 'udata' , 'udata' , filename ) ) plugins = entrypoints . get_enabled ( 'udata.models' , current_app ) for plugin , module in plugins . items ( ) : if resource_isdir ( module . __name__ , 'migrations' ) : for filename in resource_listdir ( module . __name__ , 'migrations' ) : if filename . endswith ( '.js' ) : migrations . append ( ( plugin , module . __name__ , filename ) ) return sorted ( migrations , key = lambda r : r [ 2 ] )
List available migrations for udata and enabled plugins
181
10
246,237
def log_status ( plugin , filename , status ) : display = ':' . join ( ( plugin , filename ) ) + ' ' log . info ( '%s [%s]' , '{:.<70}' . format ( display ) , status )
Properly display a migration status line
55
8
246,238
def status ( ) : for plugin , package , filename in available_migrations ( ) : migration = get_migration ( plugin , filename ) if migration : status = green ( migration [ 'date' ] . strftime ( DATE_FORMAT ) ) else : status = yellow ( 'Not applied' ) log_status ( plugin , filename , status )
Display the database migrations status
76
6
246,239
def migrate ( record , dry_run = False ) : handler = record_migration if record else execute_migration success = True for plugin , package , filename in available_migrations ( ) : migration = get_migration ( plugin , filename ) if migration or not success : log_status ( plugin , filename , cyan ( 'Skipped' ) ) else : status = magenta ( 'Recorded' ) if record else yellow ( 'Apply' ) log_status ( plugin , filename , status ) script = resource_string ( package , join ( 'migrations' , filename ) ) success &= handler ( plugin , filename , script , dryrun = dry_run )
Perform database migrations
144
5
246,240
def unrecord ( plugin_or_specs , filename ) : plugin , filename = normalize_migration ( plugin_or_specs , filename ) migration = get_migration ( plugin , filename ) if migration : log . info ( 'Removing migration %s:%s' , plugin , filename ) db = get_db ( ) db . eval ( UNRECORD_WRAPPER , migration [ '_id' ] ) else : log . error ( 'Migration not found %s:%s' , plugin , filename )
Remove a database migration record .
115
6
246,241
def validate ( url , schemes = None , tlds = None , private = None , local = None , credentials = None ) : url = url . strip ( ) private = config_for ( private , 'URLS_ALLOW_PRIVATE' ) local = config_for ( local , 'URLS_ALLOW_LOCAL' ) credentials = config_for ( credentials , 'URLS_ALLOW_CREDENTIALS' ) schemes = config_for ( schemes , 'URLS_ALLOWED_SCHEMES' ) tlds = config_for ( tlds , 'URLS_ALLOWED_TLDS' ) match = URL_REGEX . match ( url ) if not match : error ( url ) scheme = ( match . group ( 'scheme' ) or '' ) . lower ( ) if scheme and scheme not in schemes : error ( url , 'Invalid scheme {0}' . format ( scheme ) ) if not credentials and match . group ( 'credentials' ) : error ( url , 'Credentials in URL are not allowed' ) tld = match . group ( 'tld' ) if tld and tld not in tlds and tld . encode ( 'idna' ) not in tlds : error ( url , 'Invalid TLD {0}' . format ( tld ) ) ip = match . group ( 'ipv6' ) or match . group ( 'ipv4' ) if ip : try : ip = IPAddress ( ip ) except AddrFormatError : error ( url ) if ip . is_multicast ( ) : error ( url , '{0} is a multicast IP' . format ( ip ) ) elif not ip . is_loopback ( ) and ip . is_hostmask ( ) or ip . is_netmask ( ) : error ( url , '{0} is a mask IP' . format ( ip ) ) if not local : if ip and ip . is_loopback ( ) or match . group ( 'localhost' ) : error ( url , 'is a local URL' ) if not private and ip and ip . is_private ( ) : error ( url , 'is a private URL' ) return url
Validate and normalize an URL
483
7
246,242
def get_json_ld_extra ( key , value ) : value = value . serialize ( ) if hasattr ( value , 'serialize' ) else value return { '@type' : 'http://schema.org/PropertyValue' , 'name' : key , 'value' : value , }
Serialize an extras key value pair into JSON - LD
67
11
246,243
def get_resource ( id ) : dataset = Dataset . objects ( resources__id = id ) . first ( ) if dataset : return get_by ( dataset . resources , 'id' , id ) else : return CommunityResource . objects ( id = id ) . first ( )
Fetch a resource given its UUID
60
8
246,244
def guess ( cls , * strings , * * kwargs ) : license = None for string in strings : license = cls . guess_one ( string ) if license : break return license or kwargs . get ( 'default' )
Try to guess a license from a list of strings .
52
11
246,245
def guess_one ( cls , text ) : if not text : return qs = cls . objects text = text . strip ( ) . lower ( ) # Stored identifiers are lower case slug = cls . slug . slugify ( text ) # Use slug as it normalize string license = qs ( db . Q ( id = text ) | db . Q ( slug = slug ) | db . Q ( url = text ) | db . Q ( alternate_urls = text ) ) . first ( ) if license is None : # Try to single match with a low Damerau-Levenshtein distance computed = ( ( l , rdlevenshtein ( l . slug , slug ) ) for l in cls . objects ) candidates = [ l for l , d in computed if d <= MAX_DISTANCE ] # If there is more that one match, we cannot determinate # which one is closer to safely choose between candidates if len ( candidates ) == 1 : license = candidates [ 0 ] if license is None : # Try to single match with a low Damerau-Levenshtein distance computed = ( ( l , rdlevenshtein ( cls . slug . slugify ( t ) , slug ) ) for l in cls . objects for t in l . alternate_titles ) candidates = [ l for l , d in computed if d <= MAX_DISTANCE ] # If there is more that one match, we cannot determinate # which one is closer to safely choose between candidates if len ( candidates ) == 1 : license = candidates [ 0 ] return license
Try to guess license from a string .
339
8
246,246
def need_check ( self ) : min_cache_duration , max_cache_duration , ko_threshold = [ current_app . config . get ( k ) for k in ( 'LINKCHECKING_MIN_CACHE_DURATION' , 'LINKCHECKING_MAX_CACHE_DURATION' , 'LINKCHECKING_UNAVAILABLE_THRESHOLD' , ) ] count_availability = self . extras . get ( 'check:count-availability' , 1 ) is_available = self . check_availability ( ) if is_available == 'unknown' : return True elif is_available or count_availability > ko_threshold : delta = min ( min_cache_duration * count_availability , max_cache_duration ) else : delta = min_cache_duration if self . extras . get ( 'check:date' ) : limit_date = datetime . now ( ) - timedelta ( minutes = delta ) check_date = self . extras [ 'check:date' ] if not isinstance ( check_date , datetime ) : try : check_date = parse_dt ( check_date ) except ( ValueError , TypeError ) : return True if check_date >= limit_date : return False return True
Does the resource needs to be checked against its linkchecker?
277
13
246,247
def check_availability ( self ) : # Only check remote resources. remote_resources = [ resource for resource in self . resources if resource . filetype == 'remote' ] if not remote_resources : return [ ] return [ resource . check_availability ( ) for resource in remote_resources ]
Check if resources from that dataset are available .
61
9
246,248
def next_update ( self ) : delta = None if self . frequency == 'daily' : delta = timedelta ( days = 1 ) elif self . frequency == 'weekly' : delta = timedelta ( weeks = 1 ) elif self . frequency == 'fortnighly' : delta = timedelta ( weeks = 2 ) elif self . frequency == 'monthly' : delta = timedelta ( weeks = 4 ) elif self . frequency == 'bimonthly' : delta = timedelta ( weeks = 4 * 2 ) elif self . frequency == 'quarterly' : delta = timedelta ( weeks = 52 / 4 ) elif self . frequency == 'biannual' : delta = timedelta ( weeks = 52 / 2 ) elif self . frequency == 'annual' : delta = timedelta ( weeks = 52 ) elif self . frequency == 'biennial' : delta = timedelta ( weeks = 52 * 2 ) elif self . frequency == 'triennial' : delta = timedelta ( weeks = 52 * 3 ) elif self . frequency == 'quinquennial' : delta = timedelta ( weeks = 52 * 5 ) if delta is None : return else : return self . last_update + delta
Compute the next expected update date
262
7
246,249
def quality ( self ) : from udata . models import Discussion # noqa: Prevent circular imports result = { } if not self . id : # Quality is only relevant on saved Datasets return result if self . next_update : result [ 'frequency' ] = self . frequency result [ 'update_in' ] = - ( self . next_update - datetime . now ( ) ) . days if self . tags : result [ 'tags_count' ] = len ( self . tags ) if self . description : result [ 'description_length' ] = len ( self . description ) if self . resources : result [ 'has_resources' ] = True result [ 'has_only_closed_or_no_formats' ] = all ( resource . closed_or_no_format for resource in self . resources ) result [ 'has_unavailable_resources' ] = not all ( self . check_availability ( ) ) discussions = Discussion . objects ( subject = self ) if discussions : result [ 'discussions' ] = len ( discussions ) result [ 'has_untreated_discussions' ] = not all ( discussion . person_involved ( self . owner ) for discussion in discussions ) result [ 'score' ] = self . compute_quality_score ( result ) return result
Return a dict filled with metrics related to the inner
274
10
246,250
def compute_quality_score ( self , quality ) : score = 0 UNIT = 2 if 'frequency' in quality : # TODO: should be related to frequency. if quality [ 'update_in' ] < 0 : score += UNIT else : score -= UNIT if 'tags_count' in quality : if quality [ 'tags_count' ] > 3 : score += UNIT if 'description_length' in quality : if quality [ 'description_length' ] > 100 : score += UNIT if 'has_resources' in quality : if quality [ 'has_only_closed_or_no_formats' ] : score -= UNIT else : score += UNIT if quality [ 'has_unavailable_resources' ] : score -= UNIT else : score += UNIT if 'discussions' in quality : if quality [ 'has_untreated_discussions' ] : score -= UNIT else : score += UNIT if score < 0 : return 0 return score
Compute the score related to the quality of that dataset .
210
12
246,251
def add_resource ( self , resource ) : resource . validate ( ) self . update ( __raw__ = { '$push' : { 'resources' : { '$each' : [ resource . to_mongo ( ) ] , '$position' : 0 } } } ) self . reload ( ) post_save . send ( self . __class__ , document = self , resource_added = resource . id )
Perform an atomic prepend for a new resource
90
10
246,252
def update_resource ( self , resource ) : index = self . resources . index ( resource ) data = { 'resources__{index}' . format ( index = index ) : resource } self . update ( * * data ) self . reload ( ) post_save . send ( self . __class__ , document = self )
Perform an atomic update for an existing resource
69
9
246,253
def get_aggregation ( self , name ) : agg = self . aggregations [ name ] if 'buckets' in agg : return agg [ 'buckets' ] else : return agg
Fetch an aggregation result given its name
40
8
246,254
def language ( lang_code ) : ctx = None if not request : ctx = current_app . test_request_context ( ) ctx . push ( ) backup = g . get ( 'lang_code' ) g . lang_code = lang_code refresh ( ) yield g . lang_code = backup if ctx : ctx . pop ( ) refresh ( )
Force a given language
81
4
246,255
def redirect_to_lang ( * args , * * kwargs ) : endpoint = request . endpoint . replace ( '_redirect' , '' ) kwargs = multi_to_dict ( request . args ) kwargs . update ( request . view_args ) kwargs [ 'lang_code' ] = default_lang return redirect ( url_for ( endpoint , * * kwargs ) )
Redirect non lang - prefixed urls to default language .
89
13
246,256
def redirect_to_unlocalized ( * args , * * kwargs ) : endpoint = request . endpoint . replace ( '_redirect' , '' ) kwargs = multi_to_dict ( request . args ) kwargs . update ( request . view_args ) kwargs . pop ( 'lang_code' , None ) return redirect ( url_for ( endpoint , * * kwargs ) )
Redirect lang - prefixed urls to no prefixed URL .
91
14
246,257
def get_translations ( self ) : ctx = stack . top if ctx is None : return NullTranslations ( ) locale = get_locale ( ) cache = self . get_translations_cache ( ctx ) translations = cache . get ( str ( locale ) ) if translations is None : translations_dir = self . get_translations_path ( ctx ) translations = Translations . load ( translations_dir , locale , domain = self . domain ) # Load plugins translations if isinstance ( translations , Translations ) : # Load core extensions translations from wtforms . i18n import messages_path wtforms_translations = Translations . load ( messages_path ( ) , locale , domain = 'wtforms' ) translations . merge ( wtforms_translations ) import flask_security flask_security_translations = Translations . load ( join ( flask_security . __path__ [ 0 ] , 'translations' ) , locale , domain = 'flask_security' ) translations . merge ( flask_security_translations ) for pkg in entrypoints . get_roots ( current_app ) : package = pkgutil . get_loader ( pkg ) path = join ( package . filename , 'translations' ) domains = [ f . replace ( path , '' ) . replace ( '.pot' , '' ) [ 1 : ] for f in iglob ( join ( path , '*.pot' ) ) ] for domain in domains : translations . merge ( Translations . load ( path , locale , domain = domain ) ) # Allows the theme to provide or override translations from . import theme theme_translations_dir = join ( theme . current . path , 'translations' ) if exists ( theme_translations_dir ) : domain = theme . current . identifier theme_translations = Translations . load ( theme_translations_dir , locale , domain = domain ) translations . merge ( theme_translations ) cache [ str ( locale ) ] = translations return translations
Returns the correct gettext translations that should be used for this request . This will never fail and return a dummy translation object if used outside of the request or if a translation cannot be found .
427
38
246,258
def person_involved ( self , person ) : return any ( message . posted_by == person for message in self . discussion )
Return True if the given person has been involved in the
27
11
246,259
def is_ignored ( resource ) : ignored_domains = current_app . config [ 'LINKCHECKING_IGNORE_DOMAINS' ] url = resource . url if url : parsed_url = urlparse ( url ) return parsed_url . netloc in ignored_domains return True
Check of the resource s URL is part of LINKCHECKING_IGNORE_DOMAINS
65
19
246,260
def check_resource ( resource ) : linkchecker_type = resource . extras . get ( 'check:checker' ) LinkChecker = get_linkchecker ( linkchecker_type ) if not LinkChecker : return { 'error' : 'No linkchecker configured.' } , 503 if is_ignored ( resource ) : return dummy_check_response ( ) result = LinkChecker ( ) . check ( resource ) if not result : return { 'error' : 'No response from linkchecker' } , 503 elif result . get ( 'check:error' ) : return { 'error' : result [ 'check:error' ] } , 500 elif not result . get ( 'check:status' ) : return { 'error' : 'No status in response from linkchecker' } , 503 # store the check result in the resource's extras # XXX maybe this logic should be in the `Resource` model? previous_status = resource . extras . get ( 'check:available' ) check_keys = _get_check_keys ( result , resource , previous_status ) resource . extras . update ( check_keys ) resource . save ( signal_kwargs = { 'ignores' : [ 'post_save' ] } ) # Prevent signal triggering on dataset return result
Check a resource availability against a linkchecker backend
279
10
246,261
def owned_pre_save ( sender , document , * * kwargs ) : if not isinstance ( document , Owned ) : return changed_fields = getattr ( document , '_changed_fields' , [ ] ) if 'organization' in changed_fields : if document . owner : # Change from owner to organization document . _previous_owner = document . owner document . owner = None else : # Change from org to another # Need to fetch previous value in base original = sender . objects . only ( 'organization' ) . get ( pk = document . pk ) document . _previous_owner = original . organization elif 'owner' in changed_fields : if document . organization : # Change from organization to owner document . _previous_owner = document . organization document . organization = None else : # Change from owner to another # Need to fetch previous value in base original = sender . objects . only ( 'owner' ) . get ( pk = document . pk ) document . _previous_owner = original . owner
Owned mongoengine . pre_save signal handler Need to fetch original owner before the new one erase it .
224
23
246,262
def owned_post_save ( sender , document , * * kwargs ) : if isinstance ( document , Owned ) and hasattr ( document , '_previous_owner' ) : Owned . on_owner_change . send ( document , previous = document . _previous_owner )
Owned mongoengine . post_save signal handler Dispatch the Owned . on_owner_change signal once the document has been saved including the previous owner .
65
33
246,263
def get_enabled_plugins ( ) : plugins = entrypoints . get_enabled ( 'udata.preview' , current_app ) . values ( ) valid = [ p for p in plugins if issubclass ( p , PreviewPlugin ) ] for plugin in plugins : if plugin not in valid : clsname = plugin . __name__ msg = '{0} is not a valid preview plugin' . format ( clsname ) warnings . warn ( msg , PreviewWarning ) return [ p ( ) for p in sorted ( valid , key = lambda p : 1 if p . fallback else 0 ) ]
Returns enabled preview plugins .
129
5
246,264
def get_preview_url ( resource ) : candidates = ( p . preview_url ( resource ) for p in get_enabled_plugins ( ) if p . can_preview ( resource ) ) return next ( iter ( candidates ) , None )
Returns the most pertinent preview URL associated to the resource if any .
53
13
246,265
def get_by ( lst , field , value ) : for row in lst : if ( ( isinstance ( row , dict ) and row . get ( field ) == value ) or ( getattr ( row , field , None ) == value ) ) : return row
Find an object in a list given a field value
57
10
246,266
def multi_to_dict ( multi ) : return dict ( ( key , value [ 0 ] if len ( value ) == 1 else value ) for key , value in multi . to_dict ( False ) . items ( ) )
Transform a Werkzeug multidictionnary into a flat dictionnary
48
17
246,267
def daterange_start ( value ) : if not value : return None elif isinstance ( value , datetime ) : return value . date ( ) elif isinstance ( value , date ) : return value result = parse_dt ( value ) . date ( ) dashes = value . count ( '-' ) if dashes >= 2 : return result elif dashes == 1 : # Year/Month only return result . replace ( day = 1 ) else : # Year only return result . replace ( day = 1 , month = 1 )
Parse a date range start boundary
114
7
246,268
def daterange_end ( value ) : if not value : return None elif isinstance ( value , datetime ) : return value . date ( ) elif isinstance ( value , date ) : return value result = parse_dt ( value ) . date ( ) dashes = value . count ( '-' ) if dashes >= 2 : # Full date return result elif dashes == 1 : # Year/Month return result + relativedelta ( months = + 1 , days = - 1 , day = 1 ) else : # Year only return result . replace ( month = 12 , day = 31 )
Parse a date range end boundary
128
7
246,269
def to_iso ( dt ) : if isinstance ( dt , datetime ) : return to_iso_datetime ( dt ) elif isinstance ( dt , date ) : return to_iso_date ( dt )
Format a date or datetime into an ISO - 8601 string
52
13
246,270
def to_iso_datetime ( dt ) : if dt : date_str = to_iso_date ( dt ) time_str = '{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}' . format ( dt = dt ) if isinstance ( dt , datetime ) else '00:00:00' return 'T' . join ( ( date_str , time_str ) )
Format a date or datetime into an ISO - 8601 datetime string .
103
16
246,271
def recursive_get ( obj , key ) : if not obj or not key : return parts = key . split ( '.' ) if isinstance ( key , basestring ) else key key = parts . pop ( 0 ) if isinstance ( obj , dict ) : value = obj . get ( key , None ) else : value = getattr ( obj , key , None ) return recursive_get ( value , parts ) if parts else value
Get an attribute or a key recursively .
92
10
246,272
def unique_string ( length = UUID_LENGTH ) : # We need a string at least as long as length string = str ( uuid4 ( ) ) * int ( math . ceil ( length / float ( UUID_LENGTH ) ) ) return string [ : length ] if length else string
Generate a unique string
65
5
246,273
def safe_unicode ( string ) : if not isinstance ( string , basestring ) : string = unicode ( string ) if isinstance ( string , unicode ) : string = string . encode ( 'utf8' ) return string
Safely transform any object into utf8 encoded bytes
51
11
246,274
def redirect_territory ( level , code ) : territory = GeoZone . objects . valid_at ( datetime . now ( ) ) . filter ( code = code , level = 'fr:{level}' . format ( level = level ) ) . first ( ) return redirect ( url_for ( 'territories.territory' , territory = territory ) )
Implicit redirect given the INSEE code .
79
9
246,275
def scheduled ( ) : for job in sorted ( schedulables ( ) , key = lambda s : s . name ) : for task in PeriodicTask . objects ( task = job . name ) : label = job_label ( task . task , task . args , task . kwargs ) echo ( SCHEDULE_LINE . format ( name = white ( task . name . encode ( 'utf8' ) ) , label = label , schedule = task . schedule_display ) . encode ( 'utf8' ) )
List scheduled jobs .
110
4
246,276
def purge ( datasets , reuses , organizations ) : purge_all = not any ( ( datasets , reuses , organizations ) ) if purge_all or datasets : log . info ( 'Purging datasets' ) purge_datasets ( ) if purge_all or reuses : log . info ( 'Purging reuses' ) purge_reuses ( ) if purge_all or organizations : log . info ( 'Purging organizations' ) purge_organizations ( ) success ( 'Done' )
Permanently remove data flagged as deleted .
105
9
246,277
def clean_parameters ( self , params ) : return { k : v for k , v in params . items ( ) if k in self . adapter . facets }
Only keep known parameters
35
4
246,278
def extract_sort ( self , params ) : sorts = params . pop ( 'sort' , [ ] ) sorts = [ sorts ] if isinstance ( sorts , basestring ) else sorts sorts = [ ( s [ 1 : ] , 'desc' ) if s . startswith ( '-' ) else ( s , 'asc' ) for s in sorts ] self . sorts = [ { self . adapter . sorts [ s ] : d } for s , d in sorts if s in self . adapter . sorts ]
Extract and build sort query from parameters
109
8
246,279
def extract_pagination ( self , params ) : try : params_page = int ( params . pop ( 'page' , 1 ) or 1 ) self . page = max ( params_page , 1 ) except : # Failsafe, if page cannot be parsed, we falback on first page self . page = 1 try : params_page_size = params . pop ( 'page_size' , DEFAULT_PAGE_SIZE ) self . page_size = int ( params_page_size or DEFAULT_PAGE_SIZE ) except : # Failsafe, if page_size cannot be parsed, we falback on default self . page_size = DEFAULT_PAGE_SIZE self . page_start = ( self . page - 1 ) * self . page_size self . page_end = self . page_start + self . page_size
Extract and build pagination from parameters
186
8
246,280
def aggregate ( self , search ) : for f , facet in self . facets . items ( ) : agg = facet . get_aggregation ( ) if isinstance ( agg , Bucket ) : search . aggs . bucket ( f , agg ) elif isinstance ( agg , Pipeline ) : search . aggs . pipeline ( f , agg ) else : search . aggs . metric ( f , agg )
Add aggregations representing the facets selected
85
7
246,281
def filter ( self , search ) : if not self . _filters : return search filters = Q ( 'match_all' ) for f in self . _filters . values ( ) : filters &= f return search . filter ( filters )
Perform filtering instead of default post - filtering .
52
10
246,282
def query ( self , search , query ) : if not query : return search included , excluded = [ ] , [ ] for term in query . split ( ' ' ) : if not term . strip ( ) : continue if term . startswith ( '-' ) : excluded . append ( term [ 1 : ] ) else : included . append ( term ) if included : search = search . query ( self . multi_match ( included ) ) for term in excluded : search = search . query ( ~ self . multi_match ( [ term ] ) ) return search
Customize the search query if necessary .
117
8
246,283
def to_url ( self , url = None , replace = False , * * kwargs ) : params = copy . deepcopy ( self . filter_values ) if self . _query : params [ 'q' ] = self . _query if self . page_size != DEFAULT_PAGE_SIZE : params [ 'page_size' ] = self . page_size if kwargs : for key , value in kwargs . items ( ) : if not replace and key in params : if not isinstance ( params [ key ] , ( list , tuple ) ) : params [ key ] = [ params [ key ] , value ] else : params [ key ] . append ( value ) else : params [ key ] = value else : params [ 'page' ] = self . page href = Href ( url or request . base_url ) return href ( params )
Serialize the query into an URL
186
7
246,284
def safestr ( value ) : if not value or isinstance ( value , ( int , float , bool , long ) ) : return value elif isinstance ( value , ( date , datetime ) ) : return value . isoformat ( ) else : return unicode ( value )
Ensure type to string serialization
59
7
246,285
def yield_rows ( adapter ) : csvfile = StringIO ( ) writer = get_writer ( csvfile ) # Generate header writer . writerow ( adapter . header ( ) ) yield csvfile . getvalue ( ) del csvfile for row in adapter . rows ( ) : csvfile = StringIO ( ) writer = get_writer ( csvfile ) writer . writerow ( row ) yield csvfile . getvalue ( ) del csvfile
Yield a dataset catalog line by line
101
8
246,286
def stream ( queryset_or_adapter , basename = None ) : if isinstance ( queryset_or_adapter , Adapter ) : adapter = queryset_or_adapter elif isinstance ( queryset_or_adapter , ( list , tuple ) ) : if not queryset_or_adapter : raise ValueError ( 'Type detection is not possible with an empty list' ) cls = _adapters . get ( queryset_or_adapter [ 0 ] . __class__ ) adapter = cls ( queryset_or_adapter ) elif isinstance ( queryset_or_adapter , db . BaseQuerySet ) : cls = _adapters . get ( queryset_or_adapter . _document ) adapter = cls ( queryset_or_adapter ) else : raise ValueError ( 'Unsupported object type' ) timestamp = datetime . now ( ) . strftime ( '%Y-%m-%d-%H-%M' ) headers = { b'Content-Disposition' : 'attachment; filename={0}-{1}.csv' . format ( basename or 'export' , timestamp ) , } streamer = stream_with_context ( yield_rows ( adapter ) ) return Response ( streamer , mimetype = "text/csv" , headers = headers )
Stream a csv file from an object list
304
9
246,287
def header ( self ) : return ( super ( NestedAdapter , self ) . header ( ) + [ name for name , getter in self . get_nested_fields ( ) ] )
Generate the CSV header row
41
6
246,288
def rows ( self ) : return ( self . nested_row ( o , n ) for o in self . queryset for n in getattr ( o , self . attribute , [ ] ) )
Iterate over queryset objects
42
7
246,289
def nested_row ( self , obj , nested ) : row = self . to_row ( obj ) for name , getter in self . get_nested_fields ( ) : content = '' if getter is not None : try : content = safestr ( getter ( nested ) ) except Exception , e : # Catch all errors intentionally. log . error ( 'Error exporting CSV for {name}: {error}' . format ( name = self . __class__ . __name__ , error = e ) ) row . append ( content ) return row
Convert an object into a flat csv row
117
10
246,290
def transfer_request_notifications ( user ) : orgs = [ o for o in user . organizations if o . is_member ( user ) ] notifications = [ ] qs = Transfer . objects ( recipient__in = [ user ] + orgs , status = 'pending' ) # Only fetch required fields for notification serialization # Greatly improve performances and memory usage qs = qs . only ( 'id' , 'created' , 'subject' ) # Do not dereference subject (so it's a DBRef) # Also improve performances and memory usage for transfer in qs . no_dereference ( ) : notifications . append ( ( transfer . created , { 'id' : transfer . id , 'subject' : { 'class' : transfer . subject [ '_cls' ] . lower ( ) , 'id' : transfer . subject [ '_ref' ] . id } } ) ) return notifications
Notify user about pending transfer requests
196
7
246,291
def send ( subject , recipients , template_base , * * kwargs ) : sender = kwargs . pop ( 'sender' , None ) if not isinstance ( recipients , ( list , tuple ) ) : recipients = [ recipients ] debug = current_app . config . get ( 'DEBUG' , False ) send_mail = current_app . config . get ( 'SEND_MAIL' , not debug ) connection = send_mail and mail . connect or dummyconnection with connection ( ) as conn : for recipient in recipients : lang = i18n . _default_lang ( recipient ) with i18n . language ( lang ) : log . debug ( 'Sending mail "%s" to recipient "%s"' , subject , recipient ) msg = Message ( subject , sender = sender , recipients = [ recipient . email ] ) msg . body = theme . render ( 'mail/{0}.txt' . format ( template_base ) , subject = subject , sender = sender , recipient = recipient , * * kwargs ) msg . html = theme . render ( 'mail/{0}.html' . format ( template_base ) , subject = subject , sender = sender , recipient = recipient , * * kwargs ) conn . send ( msg )
Send a given email to multiple recipients .
267
8
246,292
def public_dsn ( dsn ) : m = RE_DSN . match ( dsn ) if not m : log . error ( 'Unable to parse Sentry DSN' ) public = '{scheme}://{client_id}@{domain}/{site_id}' . format ( * * m . groupdict ( ) ) return public
Transform a standard Sentry DSN into a public one
79
11
246,293
def update ( ctx , migrate = False ) : msg = 'Update all dependencies' if migrate : msg += ' and migrate data' header ( msg ) info ( 'Updating Python dependencies' ) lrun ( 'pip install -r requirements/develop.pip' ) lrun ( 'pip install -e .' ) info ( 'Updating JavaScript dependencies' ) lrun ( 'npm install' ) if migrate : info ( 'Migrating database' ) lrun ( 'udata db migrate' )
Perform a development update
110
5
246,294
def output_json ( data , code , headers = None ) : resp = make_response ( json . dumps ( data ) , code ) resp . headers . extend ( headers or { } ) return resp
Use Flask JSON to serialize
42
6
246,295
def extract_name_from_path ( path ) : base_path , query_string = path . split ( '?' ) infos = base_path . strip ( '/' ) . split ( '/' ) [ 2 : ] # Removes api/version. if len ( infos ) > 1 : # This is an object. name = '{category} / {name}' . format ( category = infos [ 0 ] . title ( ) , name = infos [ 1 ] . replace ( '-' , ' ' ) . title ( ) ) else : # This is a collection. name = '{category}' . format ( category = infos [ 0 ] . title ( ) ) return safe_unicode ( name )
Return a readable name from a URL path .
156
9
246,296
def handle_unauthorized_file_type ( error ) : url = url_for ( 'api.allowed_extensions' , _external = True ) msg = ( 'This file type is not allowed.' 'The allowed file type list is available at {url}' ) . format ( url = url ) return { 'message' : msg } , 400
Error occuring when the user try to upload a non - allowed file type
76
15
246,297
def authentify ( self , func ) : @ wraps ( func ) def wrapper ( * args , * * kwargs ) : if current_user . is_authenticated : return func ( * args , * * kwargs ) apikey = request . headers . get ( HEADER_API_KEY ) if apikey : try : user = User . objects . get ( apikey = apikey ) except User . DoesNotExist : self . abort ( 401 , 'Invalid API Key' ) if not login_user ( user , False ) : self . abort ( 401 , 'Inactive user' ) else : oauth2 . check_credentials ( ) return func ( * args , * * kwargs ) return wrapper
Authentify the user if credentials are given
159
8
246,298
def validate ( self , form_cls , obj = None ) : if 'application/json' not in request . headers . get ( 'Content-Type' ) : errors = { 'Content-Type' : 'expecting application/json' } self . abort ( 400 , errors = errors ) form = form_cls . from_json ( request . json , obj = obj , instance = obj , csrf_enabled = False ) if not form . validate ( ) : self . abort ( 400 , errors = form . errors ) return form
Validate a form from the request and handle errors
116
10
246,299
def unauthorized ( self , response ) : realm = current_app . config . get ( 'HTTP_OAUTH_REALM' , 'uData' ) challenge = 'Bearer realm="{0}"' . format ( realm ) response . headers [ 'WWW-Authenticate' ] = challenge return response
Override to change the WWW - Authenticate challenge
66
10