idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
33,400
def get_draft ( self ) : if self . is_draft : return self elif self . is_published : draft = self . publishing_draft if hasattr ( draft , 'get_draft_payload' ) : draft = draft . get_draft_payload ( ) return draft raise ValueError ( "Publishable object %r is neither draft nor published" % self )
Return self if this object is a draft otherwise return the draft copy of a published item .
33,401
def get_published ( self ) : if self . is_published : return self elif self . is_draft : return self . publishing_linked raise ValueError ( "Publishable object %r is neither draft nor published" % self )
Return self is this object is published otherwise return the published copy of a draft item . If this object is a draft with no published copy it will return None .
33,402
def get_published_or_draft ( self ) : if self . is_published : return self elif self . publishing_linked_id : return self . publishing_linked if is_draft_request_context ( ) : return self . get_draft ( ) return None
Return the published item if it exists otherwise for privileged users return the draft version .
33,403
def publish ( self ) : if self . is_draft : if self . publishing_linked : self . patch_placeholders ( ) type ( self . publishing_linked ) . objects . filter ( pk = self . publishing_linked . pk ) . delete ( ) else : self . publishing_published_at = timezone . now ( ) publish_obj = deepcopy ( self ) for fld in self . pu...
Publishes the object .
33,404
def unpublish ( self ) : if self . is_draft and self . publishing_linked : publishing_signals . publishing_pre_unpublish . send ( sender = type ( self ) , instance = self ) type ( self . publishing_linked ) . objects . filter ( pk = self . publishing_linked . pk ) . delete ( ) self . publishing_linked = None self . pub...
Un - publish the current object .
33,405
def publishing_prepare_published_copy ( self , draft_obj ) : mysuper = super ( PublishingModel , self ) if hasattr ( mysuper , 'publishing_prepare_published_copy' ) : mysuper . publishing_prepare_published_copy ( draft_obj )
Prepare published copy of draft prior to saving it
33,406
def clone_fluent_placeholders_and_content_items ( self , dst_obj ) : if not self . has_placeholder_relationships ( ) : return for src_placeholder in Placeholder . objects . parent ( self ) : dst_placeholder = Placeholder . objects . create_for_object ( dst_obj , slot = src_placeholder . slot , role = src_placeholder . ...
Clone each Placeholder and its ContentItem s .
33,407
def handle_noargs ( self , ** options ) : is_dry_run = options . get ( 'dry-run' , False ) mptt_only = options . get ( 'mptt-only' , False ) slugs = { } overrides = { } parents = dict ( UrlNode . objects . filter ( status = UrlNode . DRAFT ) . values_list ( 'id' , 'parent_id' ) ) self . stdout . write ( "Updated MPTT c...
By default this function runs on all objects .
33,408
def full_prepare ( self , obj ) : prepared_data = super ( AbstractLayoutIndex , self ) . full_prepare ( obj ) prepared_data [ 'django_ct' ] = get_model_ct ( self . get_model ( ) ) return prepared_data
Make django_ct equal to the type of get_model to make polymorphic children show up in results .
33,409
def grammatical_join ( l , initial_joins = ", " , final_join = " and " ) : return initial_joins . join ( l [ : - 2 ] + [ final_join . join ( l [ - 2 : ] ) ] )
Display a list of items nicely with a different string before the final item . Useful for using lists in sentences .
33,410
def update_GET ( parser , token ) : try : args = token . split_contents ( ) [ 1 : ] triples = list ( _chunks ( args , 3 ) ) if triples and len ( triples [ - 1 ] ) != 3 : raise template . TemplateSyntaxError , "%r tag requires arguments in groups of three (op, attr, value)." % token . contents . split ( ) [ 0 ] ops = se...
update_GET allows you to substitute parameters into the current request s GET parameters . This is useful for updating search filters page numbers without losing the current set .
33,411
def oembed ( url , params = "" ) : kwargs = dict ( urlparse . parse_qsl ( params ) ) try : return mark_safe ( get_oembed_data ( url , ** kwargs ) [ 'html' ] ) except ( KeyError , ProviderException ) : if settings . DEBUG : return "No OEmbed data returned" return ""
Render an OEmbed - compatible link as an embedded item .
33,412
def admin_link ( obj ) : if hasattr ( obj , 'get_admin_link' ) : return mark_safe ( obj . get_admin_link ( ) ) return mark_safe ( admin_link_fn ( obj ) )
Returns a link to the admin URL of an object .
33,413
def admin_url ( obj ) : if hasattr ( obj , 'get_admin_url' ) : return mark_safe ( obj . get_admin_url ( ) ) return mark_safe ( admin_url_fn ( obj ) )
Returns the admin URL of the object .
33,414
def sharedcontent_exists ( slug ) : from django . contrib . sites . models import Site from fluent_contents . plugins . sharedcontent . models import SharedContent site = Site . objects . get_current ( ) return SharedContent . objects . parent_site ( site ) . filter ( slug = slug ) . exists ( )
Return True if shared content with the given slug name exists .
33,415
def render_field_error ( self , obj_id , obj , exception , request ) : if obj is None : msg = 'No match for ID={0}' . format ( obj_id ) else : msg = unicode ( exception ) return u'<p class="error">{0}</p>' . format ( msg )
Default rendering for items in field where the the usual rendering method raised an exception .
33,416
def render_field_previews ( self , id_and_obj_list , admin , request , field_name ) : obj_preview_list = [ ] for obj_id , obj in id_and_obj_list : try : if obj is None : obj_preview = self . render_field_error ( obj_id , obj , None , request ) else : try : obj_preview = admin . preview ( obj , request ) except Attribut...
Override this to customise the preview representation of all objects .
33,417
def get_item ( self ) : "If the item is publishable, get the visible version" if hasattr ( self , 'get_draft' ) : draft = self . get_draft ( ) else : draft = self if not hasattr ( self , '_item_cache' ) : try : self . _item_cache = draft . item . get_published_or_draft ( ) except AttributeError : self . _item_cache = d...
If the item is publishable get the visible version
33,418
def render ( self , request , instance , ** kwargs ) : if instance . get_item ( ) : return super ( LinkPlugin , self ) . render ( request , instance , ** kwargs ) return ""
Only render the plugin if the item can be shown to the user
33,419
def get_fields ( self ) : prefix = getattr ( self . Meta , 'source_prefix' , '' ) fields = super ( ModelSubSerializer , self ) . get_fields ( ) fields_without_prefix = OrderedDict ( ) for field_name , field in fields . items ( ) : if field_name . startswith ( prefix ) : if not field . source : field . source = field_na...
Convert default field names for this sub - serializer into versions where the field name has the prefix removed but each field object knows the real model field name by setting the field s source attribute .
33,420
def _populate_validated_data_with_sub_field_data ( self , validated_data ) : for fieldname , field in self . get_fields ( ) . items ( ) : if isinstance ( field , ModelSubSerializer ) : field_data = validated_data . pop ( fieldname , None ) if field_data : validated_data . update ( field_data )
Move field data nested in ModelSubSerializer fields back into the overall validated data dict .
33,421
def _prepare_related_single_or_m2m_relations ( self , validated_data ) : many_to_many_relationships = { } for fieldname , field in self . get_fields ( ) . items ( ) : if ( isinstance ( field , ModelSubSerializer ) or field . read_only or not ( isinstance ( field , serializers . ModelSerializer ) or isinstance ( field ,...
Handle writing to nested related model fields for both single and many - to - many relationships .
33,422
def _get_or_update_or_create_related_instance ( self , ModelClass , fieldname , field_data ) : writable_related_fields = getattr ( self . Meta , 'writable_related_fields' , { } ) if fieldname not in writable_related_fields : raise TypeError ( "Cannot write related model field '%s' for %s on %s" " without corresponding ...
Handle lookup update or creation of related instances based on the field data provided and the field s writable_related_fields settings as defined on the serializer s Meta .
33,423
def _write_related_m2m_relations ( self , obj , many_to_many_relationships ) : for fieldname , related_objs in many_to_many_relationships . items ( ) : setattr ( obj , fieldname , related_objs )
For the given many_to_many_relationships dict mapping field names to a list of object instances apply the instance listing to the obj s named many - to - many relationship field .
33,424
def get_choices ( self ) : choices = [ ] for label_prefix , templates_dir , template_name_prefix in appsettings . LAYOUT_TEMPLATES : source_dir = os . path . join ( templates_dir , template_name_prefix ) for local , dirs , files in os . walk ( source_dir , followlinks = True ) : for source_file in files : template_name...
Return a list of choices for source files found in configured layout template directories .
33,425
def environment ( ** options ) : env = Environment ( ** options ) env . globals . update ( { 'static' : staticfiles_storage . url , 'url' : reverse , } ) env . globals . update ( context_processors . environment ( ) ) return env
Add static and url functions to the environment context processor and return as a Jinja2 Environment object .
33,426
def update_site ( sender , ** kwargs ) : Site = apps . get_model ( 'sites' , 'Site' ) domain = settings . SITE_DOMAIN if settings . SITE_PORT : domain += ':%s' % settings . SITE_PORT Site . objects . update_or_create ( pk = settings . SITE_ID , defaults = dict ( domain = domain , name = settings . SITE_NAME ) ) sequenc...
Update Site object matching SITE_ID setting with SITE_DOMAIN and SITE_PORT settings .
33,427
def _format_with_same_year ( format_specifier ) : test_format_specifier = format_specifier + "_SAME_YEAR" test_format = get_format ( test_format_specifier , use_l10n = True ) if test_format == test_format_specifier : return re . sub ( YEAR_RE , '' , get_format ( format_specifier ) ) else : return test_format
Return a version of format_specifier that renders a date assuming it has the same year as another date . Usually this means ommitting the year . This can be overridden by specifying a format that has _SAME_YEAR appended to the name in the project s formats spec .
33,428
def _format_with_same_year_and_month ( format_specifier ) : test_format_specifier = format_specifier + "_SAME_YEAR_SAME_MONTH" test_format = get_format ( test_format_specifier , use_l10n = True ) if test_format == test_format_specifier : no_year = re . sub ( YEAR_RE , '' , get_format ( format_specifier ) ) return re . ...
Return a version of format_specifier that renders a date assuming it has the same year and month as another date . Usually this means ommitting the year and month .
33,429
def _get_ctypes ( self ) : ctypes = [ ] for related_object in self . model . _meta . get_all_related_objects ( ) : model = getattr ( related_object , 'related_model' , related_object . model ) ctypes . append ( ContentType . objects . get_for_model ( model ) . pk ) if model . __subclasses__ ( ) : for child in model . _...
Returns all related objects for this model .
33,430
def placeholder_data_view ( self , request , id ) : try : layout = models . Layout . objects . get ( pk = id ) except models . Layout . DoesNotExist : json = { 'success' : False , 'error' : 'Layout not found' } status = 404 else : placeholders = layout . get_placeholder_data ( ) status = 200 placeholders = [ p . as_dic...
Return placeholder data for the given layout s template .
33,431
def get_urls ( self ) : urls = super ( LayoutAdmin , self ) . get_urls ( ) my_urls = patterns ( '' , url ( r'^placeholder_data/(?P<id>\d+)/$' , self . admin_site . admin_view ( self . placeholder_data_view ) , name = 'layout_placeholder_data' , ) ) return my_urls + urls
Add layout_placeholder_data URL .
33,432
def decompress ( self , value ) : if value : try : pk = self . queryset . get ( recurrence_rule = value ) . pk except self . queryset . model . DoesNotExist : pk = None return [ pk , None , value ] return [ None , None , None ]
Return the primary key value for the Select widget if the given recurrence rule exists in the queryset .
33,433
def _set_queryset ( self , queryset ) : self . fields [ 0 ] . queryset = self . widget . queryset = queryset self . widget . choices = self . fields [ 0 ] . choices
Set the queryset on the ModelChoiceField and choices on the widget .
33,434
def filter_content_types ( self , content_type_qs ) : valid_ct_ids = [ ] for ct in content_type_qs : model = ct . model_class ( ) if model and issubclass ( model , EventBase ) : valid_ct_ids . append ( ct . id ) return content_type_qs . filter ( pk__in = valid_ct_ids )
Filter the content types selectable to only event subclasses
33,435
def describe_page_numbers ( current_page , total_count , per_page , page_numbers_at_ends = 3 , pages_numbers_around_current = 3 ) : if total_count : page_count = int ( math . ceil ( 1.0 * total_count / per_page ) ) if page_count < current_page : raise PageNumberOutOfBounds page_numbers = get_page_numbers ( current_page...
Produces a description of how to display a paginated list s page numbers . Rather than just spitting out a list of every page available the page numbers returned will be trimmed to display only the immediate numbers around the start end and the current page .
33,436
def render_stats ( stats , sort , format ) : output = StdoutWrapper ( ) if hasattr ( stats , "stream" ) : stats . stream = output . stream stats . sort_stats ( * sort ) getattr ( stats , format ) ( ) return output . stream
Returns a StringIO containing the formatted statistics from _statsfile_ .
33,437
def render_queries ( queries , sort ) : output = StringIO ( ) if sort == 'order' : print >> output , " time query" for query in queries : print >> output , " %8s %s" % ( query [ "time" ] , query [ "sql" ] ) return output if sort == 'time' : def sorter ( x , y ) : return cmp ( x [ 1 ] [ 1 ] , y [ 1 ] [ 1 ] ) elif so...
Returns a StringIO containing the formatted SQL queries .
33,438
def unpickle_stats ( stats ) : stats = cPickle . loads ( stats ) stats . stream = True return stats
Unpickle a pstats . Stats object
33,439
def display_stats ( request , stats , queries ) : sort = [ request . REQUEST . get ( 'sort_first' , 'time' ) , request . REQUEST . get ( 'sort_second' , 'calls' ) ] fmt = request . REQUEST . get ( 'format' , 'print_stats' ) sort_first_buttons = RadioButtons ( 'sort_first' , sort [ 0 ] , sort_categories ) sort_second_bu...
Generate a HttpResponse of functions for a profiling run .
33,440
def display_queries ( request , stats , queries ) : sort = request . REQUEST . get ( 'sort_by' , 'time' ) sort_buttons = RadioButtons ( 'sort_by' , sort , ( ( 'order' , 'by order' ) , ( 'time' , 'time' ) , ( 'queries' , 'query count' ) ) ) output = render_queries ( queries , sort ) output . reset ( ) output = [ html . ...
Generate a HttpResponse of SQL queries for a profiling run .
33,441
def process_request ( self , request ) : def unpickle ( params ) : stats = unpickle_stats ( b64decode ( params . get ( 'stats' , '' ) ) ) queries = cPickle . loads ( b64decode ( params . get ( 'queries' , '' ) ) ) return stats , queries if request . method != 'GET' and not ( request . META . get ( 'HTTP_CONTENT_TYPE' ,...
Setup the profiler for a profiling run and clear the SQL query log .
33,442
def process_view ( self , request , view_func , view_args , view_kwargs ) : profiler = getattr ( request , 'profiler' , None ) if profiler : original_get = request . GET request . GET = original_get . copy ( ) request . GET . pop ( 'profile' , None ) request . GET . pop ( 'show_queries' , None ) request . GET . pop ( '...
Run the profiler on _view_func_ .
33,443
def process_response ( self , request , response ) : profiler = getattr ( request , 'profiler' , None ) if profiler : profiler . close ( ) params = request . REQUEST stats = hotshot . stats . load ( request . statsfile . name ) queries = connection . queries if ( params . get ( 'show_queries' , False ) and params . get...
Finish profiling and render the results .
33,444
def items_to_extract ( self , offset = 0 , length = None ) : endoffset = length and offset + length qs = self . origin_data ( ) [ offset : endoffset ] self . items_to_extract_length = qs . count ( ) return qs
Return an iterable of specific items to extract . As a side - effect set self . items_to_extract_length .
33,445
def dedupe_and_sort ( sequence , first = None , last = None ) : first = first or [ ] last = last or [ ] new_sequence = [ i for i in first if i in sequence ] for item in sequence : if item not in new_sequence and item not in last : new_sequence . append ( item ) new_sequence . extend ( [ i for i in last if i in sequence...
De - dupe and partially sort a sequence .
33,446
def slice_sequences ( sequences , start , end , apply_slice = None ) : if start < 0 or end < 0 or end <= start : raise ValueError ( 'Start and/or End out of range. Start: %s. End: %s' % ( start , end ) ) items_to_take = end - start items_passed = 0 collected_items = [ ] if apply_slice is None : apply_slice = _apply_sli...
Performs a slice across multiple sequences . Useful when paginating across chained collections .
33,447
def quote ( key , value ) : if key in quoted_options and isinstance ( value , string_types ) : return "'%s'" % value if key in quoted_bool_options and isinstance ( value , bool ) : return { True : 'true' , False : 'false' } [ value ] return value
Certain options support string values . We want clients to be able to pass Python strings in but we need them to be quoted in the output . Unfortunately some of those options also allow numbers so we type check the value before wrapping it in quotes .
33,448
def scale_and_crop_with_ranges ( im , size , size_range = None , crop = False , upscale = False , zoom = None , target = None , ** kwargs ) : min_width , min_height = size if min_width == 0 or min_height == 0 or not size_range : return scale_and_crop ( im , size , crop , upscale , zoom , target , ** kwargs ) max_width ...
An easy_thumbnails processor that accepts a size_range tuple which indicates that one or both dimensions can give by a number of pixels in order to minimize cropping .
33,449
def check_settings ( required_settings ) : defined_settings = [ setting if hasattr ( settings , setting ) else None for setting in required_settings ] if not all ( defined_settings ) : raise NotImplementedError ( 'The following settings have not been set: %s' % ', ' . join ( set ( required_settings ) - set ( defined_se...
Checks all settings required by a module have been set .
33,450
def available_on_day ( self , day ) : if isinstance ( day , datetime ) : d = day . date ( ) else : d = day return self . starts_within ( d , d )
Return events that are available on a given day .
33,451
def cleanup ( self ) : for alias in db . connections . databases : logger . info ( 'Closing database connection: %s' , alias ) db . connections [ alias ] . close ( )
Performs clean - up after task is completed before it is executed again in the next internal .
33,452
def get_plugins ( cls , * args , ** kwargs ) : return [ plugin ( * args , ** kwargs ) for plugin in cls . plugins ]
Return a list of plugin instances and pass through arguments .
33,453
def pre_facet_sqs ( self ) : sqs = SearchQuerySet ( ) if self . query : sqs = sqs . filter ( SQ ( content = AutoQuery ( self . query ) ) | SQ ( get_title = AutoQuery ( self . query ) ) | SQ ( boosted_search_terms = AutoQuery ( self . query ) ) ) return sqs
Return the queryset used for generating facets before any facets are applied
33,454
def get ( self , request , * args , ** kwargs ) : form_class = self . get_form_class ( ) form = self . get_form ( form_class ) top_value = self . get_top_level_facet_value ( ) subfacets = SEARCH_SUBFACETS . get ( top_value , [ ] ) self . active_facets = [ self . top_facet ] + subfacets if form . is_valid ( ) : self . q...
User has conducted a search or default state
33,455
def index ( request ) : warnings . warn ( "icekit_events.views.index is deprecated and will disappear in a " "future version. If you need this code, copy it into your project." , DeprecationWarning ) occurrences = models . Occurrence . objects . visible ( ) context = { 'occurrences' : occurrences , } return TemplateRes...
Listing page for event Occurrence s .
33,456
def get_assigned_to_user ( parser , token ) : tokens = token . contents . split ( ) if len ( tokens ) < 4 : raise template . TemplateSyntaxError ( "'get_assigned_to_user' statements require two arguments" ) if not tokens [ 1 ] . isdigit ( ) : raise template . TemplateSyntaxError ( "First argument to 'get_assigned_to_us...
Populates a template variable with the content with WorkflowState assignd for the given criteria .
33,457
def forwards ( apps , schema_editor ) : RecurrenceRule = apps . get_model ( 'icekit_events' , 'RecurrenceRule' ) for description , recurrence_rule in RULES : RecurrenceRule . objects . get_or_create ( description = description , defaults = dict ( recurrence_rule = recurrence_rule ) , )
Create initial recurrence rules .
33,458
def backwards ( apps , schema_editor ) : RecurrenceRule = apps . get_model ( 'icekit_events' , 'RecurrenceRule' ) descriptions = [ d for d , rr in RULES ] RecurrenceRule . objects . filter ( description__in = descriptions ) . delete ( )
Delete initial recurrence rules .
33,459
def environment ( request = None ) : context = { 'COMPRESS_ENABLED' : settings . COMPRESS_ENABLED , 'SITE_NAME' : settings . SITE_NAME , } for key in settings . ICEKIT_CONTEXT_PROCESSOR_SETTINGS : context [ key ] = getattr ( settings , key , None ) return context
Return COMPRESS_ENABLED SITE_NAME and any settings listed in ICEKIT_CONTEXT_PROCESSOR_SETTINGS as context .
33,460
def get_proxy_ancestor_classes ( klass ) : proxy_ancestor_classes = set ( ) for superclass in klass . __bases__ : if hasattr ( superclass , '_meta' ) and superclass . _meta . proxy : proxy_ancestor_classes . add ( superclass ) proxy_ancestor_classes . update ( get_proxy_ancestor_classes ( superclass ) ) return proxy_an...
Return a set containing all the proxy model classes that are ancestors of the given class .
33,461
def derive_and_set_name_fields_and_slug ( self , set_name_sort = True , set_slug = True ) : super ( PersonCreator , self ) . derive_and_set_name_fields_and_slug ( set_name_sort = False , set_slug = False ) person_names = [ name for name in [ self . name_family , self . name_given ] if not is_empty ( name ) ] if set_nam...
Override this method from CreatorBase to handle additional name fields for Person creators .
33,462
def for_model ( self , model , ** kwargs ) : queryset = self . filter ( content_types = ContentType . objects . get_for_model ( model ) , ** kwargs ) return queryset
Return layouts that are allowed for the given model .
33,463
def _order_by_pks ( qs , pks ) : pk_colname = '%s.%s' % ( qs . model . _meta . db_table , qs . model . _meta . pk . column ) clauses = ' ' . join ( [ 'WHEN %s=%s THEN %s' % ( pk_colname , pk , i ) for i , pk in enumerate ( pks ) ] ) ordering = 'CASE %s END' % clauses return qs . extra ( select = { 'pk_ordering' : order...
Adjust the given queryset to order items according to the explicit ordering of PKs provided .
33,464
def _queryset_iterator ( qs ) : if issubclass ( type ( qs ) , UrlNodeQuerySet ) : super_without_boobytrap_iterator = super ( UrlNodeQuerySet , qs ) else : super_without_boobytrap_iterator = super ( PublishingQuerySet , qs ) if is_publishing_middleware_active ( ) and not is_draft_request_context ( ) : for item in super_...
Override default iterator to wrap returned items in a publishing sanity - checker booby trap to lazily raise an exception if DRAFT items are mistakenly returned and mis - used in a public context where only PUBLISHED items should be used .
33,465
def published ( self , for_user = UNSET , force_exchange = False ) : if for_user is not UNSET : return self . visible ( ) queryset = super ( PublishingUrlNodeQuerySet , self ) . published ( for_user = for_user , force_exchange = force_exchange ) queryset = queryset . exclude ( Q ( publishing_is_draft = True ) & Q ( Q (...
Apply additional filtering of published items over that done in PublishingQuerySet . published to filter based on additional publising date fields used by Fluent .
33,466
def redis_from_url ( url ) : import redis url = url or "" parsed_url = urlparse ( url ) if parsed_url . scheme != "redis" : return None kwargs = { } match = PASS_HOST_PORT . match ( parsed_url . netloc ) if match . group ( 'password' ) is not None : kwargs [ 'password' ] = match . group ( 'password' ) if match . group ...
Converts a redis URL used by celery into a redis . Redis object .
33,467
def process_model_scores ( self , model_names , root_cache , include_features = False ) : model_scores = { } for model_name in model_names : model_scores [ model_name ] = { } model_scores [ model_name ] [ 'score' ] = self . _process_score ( model_name , dependency_cache = root_cache ) if include_features : base_feature...
Generates a score map for a set of models based on a root_cache . This method performs no substantial IO but may incur substantial CPU usage .
33,468
def _process_score ( self , model_name , dependency_cache = None ) : version = self [ model_name ] . version start = time . time ( ) feature_values = self . _solve_features ( model_name , dependency_cache ) logger . debug ( "Extracted features for {0}:{1}:{2} in {3} secs" . format ( self . name , model_name , version ,...
Generates a score for a given model using the dependency_cache .
33,469
def map_from_config ( cls , config , context_names , section_key = "scoring_contexts" ) : model_key_map = { } context_map = { } for context_name in context_names : section = config [ section_key ] [ context_name ] model_map = { } for model_name , key in section [ 'scorer_models' ] . items ( ) : if key in model_key_map ...
Loads a whole set of ScoringContext s from a configuration file while maintaining a cache of model names . This aids in better memory management and allows model aliases to be implemented at the configuration level .
33,470
def build_event_set ( event ) : event_set = set ( ) if re . match ( r"([^\.]+.)?mediawiki\.revision-create$" , event [ 'meta' ] [ 'topic' ] ) : event_set . add ( 'edit' ) user_groups = event . get ( 'performer' , { } ) . get ( 'user_groups' , [ ] ) if 'bot' in user_groups : event_set . add ( 'bot_edit' ) else : event_s...
Turn an EventStream event into a set of event types that ORES uses internally .
33,471
def build_precache_map ( config ) : precache_map = { } ss_name = config [ 'ores' ] [ 'scoring_system' ] for context in config [ 'scoring_systems' ] [ ss_name ] [ 'scoring_contexts' ] : precache_map [ context ] = { } for model in config [ 'scoring_contexts' ] [ context ] . get ( 'precache' , [ ] ) : precached_config = c...
Build a mapping of contexts and models from the configuration
33,472
def calculate_statistics ( self ) : "Jam some data through to generate statistics" rev_ids = range ( 0 , 100 , 1 ) feature_values = zip ( rev_ids , [ 0 ] * 100 ) scores = [ self . score ( f ) for f in feature_values ] labels = [ s [ 'prediction' ] for s in scores ] statistics = Classification ( labels , threshold_ndigi...
Jam some data through to generate statistics
33,473
def read_hector_input ( csv_file ) : df = pd . read_csv ( csv_file , skiprows = 3 , index_col = 0 ) df . name = os . path . splitext ( os . path . basename ( csv_file ) ) [ 0 ] return df
Reads a Hector CSV file and returns it as a Pandas DataFrame .
33,474
def write_hector_input ( scenario , path = None ) : out = "" try : name = "; " + scenario . name + "\n" except AttributeError : name = "; Hector Scenario\n" out += name out += "; Written with pyhector\n" unit_names = [ units [ source ] for source in scenario . columns ] out += ";UNITS:," + "," . join ( unit_names ) + "...
Writes a scenario DataFrame to a CSV emissions file as used in Hector .
33,475
def read_hector_constraint ( constraint_file ) : df = pd . read_csv ( constraint_file , index_col = 0 , comment = ";" ) df = df [ df . applymap ( lambda x : isinstance ( x , ( int , float ) ) ) ] df . index = df . index . astype ( int ) return df . iloc [ : , 0 ]
Reads a Hector contraint CSV file and returns it as a Pandas Series
33,476
def read_hector_output ( csv_file ) : start_year = 1746 output_stream = pd . read_csv ( csv_file , skiprows = 1 ) wide = output_stream [ output_stream . year >= start_year ] . pivot_table ( index = "year" , columns = "variable" , values = "value" ) return wide
Reads a Hector output stream CSV file and returns a wide DataFrame with Hector output data .
33,477
def run ( scenario , config = None , base_config = None , outputs = None , return_config = False ) : if outputs is None : outputs = [ "temperature.Tgav" , "simpleNbox.Ca" , "forcing.Ftot" ] if base_config is None : parameters = deepcopy ( _default_config ) else : parameters = deepcopy ( base_config ) if config : for ke...
Runs a scenario through the Hector climate model .
33,478
def config ( self , config ) : for section , data in config . items ( ) : for variable , value in data . items ( ) : self . set_value ( section , variable , value )
Set config values from config dictionary .
33,479
def set_emissions ( self , scenario ) : for section in emissions : for source in emissions [ section ] : if source not in scenario . columns : continue self . _set_timed_array ( section , source , list ( scenario . index ) , list ( scenario [ source ] ) )
Set emissions from Pandas DataFrame .
33,480
def requirements ( fname ) : with open ( fname ) as f : for line in f : match = re . search ( '#egg=(.*)$' , line ) if match : yield match . groups ( ) [ 0 ] else : yield line . strip ( )
Generator to parse requirements . txt file
33,481
def score ( self , context , models , revids ) : if isinstance ( revids , int ) : rev_ids = [ revids ] else : rev_ids = [ int ( rid ) for rid in revids ] return self . _score ( context , models , rev_ids )
Genetate scores for model applied to a sequence of revisions .
33,482
def principal_angle ( A , B ) : from numpy . linalg import qr , svd qA , _ = qr ( A ) qB , _ = qr ( B ) U , S , V = svd ( qA . T . dot ( qB ) ) return np . arccos ( min ( S . min ( ) , 1.0 ) )
Find the principal angle between two subspaces spanned by columns of A and B
33,483
def resample ( self , data , stats = None , mask = None , niter = None ) : stats = self . _get_statistics ( data , mask = mask ) if stats is None else stats stats = self . _stats_ensure_array ( stats ) niter = niter if niter else self . niter for itr in range ( niter ) : self . _resample_A ( stats ) self . _resample_si...
Introduce a mask that allows for missing data
33,484
def sample_truncated_gaussian ( mu = 0 , sigma = 1 , lb = - np . Inf , ub = np . Inf ) : mu , sigma , lb , ub = np . broadcast_arrays ( mu , sigma , lb , ub ) shp = mu . shape if np . allclose ( sigma , 0.0 ) : return mu cdflb = normal_cdf ( lb , mu , sigma ) cdfub = normal_cdf ( ub , mu , sigma ) cdfsamples = cdflb + ...
Sample a truncated normal with the specified params . This is not the most stable way but it works as long as the truncation region is not too far from the mean .
33,485
def log_likelihood ( self , x , K_extra = 1 ) : x = np . asarray ( x ) ks = self . _get_occupied ( ) K = len ( ks ) K_total = K + K_extra obs_distns = [ ] for k in range ( K ) : o = copy . deepcopy ( self . obs_distn ) o . resample ( data = self . _get_data_withlabel ( k ) ) obs_distns . append ( o ) for k in range ( K...
Estimate the log likelihood with samples from the model . Draw k_extra components which were not populated by the current model in order to create a truncated approximate mixture model .
33,486
def debug_print ( self ) : ring = self . _fetch_all ( ) print ( 'Hash ring "{key}" replicas:' . format ( key = self . key ) ) now = time . time ( ) n_replicas = len ( ring ) if ring : print ( '{:10} {:6} {:7} {}' . format ( 'Start' , 'Range' , 'Delay' , 'Node' ) ) else : print ( '(no replicas)' ) nodes = collections . ...
Prints the ring for debugging purposes .
33,487
def update ( self ) : ring = self . _fetch ( ) n_replicas = len ( ring ) replica_set = set ( [ r [ 1 ] for r in self . replicas ] ) self . ranges = [ ] for n , ( start , replica ) in enumerate ( ring ) : if replica in replica_set : end = ring [ ( n + 1 ) % n_replicas ] [ 0 ] % RING_SIZE if start < end : self . ranges ....
Fetches the updated ring from Redis and updates the current ranges .
33,488
def contains(self, key):
    """Return whether this node is responsible for handling the given key.

    The key is hashed with CRC32 onto the ring; membership is tested
    against the node's half-open ``[start, end)`` ranges.
    """
    slot = binascii.crc32(key.encode()) % RING_SIZE
    return any(start <= slot < end for start, end in self.ranges)
Returns a boolean indicating if this node is responsible for handling the given key .
33,489
def gevent_start(self):
    """Helper method to start the node for gevent-based applications.

    Spawns the poller in a greenlet, swaps in gevent's cooperative
    ``select``, then announces the node (heartbeat + ring update).
    """
    from gevent import spawn
    from gevent import select as _gevent_select

    self._poller_greenlet = spawn(self.poll)
    self._select = _gevent_select.select
    self.heartbeat()
    self.update()
Helper method to start the node for gevent - based applications .
33,490
def gevent_stop(self):
    """Helper method to stop the node for gevent-based applications.

    Kills the poller greenlet, removes the node from the ring, and
    restores the blocking stdlib ``select``.
    """
    from gevent import kill

    kill(self._poller_greenlet)
    self.remove()
    self._select = select.select
Helper method to stop the node for gevent - based applications .
33,491
def suggest_implied_attributes(prj):
    """If the given project contains what could be implied attributes,
    suggest declaring them.

    :return: list of suggestion messages, one per key of ``prj`` that
        appears in ``IDEALLY_IMPLIED``
    """
    template = "To declare {}, consider using {}"
    return [
        template.format(key, IMPLICATIONS_DECLARATION)
        for key in prj
        if key in IDEALLY_IMPLIED
    ]
If given project contains what could be implied attributes suggest that .
33,492
def check_bam ( bam , o ) : try : p = sp . Popen ( [ 'samtools' , 'view' , bam ] , stdout = sp . PIPE ) paired = 0 read_lengths = defaultdict ( int ) while o > 0 : line = p . stdout . readline ( ) . decode ( ) . split ( "\t" ) flag = int ( line [ 1 ] ) read_lengths [ len ( line [ 9 ] ) ] += 1 if 1 & flag : paired += 1 ...
Check reads in BAM file for read type and lengths .
33,493
def grab_project_data(prj):
    """From the given Project, grab Sample-independent data.

    Builds a dict mapping each section name from
    ``SAMPLE_INDEPENDENT_PROJECT_SECTIONS`` to the project's value for
    it; sections the project lacks are logged and skipped.  A falsy
    project yields an empty dict.
    """
    if not prj:
        return {}
    data = {}
    for section in SAMPLE_INDEPENDENT_PROJECT_SECTIONS:
        try:
            value = getattr(prj, section)
        except AttributeError:
            _LOGGER.debug("Project lacks section '%s', skipping", section)
        else:
            data[section] = value
    return data
From the given Project grab Sample - independent data .
33,494
def import_from_source ( module_filepath ) : import sys if not os . path . exists ( module_filepath ) : raise ValueError ( "Path to alleged module file doesn't point to an " "extant file: '{}'" . format ( module_filepath ) ) fname_chars = string . ascii_letters + string . digits name = "" . join ( random . choice ( fna...
Import a module from a particular filesystem location .
33,495
def infer_delimiter(filepath):
    """From extension, infer the delimiter used in a separated-values file.

    :return: "\\t" for txt/tsv, "," for csv, or None for an unknown or
        missing extension
    """
    _, extension = os.path.splitext(filepath)
    known_delimiters = {"txt": "\t", "tsv": "\t", "csv": ","}
    return known_delimiters.get(extension[1:].lower())
From extension infer delimiter used in a separated values file .
33,496
def is_null_like(x):
    """Determine whether an object is effectively null.

    True for ``None``, the empty string, and any sized collection-like
    object of length zero.
    """
    if x in (None, ""):
        return True
    return coll_like(x) and isinstance(x, Sized) and len(x) == 0
Determine whether an object is effectively null .
33,497
def parse_ftype ( input_file ) : if input_file . endswith ( ".bam" ) : return "bam" elif input_file . endswith ( ".fastq" ) or input_file . endswith ( ".fq" ) or input_file . endswith ( ".fq.gz" ) or input_file . endswith ( ".fastq.gz" ) : return "fastq" else : raise TypeError ( "Type of input file ends in neither '.ba...
Determine the filetype of the input file from its extension.
33,498
def parse_text_data ( lines_or_path , delimiter = os . linesep ) : if os . path . isfile ( lines_or_path ) : with open ( lines_or_path , 'r' ) as f : return f . readlines ( ) else : _LOGGER . debug ( "Not a file: '{}'" . format ( lines_or_path ) ) if isinstance ( lines_or_path , str ) : return lines_or_path . split ( d...
Interpret input argument as lines of data . This is intended to support multiple input argument types to core model constructors .
33,499
def sample_folder(prj, sample):
    """Get the path to this Project's results folder for the given Sample.

    :param prj: project whose metadata supplies the results directory
    :param sample: mapping with a "sample_name" entry
    :return: path to the sample's subfolder under the project's results
    """
    results_dir = prj.metadata.results_subdir
    return os.path.join(results_dir, sample["sample_name"])
Get the path to this Project s root folder for the given Sample .