idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
3,600
def _get_func ( cls , source_ver , target_ver ) : matches = ( func for func in cls . _upgrade_funcs if func . source == source_ver and func . target == target_ver ) try : match , = matches except ValueError : raise ValueError ( f"No migration from {source_ver} to {target_ver}" ) return match
Return exactly one function to convert from source to target
84
10
3,601
def get_uid(brain_or_object):
    """Return the Plone UID of a brain or object ('0' for the portal itself)."""
    if is_portal(brain_or_object):
        return '0'
    brain_has_uid = is_brain(brain_or_object) and base_hasattr(brain_or_object, "UID")
    if brain_has_uid:
        # catalog brains expose the UID as a plain metadata attribute
        return brain_or_object.UID
    return get_object(brain_or_object).UID()
Get the Plone UID for this object
80
8
3,602
def get_icon(brain_or_object, html_tag=True):
    """Return the icon of a content object, either as an <img> tag or a bare URL.

    Resolved manually through portal_types because plone.app.layout.getIcon
    is not reliable for contents catalogued outside portal_catalog.
    """
    types_tool = get_tool("portal_types")
    fti = types_tool.getTypeInfo(brain_or_object.portal_type)
    icon = fti.getIcon()
    if not icon:
        return ""
    url = "%s/%s" % (get_url(get_portal()), icon)
    if not html_tag:
        return url
    return '<img width="16" height="16" src="{url}" title="{title}" />'.format(
        url=url, title=get_title(brain_or_object))
Get the icon of the content object
188
7
3,603
def get_review_history(brain_or_object, rev=True):
    """Return the workflow review history for the given brain or object.

    :param rev: when True (default) the history is returned newest-first
    :returns: list of review-history entries ([] when unavailable)
    """
    obj = get_object(brain_or_object)
    review_history = []
    try:
        workflow = get_tool("portal_workflow")
        review_history = workflow.getInfoFor(obj, 'review_history')
    except WorkflowException as e:
        message = str(e)
        logger.error("Cannot retrieve review_history on {}: {}".format(
            obj, message))
    if not isinstance(review_history, (list, tuple)):
        # BUGFIX: typo "recieved" -> "received" in the log message
        logger.error("get_review_history: expected list, received {}".format(
            review_history))
        review_history = []
    if rev is True:
        # BUGFIX: getInfoFor may return a tuple, which has no reverse();
        # coerce to list before reversing
        review_history = list(review_history)
        review_history.reverse()
    return review_history
Get the review history for the given brain or context .
171
11
3,604
def get_cancellation_status(brain_or_object, default="active"):
    """Return the cancellation workflow state of a brain or object.

    Falls back to `default` when the object is not bound to the
    bika_cancellation_workflow.
    """
    if is_brain(brain_or_object):
        return getattr(brain_or_object, "cancellation_state", default)
    if 'bika_cancellation_workflow' not in get_workflows_for(brain_or_object):
        return default
    return get_workflow_status_of(brain_or_object, 'cancellation_state')
Get the cancellation_state of an object
117
8
3,605
def get_inactive_status(brain_or_object, default="active"):
    """Return the inactive workflow state of a brain or object.

    Falls back to `default` when the object is not bound to the
    bika_inactive_workflow.
    """
    if is_brain(brain_or_object):
        return getattr(brain_or_object, "inactive_state", default)
    if 'bika_inactive_workflow' not in get_workflows_for(brain_or_object):
        return default
    return get_workflow_status_of(brain_or_object, 'inactive_state')
Get the inactive_state of an object
109
9
3,606
def set_log_level(verbose, quiet):
    """Set the script's logging level from command-line verbosity options.

    quiet forces CRITICAL; otherwise verbosity 0/1/2+ maps to
    WARNING/INFO/DEBUG.
    """
    if quiet:
        verbose = -1
    if verbose < 0:
        level = logging.CRITICAL
    elif verbose > 1:
        level = logging.DEBUG
    else:
        level = {0: logging.WARNING, 1: logging.INFO}[verbose]
    LOGGER.setLevel(level)
Sets the logging level of the script based on command line options .
83
14
3,607
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries):
    """Guess the pattern-file format and whether word boundaries matter.

    Returns (is_tsv, use_boundaries): is_tsv is False as soon as any line
    does not contain exactly one tab; use_boundaries becomes True when a
    literal '\\b' escape appears in any line.
    """
    is_tsv = True
    use_boundaries = on_word_boundaries
    with open_file(pattern_filename) as handle:
        for raw in handle:
            decoded = raw.decode(encoding)
            if decoded.count('\t') != 1:
                is_tsv = False
            if '\\b' in decoded:
                use_boundaries = True
            if use_boundaries and not is_tsv:
                # both answers settled; no need to scan further
                break
    return is_tsv, use_boundaries
Automatically detects the pattern file format and determines whether the Aho - Corasick string matching should pay attention to word boundaries or not .
103
28
3,608
def sub_escapes(sval):
    """Replace backslash escape sequences in sval with their characters.

    '\\b' maps to NUL (it marks word boundaries upstream); the
    literal-backslash rule runs last so it cannot create new escapes.
    """
    replacements = (
        ('\\a', '\a'),
        ('\\b', '\x00'),
        ('\\f', '\f'),
        ('\\n', '\n'),
        ('\\r', '\r'),
        ('\\t', '\t'),
        ('\\v', '\v'),
        ('\\\\', '\\'),
    )
    for pattern, char in replacements:
        sval = sval.replace(pattern, char)
    return sval
Process escaped characters in sval .
156
7
3,609
def build_trie(pattern_filename, pattern_format, encoding, on_word_boundaries):
    """Construct an Aho-Corasick trie for string rewriting.

    :param pattern_format: 'tsv', 'sed', or 'auto' (sniffed from the file)
    :returns: (trie, boundaries) where boundaries says whether word-boundary
        transformation was applied to the stored patterns
    """
    boundaries = on_word_boundaries
    # sniff the file when the format is unknown or boundaries are undecided
    if pattern_format == 'auto' or not on_word_boundaries:
        tsv, boundaries = detect_pattern_format(pattern_filename, encoding,
                                                on_word_boundaries)
    if pattern_format == 'auto':
        if tsv:
            pattern_format = 'tsv'
        else:
            pattern_format = 'sed'
    trie = fsed.ahocorasick.AhoCorasickTrie()
    num_candidates = 0
    with open_file(pattern_filename) as pattern_file:
        for lineno, line in enumerate(pattern_file):
            line = line.decode(encoding).rstrip('\n')
            if not line.strip():
                continue
            # decode the line
            if pattern_format == 'tsv':
                fields = line.split('\t')
                if len(fields) != 2:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in tab-separated format): {}').format(lineno, line))
                    continue
                before, after = fields
            elif pattern_format == 'sed':
                before = after = None
                line = line.lstrip()
                # sed lines look like s/before/after/ with any delimiter
                if line[0] == 's':
                    delim = line[1]
                    # delim might be a regex special character;
                    # escape it if necessary
                    if delim in '.^$*+?[](){}|\\':
                        delim = '\\' + delim
                    # split on unescaped delimiters only
                    fields = re.split(r'(?<!\\){}'.format(delim), line)
                    if len(fields) == 4:
                        before, after = fields[1], fields[2]
                        # unescape escaped delimiters inside the fields
                        before = re.sub(r'(?<!\\)\\{}'.format(delim), delim, before)
                        after = re.sub(r'(?<!\\)\\{}'.format(delim), delim, after)
                if before is None or after is None:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in sed format): {}').format(lineno, line))
                    continue
            num_candidates += 1
            if on_word_boundaries and before != before.strip():
                LOGGER.warning(('before pattern on line {} padded whitespace; '
                                'this may interact strangely with the --words '
                                'option: {}').format(lineno, line))
            before = sub_escapes(before)
            after = sub_escapes(after)
            if boundaries:
                before = fsed.ahocorasick.boundary_transform(before, on_word_boundaries)
            trie[before] = after
    LOGGER.info('{} patterns loaded from {}'.format(num_candidates,
                                                    pattern_filename))
    return trie, boundaries
Constructs a finite state machine for performing string rewriting .
626
11
3,610
def warn_prefix_values(trie):
    """Log a warning for every trie node that has both a value and a
    longest_prefix: such a pattern is a superstring of another pattern and
    can never be matched.
    """
    for current, _parent in trie.dfs():
        if current.has_value and current.longest_prefix is not None:
            # BUGFIX: Logger.warn is a deprecated alias of Logger.warning
            LOGGER.warning(('pattern {} (value {}) is a superstring of pattern '
                            '{} (value {}) and will never be matched').format(
                current.prefix, current.value,
                current.longest_prefix.prefix,
                current.longest_prefix.value))
Prints warning messages for every node that has both a value and a longest_prefix .
103
18
3,611
def rewrite_str_with_trie(sval, trie, boundaries=False, slow=False):
    """Rewrite sval using the given trie.

    When boundaries is True the string is boundary-transformed before the
    rewrite and untransformed afterwards; slow=True selects the exact
    replace() over the greedy variant.
    """
    if boundaries:
        sval = fsed.ahocorasick.boundary_transform(sval)
    sval = trie.replace(sval) if slow else trie.greedy_replace(sval)
    if boundaries:
        sval = ''.join(fsed.ahocorasick.boundary_untransform(sval))
    return sval
Rewrites a string using the given trie object .
105
11
3,612
def register_function(cls, fn, label):
    """Register fn on the class under label, warning on overwrite."""
    overwriting = label in cls.registered_functions
    if overwriting:
        log.warning("Overwriting existing registered function %s", label)
    fn.label = label
    cls.registered_functions[label] = fn
Register a function with the pipeline .
59
7
3,613
def load(cls, serialised):
    """Load a previously serialised pipeline from a list of function labels.

    :raises BaseLunrException: when a label has no registered function
    """
    pipeline = cls()
    for fn_name in serialised:
        try:
            fn = cls.registered_functions[fn_name]
        except KeyError:
            # BUGFIX: the message lacked its {} placeholder, so the
            # offending name was never interpolated
            raise BaseLunrException(
                "Cannot load unregistered function {}".format(fn_name))
        else:
            pipeline.add(fn)
    return pipeline
Loads a previously serialised pipeline .
77
8
3,614
def add(self, *args):
    """Append the given functions to the end of the pipeline, warning
    about any that are not registered."""
    for pipeline_fn in args:
        self.warn_if_function_not_registered(pipeline_fn)
        self._stack.append(pipeline_fn)
Adds new functions to the end of the pipeline .
37
10
3,615
def after(self, existing_fn, new_fn):
    """Insert new_fn immediately after existing_fn in the pipeline.

    :raises BaseLunrException: when existing_fn is not in the pipeline
    """
    self.warn_if_function_not_registered(new_fn)
    try:
        position = self._stack.index(existing_fn)
    except ValueError as e:
        six.raise_from(BaseLunrException("Cannot find existing_fn"), e)
    else:
        self._stack.insert(position + 1, new_fn)
Adds a single function after a function that already exists in the pipeline .
90
14
3,616
def run(self, tokens):
    """Run the pipeline's functions over tokens.

    Falsy results are dropped; list/tuple results are flattened into the
    stream (simulating JS Array.concat).
    """
    for pipeline_fn in self._stack:
        # pipeline functions must accept (token, i, tokens) or *args
        next_tokens = []
        for position, token in enumerate(tokens):
            outcome = pipeline_fn(token, position, tokens)
            if not outcome:
                continue
            if isinstance(outcome, (list, tuple)):
                next_tokens.extend(outcome)
            else:
                next_tokens.append(outcome)
        tokens = next_tokens
    return tokens
Runs the current list of functions that make up the pipeline against the passed tokens .
111
17
3,617
def run_string(self, string, metadata=None):
    """Run a plain string through the pipeline and return strings.

    The string is wrapped in a Token and the resulting tokens are mapped
    back to strings.
    """
    wrapped = Token(string, metadata)
    return [str(result) for result in self.run([wrapped])]
Convenience method for passing a string through a pipeline and getting strings out . This method takes care of wrapping the passed string in a token and mapping the resulting tokens back to strings .
42
37
3,618
def get_client():
    """Return the MongoDB client pmxbot uses, or None when unavailable.

    Any failure (missing config, wrong storage backend, import problems)
    is deliberately suppressed.
    """
    with contextlib.suppress(Exception):
        backing = Storage.from_URI()
        assert isinstance(backing, pmxbot.storage.MongoDBStorage)
        return backing.db.database.client
Use the same MongoDB client as pmxbot if available .
49
13
3,619
def create_db_in_shard ( db_name , shard , client = None ) : client = client or pymongo . MongoClient ( ) # flush the router config to ensure it's not stale res = client . admin . command ( 'flushRouterConfig' ) if not res . get ( 'ok' ) : raise RuntimeError ( "unable to flush router config" ) if shard not in get_ids ( client . config . shards ) : raise ValueError ( f"Unknown shard {shard}" ) if db_name in get_ids ( client . config . databases ) : raise ValueError ( "database already exists" ) # MongoDB doesn't have a 'create database' command, so insert an # item into a collection and then drop the collection. client [ db_name ] . foo . insert ( { 'foo' : 1 } ) client [ db_name ] . foo . drop ( ) if client [ db_name ] . collection_names ( ) : raise ValueError ( "database has collections" ) primary = client [ 'config' ] . databases . find_one ( db_name ) [ 'primary' ] if primary != shard : res = client . admin . command ( 'movePrimary' , value = db_name , to = shard ) if not res . get ( 'ok' ) : raise RuntimeError ( str ( res ) ) return ( f"Successfully created {db_name} in {shard} via {client.nodes} " f"from {hostname}" )
In a sharded cluster create a database in a particular shard .
329
14
3,620
def luhn_checksum(number, chars=DIGITS):
    """Calculate the Luhn checksum of number over the given alphabet.

    A valid number (with its check digit appended) has checksum 0.
    """
    base = len(chars)
    values = [chars.index(ch) for ch in reversed(str(number))]
    plain = sum(values[::2])
    # every second digit is doubled and its "digits" (in the given base)
    # are summed via divmod
    doubled = sum(sum(divmod(v * 2, base)) for v in values[1::2])
    return (plain + doubled) % base
Calculates the Luhn checksum for number
83
10
3,621
def luhn_calc(number, chars=DIGITS):
    """Calculate the Luhn check digit to append to number."""
    # append the zero digit, then pick the character that cancels the checksum
    padded = str(number) + chars[0]
    return chars[-luhn_checksum(padded, chars)]
Calculate the Luhn check digit for number .
46
11
3,622
def to_decimal(number, strip='- '):
    """Convert number (int, or str possibly 0x/o/b prefixed) to a base-10
    string, removing the given separator characters first."""
    if isinstance(number, six.integer_types):
        return str(number)
    cleaned = re.sub(r'[%s]' % re.escape(strip), '', str(number))
    if cleaned.startswith('0x'):
        # hexadecimal
        return to_decimal(int(cleaned[2:], 16))
    if cleaned.startswith('o'):
        # octal
        return to_decimal(int(cleaned[1:], 8))
    if cleaned.startswith('b'):
        # binary
        return to_decimal(int(cleaned[1:], 2))
    return str(int(cleaned))
Converts a number to a string of decimals in base 10 .
173
15
3,623
def get_class_method(cls_or_inst, method_name):
    """Return the named method from a class or instance, or None when missing.

    Properties and cached properties are unwrapped to their underlying
    functions.
    """
    owner = cls_or_inst if isinstance(cls_or_inst, type) else cls_or_inst.__class__
    attr = getattr(owner, method_name, None)
    if isinstance(attr, property):
        return attr.fget
    if isinstance(attr, cached_property):
        return attr.func
    return attr
Returns a method from a given class or instance . When the method does not exist it returns None . Also works with properties and cached properties .
100
29
3,624
def manage_fits(list_of_frame):
    """Manage a list of FITS resources: yield them opened, close them after.

    Accepts paths, fits.HDUList objects, numina DataFrames, or anything
    else exposing close().

    NOTE(review): generator with try/yield/finally — presumably wrapped by
    contextlib.contextmanager at the decoration/call site; confirm.
    """
    import astropy.io.fits as fits
    import numina.types.dataframe as df
    refs = []
    for frame in list_of_frame:
        if isinstance(frame, str):
            # a path on disk: open it ourselves
            ref = fits.open(frame)
            refs.append(ref)
        elif isinstance(frame, fits.HDUList):
            # already open; still closed in the finally block below
            refs.append(frame)
        elif isinstance(frame, df.DataFrame):
            ref = frame.open()
            refs.append(ref)
        else:
            refs.append(frame)
    try:
        yield refs
    finally:
        # release
        for obj in refs:
            obj.close()
Manage a list of FITS resources
146
8
3,625
def logging_from_debugplot(debugplot):
    """Configure the logging level from a debugplot code.

    |debugplot| >= 10 selects DEBUG, anything smaller INFO.
    Raises ValueError for non-integer input.
    """
    if not isinstance(debugplot, int):
        raise ValueError("Unexpected debugplot=" + str(debugplot))
    level = logging.DEBUG if abs(debugplot) >= 10 else logging.INFO
    logging.basicConfig(level=level)
Set debugging level based on debugplot value .
74
9
3,626
def ximplot(ycut, title=None, show=True,
            plot_bbox=(0, 0), geometry=(0, 0, 640, 480),
            tight_layout=True, debugplot=None):
    """Auxiliary function to display a 1-D plot of ycut.

    :param ycut: 1-D numpy array to plot
    :param plot_bbox: (nc1, nc2) pixel bounding box; (0, 0) means plot
        against array indices
    :returns: the matplotlib axes
    :raises ValueError: for non-ndarray, non-1-D, or bbox/size mismatch
    """
    # protections
    if type(ycut) is not np.ndarray:
        raise ValueError("ycut=" + str(ycut) + " must be a numpy.ndarray")
    elif ycut.ndim != 1:
        # BUGFIX: "ycut.ndim is not 1" was an identity test on an int
        # literal, and the message read the nonexistent attribute
        # "ycut.dim" (raising AttributeError instead of ValueError)
        raise ValueError("ycut.ndim=" + str(ycut.ndim) + " must be 1")
    # read bounding box limits
    nc1, nc2 = plot_bbox
    plot_coord = (nc1 == 0 and nc2 == 0)
    naxis1_ = ycut.size
    if not plot_coord:
        # check that ycut size corresponds to bounding box size
        if naxis1_ != nc2 - nc1 + 1:
            raise ValueError("ycut.size=" + str(ycut.size) +
                             " does not correspond to bounding box size")
    # display image
    from numina.array.display.matplotlib_qt import plt
    if not show:
        plt.ioff()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.autoscale(False)
    ymin = ycut.min()
    ymax = ycut.max()
    if plot_coord:
        xmin = -0.5
        xmax = (naxis1_ - 1) + 0.5
        # BUGFIX: np.float was removed in NumPy 1.24; builtin float is
        # the documented replacement
        xcut = np.arange(naxis1_, dtype=float)
        ax.set_xlabel('image array index in the X direction')
        ax.set_ylabel('pixel value')
    else:
        xmin = float(nc1) - 0.5
        xmax = float(nc2) + 0.5
        xcut = np.linspace(start=nc1, stop=nc2, num=nc2 - nc1 + 1)
        ax.set_xlabel('image pixel in the X direction')
        ax.set_ylabel('pixel value')
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    ax.plot(xcut, ycut, '-')
    if title is not None:
        ax.set_title(title)
    # set the geometry
    set_window_geometry(geometry)
    if show:
        pause_debugplot(debugplot, pltshow=show, tight_layout=tight_layout)
    else:
        if tight_layout:
            plt.tight_layout()
    # return axes
    return ax
Auxiliary function to display 1d plot .
589
10
3,627
def oversample1d(sp, crval1, cdelt1, oversampling=1, debugplot=0):
    """Oversample a 1-D spectrum by an integer factor.

    Each input pixel is replicated `oversampling` times; the WCS reference
    value and step are recomputed accordingly.
    Returns (sp_over, crval1_over, cdelt1_over).
    """
    if sp.ndim != 1:
        raise ValueError('Unexpected array dimensions')
    naxis1 = sp.size
    naxis1_over = naxis1 * oversampling
    cdelt1_over = cdelt1 / oversampling
    # the left border of the first input pixel fixes the new reference value
    xmin = crval1 - cdelt1 / 2
    crval1_over = xmin + cdelt1_over / 2
    sp_over = np.zeros(naxis1_over)
    for src in range(naxis1):
        start = src * oversampling
        sp_over[start:start + oversampling] = sp[src]
    if abs(debugplot) in (21, 22):
        crvaln = crval1 + (naxis1 - 1) * cdelt1
        crvaln_over = crval1_over + (naxis1_over - 1) * cdelt1_over
        xover = np.linspace(crval1_over, crvaln_over, naxis1_over)
        ax = ximplotxy(np.linspace(crval1, crvaln, naxis1), sp, 'bo',
                       label='original', show=False)
        ax.plot(xover, sp_over, 'r+', label='resampled')
        pause_debugplot(debugplot, pltshow=True)
    return sp_over, crval1_over, cdelt1_over
Oversample spectrum .
353
5
3,628
def map_borders(wls):
    """Compute pixel borders for interpolation.

    Interior borders are midpoints between consecutive wavelengths; the two
    end borders are linearly extrapolated.
    """
    midpoints = 0.5 * (wls[1:] + wls[:-1])
    borders = np.empty((wls.shape[0] + 1,))
    borders[1:-1] = midpoints
    borders[0] = 2 * wls[0] - midpoints[0]
    borders[-1] = 2 * wls[-1] - midpoints[-1]
    return borders
Compute borders of pixels for interpolation .
127
9
3,629
def import_object(path):
    """Import an object from its fully qualified dotted name.

    Tries plain module import first, then attribute lookup on the parent
    module, and finally importing the full path as a module itself.
    """
    parts = path.split('.')
    if len(parts) == 1:
        return importlib.import_module(path)
    attr_name = parts[-1]
    parent = importlib.import_module('.'.join(parts[:-1]))
    try:
        return getattr(parent, attr_name)
    except AttributeError:
        pass
    # not an attribute: the last component must itself be a module
    return importlib.import_module(path)
Import an object given its fully qualified name .
127
9
3,630
def make_parser(add_help=True, exclude_args=None):
    """Return an argparse.ArgumentParser with losser's arguments.

    :param add_help: forwarded to ArgumentParser
    :param exclude_args: option strings to leave out of the parser
    """
    if exclude_args is None:
        exclude_args = []
    parser = argparse.ArgumentParser(add_help=add_help)
    parser.description = ("Filter, transform and export a list of JSON "
                          "objects on stdin to JSON or CSV on stdout")
    if "--columns" not in exclude_args:
        parser.add_argument(
            "--columns", dest="columns_file",
            help="the JSON file specifying the columns to be output",
        )
    if ("-i" not in exclude_args) and ("--input" not in exclude_args):
        parser.add_argument(
            "-i", "--input",
            help="read input from the given file instead of from stdin",
            dest='input_data',  # Because input is a Python builtin.
        )
    # the column-spec options below all funnel into ColumnsAction, which
    # accumulates them into a columns mapping on the namespace
    if ("-c" not in exclude_args) and ("--column" not in exclude_args):
        parser.add_argument("-c", "--column", action=ColumnsAction)
    if "--pattern" not in exclude_args:
        parser.add_argument("--pattern", action=ColumnsAction, nargs='+')
    if "--max-length" not in exclude_args:
        parser.add_argument("--max-length", action=ColumnsAction)
    if "--strip" not in exclude_args:
        parser.add_argument("--strip", nargs="?", action=ColumnsAction)
    if "--deduplicate" not in exclude_args:
        parser.add_argument("--deduplicate", nargs='?', action=ColumnsAction)
    if "--case-sensitive" not in exclude_args:
        parser.add_argument("--case-sensitive", nargs='?',
                            action=ColumnsAction)
    if "--unique" not in exclude_args:
        parser.add_argument("--unique", nargs="?", action=ColumnsAction)
    if ("-p" not in exclude_args) and ("--pretty" not in exclude_args):
        parser.add_argument("-p", "--pretty", action="store_true")
    return parser
Return an argparse . ArgumentParser object with losser s arguments .
505
14
3,631
def parse(parser=None, args=None):
    """Parse command-line arguments and return the argparse namespace.

    :raises CommandLineExit: when argparse would exit
    :raises ColumnWithoutPatternError: for a column spec lacking a pattern
    :raises ColumnsAndColumnsFileError: when both --column and --columns given
    :raises NoColumnsError: when no columns are specified at all
    """
    if not parser:
        parser = make_parser()
    try:
        parsed_args = parser.parse_args(args)
    except SystemExit as err:
        # turn argparse's exit into an exception the caller can handle
        raise CommandLineExit(err.code)
    try:
        columns = parsed_args.columns
    except AttributeError:
        # no -c/--column options were used at all
        columns = collections.OrderedDict()
        parsed_args.columns = columns
    for title, spec in columns.items():
        if "pattern" not in spec:
            raise ColumnWithoutPatternError(
                'Column "{0}" needs a pattern'.format(title))
        # Change length-1 patterns into strings (not lists of one string).
        if len(spec["pattern"]) == 1:
            spec["pattern"] = spec["pattern"][0]
    if columns and parsed_args.columns_file:
        raise ColumnsAndColumnsFileError(
            "You can't use the --column and --columns options together (yet)")
    elif parsed_args.columns_file and not columns:
        parsed_args.columns = parsed_args.columns_file
    elif (not columns) and (not parsed_args.columns_file):
        # Crash if no columns specified.
        # In the future we'll support simply converting all JSON fields to CSV
        # columns if no columns are specified, and this will be removed.
        raise NoColumnsError(
            "You must give either a --columns or at least one -c/--column "
            "argument")
    else:
        assert columns
    return parsed_args
Parse the command line arguments and return an argparse namespace object .
321
13
3,632
def do(parser=None, args=None, in_=None, table_function=None):
    """Read command-line args and JSON input, return the CSV string.

    :param in_: input stream (defaults to stdin)
    :param table_function: override for losser.table (used in tests)
    """
    in_ = in_ or sys.stdin
    table_function = table_function or losser.table
    parsed_args = parse(parser=parser, args=args)
    # Read the input data from stdin or a file.
    if parsed_args.input_data:
        # BUGFIX: the file handle was never closed; use a context manager
        with open(parsed_args.input_data, 'r') as input_file:
            input_data = input_file.read()
    else:
        input_data = in_.read()
    dicts = json.loads(input_data)
    csv_string = table_function(dicts, parsed_args.columns, csv=True,
                                pretty=parsed_args.pretty)
    return csv_string
Read command - line args and stdin return the result .
161
12
3,633
def generate_gaussian_profile(seeing_fwhm):
    """Generate a normalized (unit-integral) Gaussian profile from its FWHM."""
    # FWHM = 2*sqrt(2*ln 2) * sigma
    fwhm_per_sigma = 2 * math.sqrt(2 * math.log(2))
    sigma = seeing_fwhm / fwhm_per_sigma
    # amplitude chosen so the 2-D integral equals 1
    peak = 1.0 / (2 * math.pi * sigma * sigma)
    return Gaussian2D(amplitude=peak, x_mean=0.0, y_mean=0.0,
                      x_stddev=sigma, y_stddev=sigma)
Generate a normalized Gaussian profile from its FWHM
121
12
3,634
def generate_moffat_profile(seeing_fwhm, alpha):
    """Generate a normalized Moffat profile from its FWHM and alpha.

    NOTE(review): if Moffat2D is astropy's model, its center parameters are
    x_0/y_0 rather than x_mean/y_mean — confirm against the actual import.
    """
    # FWHM = 2 * gamma * sqrt(2**(1/alpha) - 1)
    scale = 2 * math.sqrt(2 ** (1.0 / alpha) - 1)
    gamma = seeing_fwhm / scale
    # normalization so the 2-D integral equals 1
    amplitude = 1.0 / math.pi * (alpha - 1) / gamma ** 2
    seeing_model = Moffat2D(amplitude=amplitude, x_mean=0.0, y_mean=0.0,
                            gamma=gamma, alpha=alpha)
    return seeing_model
Generate a normalized Moffat profile from its FWHM and alpha
108
14
3,635
def field_to_dict(field, instance):
    """Serialize one model field of instance (M2M fields become lists)."""
    # avoid a circular import
    from django.db.models.fields.related import ManyToManyField
    if isinstance(field, ManyToManyField):
        return many_to_many_field_to_dict(field, instance)
    return field.value_from_object(instance)
Converts a model field to a dictionary
74
8
3,636
def model_to_dict(instance, fields=None, exclude=None):
    """Like django's model_to_dict, but non-editable fields are included too."""
    result = {}
    all_fields = chain(instance._meta.concrete_fields,
                       instance._meta.many_to_many)  # pylint: disable=W0212
    for model_field in all_fields:
        if should_exclude_field(model_field, fields, exclude):
            continue
        result[model_field.name] = field_to_dict(model_field, instance)
    return result
The same implementation as django model_to_dict but editable fields are allowed
85
17
3,637
def change_and_save(self, update_only_changed_fields=False, **changed_fields):
    """Apply changed_fields to every object in the queryset, save them,
    and return the refreshed queryset."""
    bulk_change_and_save(
        self,
        update_only_changed_fields=update_only_changed_fields,
        **changed_fields)
    # re-filter so callers receive a fresh queryset reflecting the updates
    return self.filter()
Changes a given changed_fields on each object in the queryset saves objects and returns the changed objects in the queryset .
64
27
3,638
def extent(self):
    """Return (left, right, bottom, top) for matplotlib imshow.

    Pixel borders are shifted by half a pixel; intervals[0] is the row
    (y) interval and intervals[1] the column (x) interval.
    """
    rows, cols = self.intervals[0], self.intervals[1]
    return (cols.pix1 - 0.5, cols.pix2 - 0.5,
            rows.pix1 - 0.5, rows.pix2 - 0.5)
Helper for matplotlib imshow
69
7
3,639
def readout(self):
    """Simulate a full detector readout and return the result in ADUs.

    The stage order is significant: shot noise, saturation, pre-readout
    processing, electron-to-ADU conversion, post-readout processing.
    """
    # photon shot noise
    elec = self.simulate_poisson_variate()
    # clip to the detector's saturation level
    elec_pre = self.saturate(elec)
    elec_f = self.pre_readout(elec_pre)
    # electrons -> ADU
    adu_r = self.base_readout(elec_f)
    adu_p = self.post_readout(adu_r)
    # reset internal state for the next exposure
    self.clean_up()
    return adu_p
Readout the detector .
97
5
3,640
def parse_arg_line(fargs):
    """Parse a limited 'k1=v1, k2=v2' argument string into a dict.

    Values are interpreted as Python literals.
    :raises ValueError: for a pair lacking '=' (message 'malformed')
    """
    fargs = fargs.strip()
    if fargs == '':
        return {}
    entries = []
    for pair in (chunk.strip() for chunk in fargs.split(',')):
        # split on the first "=" only
        eq_pos = pair.find("=")
        if eq_pos == -1:
            # no equal
            raise ValueError("malformed")
        key, val = pair[:eq_pos], pair[eq_pos + 1:]
        entries.append("'{}': {}".format(key, val))
    literal = "{{ {0} }}".format(','.join(entries))
    return ast.literal_eval(literal)
parse limited form of arguments of function
171
7
3,641
def natural_number_with_currency(number, currency, show_decimal_place=True,
                                 use_nbsp=True):
    """Format number as a human-readable price with a currency suffix.

    With use_nbsp, spaces become non-breaking spaces and the result is
    marked safe for templates.
    """
    formatted = numberformat.format(
        number=number,
        decimal_sep=',',
        decimal_pos=2 if show_decimal_place else 0,
        grouping=3,
        thousand_sep=' ',
        force_grouping=True,
    )
    humanized = '{} {}'.format(formatted, force_text(currency))
    if use_nbsp:
        return mark_safe(humanized.replace(' ', '\u00a0'))
    return humanized
Return a given number formatted as a price for humans .
130
11
3,642
def extract_db_info(self, obj, keys):
    """Extract database metadata (requested keys plus tags) from a
    serialized FITS file.

    :param obj: object convertible to a DataFrame-like resource
    :param keys: metadata field names to extract
    :returns: the result dict from the superclass, augmented with the
        extracted keys and tags when the resource is non-empty
    """
    objl = self.convert(obj)
    result = super(DataFrameType, self).extract_db_info(objl, keys)
    # FITS keyword extractor registered in the data model
    ext = self.datamodel.extractor_map['fits']
    if objl:
        with objl.open() as hdulist:
            for field in keys:
                result[field] = ext.extract(field, hdulist)
            # fill the nested tags dict declared by the superclass result
            tags = result['tags']
            for field in self.tags_keys:
                tags[field] = ext.extract(field, hdulist)
        return result
    else:
        return result
Extract tags from serialized file
140
7
3,643
def readc ( prompt , default = None , valid = None , question_mark = True ) : cresult = None # Avoid PyCharm warning # question mark if question_mark : cquestion_mark = ' ? ' else : cquestion_mark = '' # main loop loop = True while loop : # display prompt if default is None : print ( prompt + cquestion_mark , end = '' ) sys . stdout . flush ( ) else : print ( prompt + ' [' + str ( default ) + ']' + cquestion_mark , end = '' ) sys . stdout . flush ( ) # read user's input cresult = sys . stdin . readline ( ) . strip ( ) if cresult == '' and default is not None : cresult = str ( default ) if len ( cresult ) == 1 : # check that all the characters are valid loop = False if valid is not None : for c in cresult : if c not in str ( valid ) : print ( '*** Error: invalid characters found.' ) print ( '*** Valid characters are:' , valid ) print ( '*** Try again!' ) loop = True else : print ( '*** Error: invalid string length. Try again!' ) return cresult
Return a single character read from keyboard
259
7
3,644
def read_value(ftype, prompt, default=None, minval=None, maxval=None,
               allowed_single_chars=None, question_mark=True):
    """Return a value of type ftype read from the keyboard.

    :param ftype: conversion callable (e.g. int or float)
    :param minval: minimum accepted value (no lower bound when None)
    :param maxval: maximum accepted value (no upper bound when None)
    :param allowed_single_chars: single characters returned verbatim
    :param question_mark: append ' ? ' to the prompt
    :raises ValueError: when minval/maxval cannot be converted by ftype
    """
    # avoid PyCharm warning 'might be referenced before assignment'
    result = None
    # question mark
    if question_mark:
        cquestion_mark = ' ? '
    else:
        cquestion_mark = ''
    # check minimum value
    if minval is not None:
        try:
            iminval = ftype(minval)
        except ValueError:
            raise ValueError("'" + str(minval) + "' cannot " +
                             "be used as an minval in readi()")
    else:
        iminval = None
    # check maximum value
    if maxval is not None:
        try:
            imaxval = ftype(maxval)
        except ValueError:
            raise ValueError("'" + str(maxval) + "' cannot " +
                             "be used as an maxval in readi()")
    else:
        imaxval = None
    # minimum and maximum values shown in the prompt
    if minval is None and maxval is None:
        cminmax = ''
    elif minval is None:
        cminmax = ' (number <= ' + str(imaxval) + ')'
    elif maxval is None:
        cminmax = ' (number >= ' + str(iminval) + ')'
    else:
        cminmax = ' (' + str(minval) + ' <= number <= ' + str(maxval) + ')'
    # main loop: repeats until a convertible, in-range value is read
    loop = True
    while loop:
        # display prompt
        if default is None:
            print(prompt + cminmax + cquestion_mark, end='')
            sys.stdout.flush()
        else:
            print(prompt + cminmax + ' [' + str(default) + ']' +
                  cquestion_mark, end='')
            sys.stdout.flush()
        # read user's input
        cresult = sys.stdin.readline().strip()
        if cresult == '' and default is not None:
            cresult = str(default)
        # if valid allowed single character, return character
        if len(cresult) == 1:
            if allowed_single_chars is not None:
                if cresult in allowed_single_chars:
                    return cresult
        # convert to ftype value
        try:
            result = ftype(cresult)
        except ValueError:
            print("*** Error: invalid " + str(ftype) + " value. Try again!")
        else:
            # check number is within expected range
            if minval is None and maxval is None:
                loop = False
            elif minval is None:
                if result <= imaxval:
                    loop = False
                else:
                    print("*** Error: number out of range. Try again!")
            elif maxval is None:
                if result >= iminval:
                    loop = False
                else:
                    print("*** Error: number out of range. Try again!")
            else:
                if iminval <= result <= imaxval:
                    loop = False
                else:
                    print("*** Error: number out of range. Try again!")
    return result
Return value read from keyboard
640
5
3,645
def load_product_object(self, name):
    """Look up name in self.products and build the product object from
    its registry entry."""
    entry = self.products[name]
    return self._get_base_object(entry)
Load product object according to name
39
6
3,646
def depsolve(self):
    """Load every recipe and index the products each one requires/provides.

    Returns (requires, provides): product name -> ProductEntry(name, mode,
    field).
    """
    requires = {}
    provides = {}
    for mode, _entry in self.recipes.items():
        recipe = self.load_recipe_object(mode)
        for field, req in recipe.requirements().items():
            if req.type.isproduct():
                product_name = req.type.name()
                requires[product_name] = ProductEntry(product_name, mode, field)
        for field, prod in recipe.products().items():
            if prod.type.isproduct():
                product_name = prod.type.name()
                provides[product_name] = ProductEntry(product_name, mode, field)
    return requires, provides
Load all recipes to search for products
157
7
3,647
def search_mode_provides(self, product, pipeline='default'):
    """Return the ProductEntry of the mode that provides `product`.

    :raises ValueError: when no mode provides it
    """
    active_pipeline = self.pipelines[pipeline]
    for obj, mode, field in self.iterate_mode_provides(self.modes,
                                                       active_pipeline):
        # match on the product type's name
        if obj.name() == product:
            return ProductEntry(obj.name(), mode.key, field)
    raise ValueError('no mode provides %s' % product)
Search the mode that provides a given product
97
8
3,648
def select_configuration(self, obresult):
    """Select the instrument configuration matching an observation result.

    Matching order: INSCONF keyword as uuid, then as configuration name,
    then by observation date against each configuration's validity window.

    :raises KeyError: when an INSCONF value matches no configuration
    """
    logger = logging.getLogger(__name__)
    logger.debug('calling default configuration selector')
    # get first possible image
    ref = obresult.get_sample_frame()
    extr = self.datamodel.extractor_map['fits']
    if ref:
        # get INSCONF configuration
        result = extr.extract('insconf', ref)
        if result:
            # found the keyword, try to match
            logger.debug('found insconf config uuid=%s', result)
            # Use insconf as uuid key
            if result in self.configurations:
                return self.configurations[result]
            else:
                # Additional check for conf.name
                for conf in self.configurations.values():
                    if conf.name == result:
                        return conf
                else:
                    raise KeyError(
                        'insconf {} does not match any config'.format(result))
        # If not, try to match by DATE
        date_obs = extr.extract('observation_date', ref)
        for key, conf in self.configurations.items():
            if key == 'default':
                # skip default
                continue
            # open-ended validity window when date_end is None
            if conf.date_end is not None:
                upper_t = date_obs < conf.date_end
            else:
                upper_t = True
            if upper_t and (date_obs >= conf.date_start):
                logger.debug('found date match, config uuid=%s', key)
                return conf
        else:
            # for/else: no configuration's window matched the date
            # NOTE(review): when ref is falsy this function falls through
            # and implicitly returns None — confirm intended
            logger.debug('no match, using default configuration')
            return self.configurations['default']
Select instrument configuration based on OB
339
6
3,649
def select_profile(self, obresult):
    """Select the instrument profile key for an observation result.

    Returns (key, date_obs, keyname) where keyname is 'uuid' or 'name'.
    """
    logger = logging.getLogger(__name__)
    logger.debug('calling default profile selector')
    # an explicit (non-default) configuration wins outright
    insconf = obresult.configuration
    if insconf != 'default':
        return insconf, None, 'uuid'
    # otherwise derive the key from the first sample frame, if any
    ref = obresult.get_sample_frame()
    if ref is None:
        return obresult.instrument, None, 'name'
    extr = self.datamodel.extractor_map['fits']
    date_obs = extr.extract('observation_date', ref)
    key = extr.extract('insconf', ref)
    if key is not None:
        return key, date_obs, 'uuid'
    return extr.extract('instrument', ref), date_obs, 'name'
Select instrument profile based on OB
203
6
3,650
def get_recipe_object(self, mode_name, pipeline_name='default'):
    """Build a recipe object for the named mode within the named pipeline."""
    selected_mode = self.modes[mode_name]
    selected_pipeline = self.pipelines[pipeline_name]
    return selected_pipeline.get_recipe_object(selected_mode)
Build a recipe object from a given mode name
70
9
3,651
def pause_debugplot ( debugplot , optional_prompt = None , pltshow = False , tight_layout = True ) : if debugplot not in DEBUGPLOT_CODES : raise ValueError ( 'Invalid debugplot value:' , debugplot ) if debugplot < 0 : debugplot_ = - debugplot pltclose = True else : debugplot_ = debugplot pltclose = False if pltshow : if debugplot_ in [ 1 , 2 , 11 , 12 , 21 , 22 ] : if tight_layout : plt . tight_layout ( ) if debugplot_ in [ 1 , 11 , 21 ] : plt . show ( block = False ) plt . pause ( 0.2 ) elif debugplot_ in [ 2 , 12 , 22 ] : print ( 'Press "q" to continue...' , end = '' ) sys . stdout . flush ( ) plt . show ( ) print ( '' ) else : if debugplot_ in [ 2 , 12 , 22 ] : if optional_prompt is None : print ( 'Press <RETURN> to continue...' , end = '' ) else : print ( optional_prompt , end = '' ) sys . stdout . flush ( ) cdummy = sys . stdin . readline ( ) . strip ( ) if debugplot_ in [ 1 , 2 , 11 , 12 , 21 , 22 ] and pltclose : plt . close ( )
Ask the user to press RETURN to continue after plotting .
309
12
3,652
def mode_half_sample ( a , is_sorted = False ) : a = np . asanyarray ( a ) if not is_sorted : sdata = np . sort ( a ) else : sdata = a n = len ( sdata ) if n == 1 : return sdata [ 0 ] elif n == 2 : return 0.5 * ( sdata [ 0 ] + sdata [ 1 ] ) elif n == 3 : ind = - sdata [ 0 ] + 2 * sdata [ 1 ] - sdata [ 2 ] if ind < 0 : return 0.5 * ( sdata [ 0 ] + sdata [ 1 ] ) elif ind > 0 : return 0.5 * ( sdata [ 1 ] + sdata [ 2 ] ) else : return sdata [ 1 ] else : N = int ( math . ceil ( n / 2.0 ) ) w = sdata [ ( N - 1 ) : ] - sdata [ : ( n - N + 1 ) ] ar = w . argmin ( ) return mode_half_sample ( sdata [ ar : ar + N ] , is_sorted = True )
Estimate the mode using the Half Sample mode .
249
10
3,653
def overplot_ds9reg ( filename , ax ) : # read ds9 region file with open ( filename ) as f : file_content = f . read ( ) . splitlines ( ) # check first line first_line = file_content [ 0 ] if "# Region file format: DS9" not in first_line : raise ValueError ( "Unrecognized ds9 region file format" ) for line in file_content : if line [ 0 : 4 ] == "line" : line_fields = line . split ( ) x1 = float ( line_fields [ 1 ] ) y1 = float ( line_fields [ 2 ] ) x2 = float ( line_fields [ 3 ] ) y2 = float ( line_fields [ 4 ] ) if "color" in line : i = line . find ( "color=" ) color = line [ i + 6 : i + 13 ] else : color = "green" ax . plot ( [ x1 , x2 ] , [ y1 , y2 ] , '-' , color = color ) elif line [ 0 : 4 ] == "text" : line_fields = line . split ( ) x0 = float ( line_fields [ 1 ] ) y0 = float ( line_fields [ 2 ] ) text = line_fields [ 3 ] [ 1 : - 1 ] if "color" in line : i = line . find ( "color=" ) color = line [ i + 6 : i + 13 ] else : color = "green" ax . text ( x0 , y0 , text , fontsize = 8 , bbox = dict ( boxstyle = "round,pad=0.1" , fc = "white" , ec = "grey" , ) , color = color , fontweight = 'bold' , backgroundcolor = 'white' , ha = 'center' ) else : # ignore pass
Overplot a ds9 region file .
401
9
3,654
def find_peaks_indexes ( arr , window_width = 5 , threshold = 0.0 , fpeak = 0 ) : _check_window_width ( window_width ) if ( fpeak < 0 or fpeak + 1 >= window_width ) : raise ValueError ( 'fpeak must be in the range 0- window_width - 2' ) kernel_peak = kernel_peak_function ( threshold , fpeak ) out = generic_filter ( arr , kernel_peak , window_width , mode = "reflect" ) result , = numpy . nonzero ( out ) return filter_array_margins ( arr , result , window_width )
Find indexes of peaks in a 1d array .
142
10
3,655
def refine_peaks ( arr , ipeaks , window_width ) : _check_window_width ( window_width ) step = window_width // 2 ipeaks = filter_array_margins ( arr , ipeaks , window_width ) winoff = numpy . arange ( - step , step + 1 , dtype = 'int' ) peakwin = ipeaks [ : , numpy . newaxis ] + winoff ycols = arr [ peakwin ] ww = return_weights ( window_width ) coff2 = numpy . dot ( ww , ycols . T ) uc = - 0.5 * coff2 [ 1 ] / coff2 [ 2 ] yc = coff2 [ 0 ] + uc * ( coff2 [ 1 ] + coff2 [ 2 ] * uc ) xc = ipeaks + 0.5 * ( window_width - 1 ) * uc return xc , yc
Refine the peak location previously found by find_peaks_indexes
208
15
3,656
def complete_config ( config ) : if not config . has_section ( 'run' ) : config . add_section ( 'run' ) values = { 'basedir' : os . getcwd ( ) , 'task_control' : 'control.yaml' , } for k , v in values . items ( ) : if not config . has_option ( 'run' , k ) : config . set ( 'run' , k , v ) return config
Complete config with default values
101
5
3,657
def centering_centroid ( data , xi , yi , box , nloop = 10 , toldist = 1e-3 , maxdist = 10.0 ) : # Store original center cxy = ( xi , yi ) origin = ( xi , yi ) # initial background back = 0.0 if nloop == 0 : return xi , yi , 0.0 , 0 , 'not recentering' for i in range ( nloop ) : nxy , back = _centering_centroid_loop_xy ( data , cxy , box ) # _logger.debug('new center is %s', ncenter) # if we are to far away from the initial point, break dst = distance . euclidean ( origin , nxy ) if dst > maxdist : msg = 'maximum distance (%5.2f) from origin reached' % maxdist return cxy [ 0 ] , cxy [ 1 ] , back , 2 , msg # check convergence dst = distance . euclidean ( nxy , cxy ) if dst < toldist : return nxy [ 0 ] , nxy [ 1 ] , back , 1 , 'converged in iteration %i' % i else : cxy = nxy return nxy [ 0 ] , nxy [ 1 ] , back , 3 , 'not converged in %i iterations' % nloop
returns x y background status message
298
7
3,658
def cache_for ( * * timedelta_kw ) : max_age_timedelta = timedelta ( * * timedelta_kw ) def decorate_func ( func ) : @ wraps ( func ) def decorate_func_call ( * a , * * kw ) : callback = SetCacheControlHeadersFromTimedeltaCallback ( max_age_timedelta ) registry_provider = AfterThisRequestCallbackRegistryProvider ( ) registry = registry_provider . provide ( ) registry . add ( callback ) return func ( * a , * * kw ) return decorate_func_call return decorate_func
Set Cache - Control headers and Expires - header .
136
11
3,659
def cache ( * cache_control_items , * * cache_control_kw ) : cache_control_kw . update ( cache_control_items ) def decorate_func ( func ) : @ wraps ( func ) def decorate_func_call ( * a , * * kw ) : callback = SetCacheControlHeadersCallback ( * * cache_control_kw ) registry_provider = AfterThisRequestCallbackRegistryProvider ( ) registry = registry_provider . provide ( ) registry . add ( callback ) return func ( * a , * * kw ) return decorate_func_call return decorate_func
Set Cache - Control headers .
134
6
3,660
def dont_cache ( ) : def decorate_func ( func ) : @ wraps ( func ) def decorate_func_call ( * a , * * kw ) : callback = SetCacheControlHeadersForNoCachingCallback ( ) registry_provider = AfterThisRequestCallbackRegistryProvider ( ) registry = registry_provider . provide ( ) registry . add ( callback ) return func ( * a , * * kw ) return decorate_func_call return decorate_func
Set Cache - Control headers for no caching
105
8
3,661
def filter_empty_parameters ( func ) : @ wraps ( func ) def func_wrapper ( self , * args , * * kwargs ) : my_kwargs = { key : value for key , value in kwargs . items ( ) if value not in EMPTIES } args_is_empty = all ( arg in EMPTIES for arg in args ) if ( { 'source' , 'material' } . issuperset ( my_kwargs ) or not my_kwargs ) and args_is_empty : return return func ( self , * args , * * my_kwargs ) return func_wrapper
Decorator that is filtering empty parameters .
136
9
3,662
def author_id_normalize_and_schema ( uid , schema = None ) : def _get_uid_normalized_in_schema ( _uid , _schema ) : regex , template = _RE_AUTHORS_UID [ _schema ] match = regex . match ( _uid ) if match : return template . format ( match . group ( 'uid' ) ) if idutils . is_orcid ( uid ) and schema in ( None , 'ORCID' ) : return idutils . normalize_orcid ( uid ) , 'ORCID' if schema and schema not in _RE_AUTHORS_UID : # Schema explicitly specified, but this function can't handle it raise UnknownUIDSchema ( uid ) if schema : normalized_uid = _get_uid_normalized_in_schema ( uid , schema ) if normalized_uid : return normalized_uid , schema else : raise SchemaUIDConflict ( schema , uid ) match_schema , normalized_uid = None , None for candidate_schema in _RE_AUTHORS_UID : candidate_uid = _get_uid_normalized_in_schema ( uid , candidate_schema ) if candidate_uid : if match_schema : # Valid against more than one candidate schema, ambiguous raise UnknownUIDSchema ( uid ) match_schema = candidate_schema normalized_uid = candidate_uid if match_schema : return normalized_uid , match_schema # No guessess have been found raise UnknownUIDSchema ( uid )
Detect and normalize an author UID schema .
349
9
3,663
def normalize_arxiv_category ( category ) : category = _NEW_CATEGORIES . get ( category . lower ( ) , category ) for valid_category in valid_arxiv_categories ( ) : if ( category . lower ( ) == valid_category . lower ( ) or category . lower ( ) . replace ( '-' , '.' ) == valid_category . lower ( ) ) : return valid_category return category
Normalize arXiv category to be schema compliant .
96
11
3,664
def valid_arxiv_categories ( ) : schema = load_schema ( 'elements/arxiv_categories' ) categories = schema [ 'enum' ] categories . extend ( _NEW_CATEGORIES . keys ( ) ) return categories
List of all arXiv categories that ever existed .
58
11
3,665
def classify_field ( value ) : if not ( isinstance ( value , six . string_types ) and value ) : return schema = load_schema ( 'elements/inspire_field' ) inspire_categories = schema [ 'properties' ] [ 'term' ] [ 'enum' ] for inspire_category in inspire_categories : if value . upper ( ) == inspire_category . upper ( ) : return inspire_category category = normalize_arxiv_category ( value ) return ARXIV_TO_INSPIRE_CATEGORY_MAPPING . get ( category , 'Other' )
Normalize value to an Inspire category .
135
9
3,666
def split_pubnote ( pubnote_str ) : pubnote = { } parts = pubnote_str . split ( ',' ) if len ( parts ) > 2 : pubnote [ 'journal_title' ] = parts [ 0 ] pubnote [ 'journal_volume' ] = parts [ 1 ] pubnote [ 'page_start' ] , pubnote [ 'page_end' ] , pubnote [ 'artid' ] = split_page_artid ( parts [ 2 ] ) return { key : val for ( key , val ) in six . iteritems ( pubnote ) if val is not None }
Split pubnote into journal information .
132
7
3,667
def get_schema_path ( schema , resolved = False ) : def _strip_first_path_elem ( path ) : """Pass doctests. Strip the first element of the given path, returning an empty string if there are no more elements. For example, 'something/other' will end up as 'other', but passing then 'other' will return '' """ stripped_path = path . split ( os . path . sep , 1 ) [ 1 : ] return '' . join ( stripped_path ) def _schema_to_normalized_path ( schema ) : """Pass doctests. Extracts the path from the url, makes sure to get rid of any '..' in the path and adds the json extension if not there. """ path = os . path . normpath ( os . path . sep + urlsplit ( schema ) . path ) if path . startswith ( os . path . sep ) : path = path [ 1 : ] if not path . endswith ( '.json' ) : path += '.json' return path path = _schema_to_normalized_path ( schema ) while path : if resolved : schema_path = os . path . abspath ( os . path . join ( _resolved_schema_root_path , path ) ) else : schema_path = os . path . abspath ( os . path . join ( _schema_root_path , path ) ) if os . path . exists ( schema_path ) : return os . path . abspath ( schema_path ) path = _strip_first_path_elem ( path ) raise SchemaNotFound ( schema = schema )
Retrieve the installed path for the given schema .
355
10
3,668
def load_schema ( schema_name , resolved = False ) : schema_data = '' with open ( get_schema_path ( schema_name , resolved ) ) as schema_fd : schema_data = json . loads ( schema_fd . read ( ) ) return schema_data
Load the given schema from wherever it s installed .
62
10
3,669
def _load_schema_for_record ( data , schema = None ) : if schema is None : if '$schema' not in data : raise SchemaKeyNotFound ( data = data ) schema = data [ '$schema' ] if isinstance ( schema , six . string_types ) : schema = load_schema ( schema_name = schema ) return schema
Load the schema from a given record .
82
8
3,670
def validate ( data , schema = None ) : schema = _load_schema_for_record ( data , schema ) return jsonschema_validate ( instance = data , schema = schema , resolver = LocalRefResolver . from_schema ( schema ) , format_checker = inspire_format_checker , )
Validate the given dictionary against the given schema .
72
10
3,671
def get_validation_errors ( data , schema = None ) : schema = _load_schema_for_record ( data , schema ) errors = Draft4Validator ( schema , resolver = LocalRefResolver . from_schema ( schema ) , format_checker = inspire_format_checker ) return errors . iter_errors ( data )
Validation errors for a given record .
77
8
3,672
def normalize_collaboration ( collaboration ) : if not collaboration : return [ ] collaboration = collaboration . strip ( ) if collaboration . startswith ( '(' ) and collaboration . endswith ( ')' ) : collaboration = collaboration [ 1 : - 1 ] collaborations = _RE_AND . split ( collaboration ) collaborations = ( _RE_COLLABORATION_LEADING . sub ( '' , collab ) for collab in collaborations ) collaborations = ( _RE_COLLABORATION_TRAILING . sub ( '' , collab ) for collab in collaborations ) return [ collab . strip ( ) for collab in collaborations ]
Normalize collaboration string .
139
5
3,673
def get_license_from_url ( url ) : if not url : return split_url = urlsplit ( url , scheme = 'http' ) if split_url . netloc . lower ( ) == 'creativecommons.org' : if 'publicdomain' in split_url . path : match = _RE_PUBLIC_DOMAIN_URL . match ( split_url . path ) if match is None : license = [ 'public domain' ] else : license = [ 'CC0' ] license . extend ( part for part in match . groups ( ) if part ) else : license = [ 'CC' ] match = _RE_LICENSE_URL . match ( split_url . path ) license . extend ( part . upper ( ) for part in match . groups ( ) if part ) elif split_url . netloc == 'arxiv.org' : license = [ 'arXiv' ] match = _RE_LICENSE_URL . match ( split_url . path ) license . extend ( part for part in match . groups ( ) if part ) else : raise ValueError ( 'Unknown license URL' ) return u' ' . join ( license )
Get the license abbreviation from an URL .
257
9
3,674
def convert_old_publication_info_to_new ( publication_infos ) : result = [ ] hidden_publication_infos = [ ] for publication_info in publication_infos : _publication_info = copy . deepcopy ( publication_info ) journal_title = _publication_info . get ( 'journal_title' ) try : journal_title = _JOURNALS_RENAMED_OLD_TO_NEW [ journal_title ] _publication_info [ 'journal_title' ] = journal_title result . append ( _publication_info ) continue except KeyError : pass journal_volume = _publication_info . get ( 'journal_volume' ) if journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and journal_volume and len ( journal_volume ) == 4 : try : was_last_century = int ( journal_volume [ : 2 ] ) > 50 except ValueError : pass else : _publication_info [ 'year' ] = int ( '19' + journal_volume [ : 2 ] if was_last_century else '20' + journal_volume [ : 2 ] ) _publication_info [ 'journal_volume' ] = journal_volume [ 2 : ] result . append ( _publication_info ) continue if journal_title and journal_volume and journal_title . lower ( ) not in JOURNALS_IGNORED_IN_OLD_TO_NEW : volume_starts_with_a_letter = _RE_VOLUME_STARTS_WITH_A_LETTER . match ( journal_volume ) volume_ends_with_a_letter = _RE_VOLUME_ENDS_WITH_A_LETTER . match ( journal_volume ) match = volume_starts_with_a_letter or volume_ends_with_a_letter if match : _publication_info . pop ( 'journal_record' , None ) if journal_title in _JOURNALS_RENAMED_OLD_TO_NEW . values ( ) : _publication_info [ 'journal_title' ] = journal_title else : _publication_info [ 'journal_title' ] = '' . join ( [ journal_title , '' if journal_title . endswith ( '.' ) else ' ' , match . group ( 'letter' ) , ] ) _publication_info [ 'journal_volume' ] = match . group ( 'volume' ) hidden = _publication_info . pop ( 'hidden' , None ) if hidden : hidden_publication_infos . append ( _publication_info ) else : result . 
append ( _publication_info ) for publication_info in hidden_publication_infos : if publication_info not in result : publication_info [ 'hidden' ] = True result . append ( publication_info ) return result
Convert a publication_info value from the old format to the new .
638
15
3,675
def convert_new_publication_info_to_old ( publication_infos ) : def _needs_a_hidden_pubnote ( journal_title , journal_volume ) : return ( journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE and journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE [ journal_title ] ) result = [ ] for publication_info in publication_infos : _publication_info = copy . deepcopy ( publication_info ) journal_title = _publication_info . get ( 'journal_title' ) try : journal_title = _JOURNALS_RENAMED_NEW_TO_OLD [ journal_title ] _publication_info [ 'journal_title' ] = journal_title result . append ( _publication_info ) continue except KeyError : pass journal_volume = _publication_info . get ( 'journal_volume' ) year = _publication_info . get ( 'year' ) if ( journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and year and journal_volume and len ( journal_volume ) == 2 ) : two_digit_year = str ( year ) [ 2 : ] _publication_info [ 'journal_volume' ] = '' . join ( [ two_digit_year , journal_volume ] ) result . append ( _publication_info ) continue if journal_title and journal_volume : match = _RE_TITLE_ENDS_WITH_A_LETTER . match ( journal_title ) if match and _needs_a_hidden_pubnote ( journal_title , journal_volume ) : _publication_info [ 'journal_title' ] = match . group ( 'title' ) _publication_info [ 'journal_volume' ] = journal_volume + match . group ( 'letter' ) result . append ( _publication_info ) _publication_info = copy . deepcopy ( publication_info ) _publication_info [ 'hidden' ] = True _publication_info [ 'journal_title' ] = match . group ( 'title' ) _publication_info [ 'journal_volume' ] = match . group ( 'letter' ) + journal_volume elif match and journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER : _publication_info [ 'journal_title' ] = match . group ( 'title' ) _publication_info [ 'journal_volume' ] = match . group ( 'letter' ) + journal_volume result . append ( _publication_info ) return result
Convert back a publication_info value from the new format to the old .
606
16
3,676
def fix_reference_url ( url ) : new_url = url new_url = fix_url_bars_instead_of_slashes ( new_url ) new_url = fix_url_add_http_if_missing ( new_url ) new_url = fix_url_replace_tilde ( new_url ) try : rfc3987 . parse ( new_url , rule = "URI" ) return new_url except ValueError : return url
Used to parse an incorect url to try to fix it with the most common ocurrences for errors . If the fixed url is still incorrect it returns None .
102
34
3,677
def is_arxiv ( obj ) : arxiv_test = obj . split ( ) if not arxiv_test : return False matched_arxiv = ( RE_ARXIV_PRE_2007_CLASS . match ( arxiv_test [ 0 ] ) or RE_ARXIV_POST_2007_CLASS . match ( arxiv_test [ 0 ] ) ) if not matched_arxiv : return False if not matched_arxiv . group ( 'category' ) : return True valid_arxiv_categories_lower = [ category . lower ( ) for category in valid_arxiv_categories ( ) ] category = matched_arxiv . group ( 'category' ) . lower ( ) return ( category in valid_arxiv_categories_lower or category . replace ( '-' , '.' ) in valid_arxiv_categories_lower )
Return True if obj contains an arXiv identifier .
199
11
3,678
def normalize_arxiv ( obj ) : obj = obj . split ( ) [ 0 ] matched_arxiv_pre = RE_ARXIV_PRE_2007_CLASS . match ( obj ) if matched_arxiv_pre : return ( '/' . join ( matched_arxiv_pre . group ( "extraidentifier" , "identifier" ) ) ) . lower ( ) matched_arxiv_post = RE_ARXIV_POST_2007_CLASS . match ( obj ) if matched_arxiv_post : return matched_arxiv_post . group ( "identifier" ) return None
Return a normalized arXiv identifier from obj .
139
10
3,679
def resolve_remote ( self , uri ) : try : return super ( LocalRefResolver , self ) . resolve_remote ( uri ) except ValueError : return super ( LocalRefResolver , self ) . resolve_remote ( 'file://' + get_schema_path ( uri . rsplit ( '.json' , 1 ) [ 0 ] ) )
Resolve a uri or relative path to a schema .
79
12
3,680
def set_path ( self , path ) : if os . path . isabs ( path ) : path = os . path . normpath ( os . path . join ( self . cwd , path ) ) self . path = path self . relative = os . path . relpath ( self . path , self . base )
Set the path of the file .
68
7
3,681
def clone ( self , path = None , * , with_contents = True , * * options ) : file = File ( path if path else self . path , cwd = options . get ( "cwd" , self . cwd ) ) file . base = options . get ( "base" , self . base ) if with_contents : file . contents = options . get ( "contents" , self . contents ) return file
Clone the file .
94
5
3,682
def launch_cli ( ) : # Create the CLI argument parser parser = argparse . ArgumentParser ( prog = "pylp" , description = "Call some tasks defined in your pylpfile." ) # Version of Pylp parser . add_argument ( "-v" , "--version" , action = "version" , version = "Pylp %s" % version , help = "get the Pylp version and exit" ) # Set the pylpfile location parser . add_argument ( '--pylpfile' , nargs = 1 , help = "manually set path of pylpfile" , metavar = "<path>" ) # Set the pylpfile location parser . add_argument ( '--cwd' , nargs = 1 , help = "manually set the CWD" , metavar = "<dir path>" ) # Force Pylp to not display colors parser . add_argument ( '--no-color' , action = "store_false" , help = "force Pylp to not display colors" ) # Disable logging parser . add_argument ( '--silent' , action = "store_true" , help = "disable all Pylp logging" ) # List of tasks to execute parser . add_argument ( 'tasks' , nargs = "*" , default = [ "default" ] , help = "tasks to execute (if none, execute the 'default' task)" , metavar = "<task>" ) # Parse the CLI arguments args = parser . parse_args ( ) # Current working directory (CWD) if args . cwd : config . cwd = args . cwd [ 0 ] else : config . cwd = os . getcwd ( ) # Get the pylpfile location if args . pylpfile : pylpfile = args . pylpfile [ 0 ] if not args . pylpfile : pylpfile = path . join ( config . cwd , "pylpfile.py" ) elif not args . cwd : config . cwd = path . dirname ( pylpfile ) # Must the terminal have colors? config . color = args . no_color # Must Pylp be silent (no logging)? config . silent = args . silent # Execute the pylpfile run ( pylpfile , args . tasks )
Launch the CLI .
519
4
3,683
def add_affiliation ( self , value , curated_relation = None , record = None ) : if value : affiliation = { 'value' : value } if record : affiliation [ 'record' ] = record if curated_relation is not None : affiliation [ 'curated_relation' ] = curated_relation self . _ensure_list_field ( 'affiliations' , affiliation )
Add an affiliation .
83
4
3,684
def set_uid ( self , uid , schema = None ) : try : uid , schema = author_id_normalize_and_schema ( uid , schema ) except UnknownUIDSchema : # Explicit schema wasn't provided, and the UID is too little # to figure out the schema of it, this however doesn't mean # the UID is invalid pass self . _ensure_field ( 'ids' , [ ] ) self . obj [ 'ids' ] = [ id_ for id_ in self . obj [ 'ids' ] if id_ . get ( 'schema' ) != schema ] self . _add_uid ( uid , schema )
Set a unique ID .
144
5
3,685
def singleton ( klass ) : instances = { } def getinstance ( * args , * * kwargs ) : if klass not in instances : instances [ klass ] = klass ( * args , * * kwargs ) return instances [ klass ] return wraps ( klass ) ( getinstance )
Create singleton from class
67
5
3,686
def translation_activate_block ( function = None , language = None ) : def _translation_activate_block ( function ) : def _decorator ( * args , * * kwargs ) : tmp_language = translation . get_language ( ) try : translation . activate ( language or settings . LANGUAGE_CODE ) return function ( * args , * * kwargs ) finally : translation . activate ( tmp_language ) return wraps ( function ) ( _decorator ) if function : return _translation_activate_block ( function ) else : return _translation_activate_block
Activate language only for one method or function
127
9
3,687
async def uv_protection_window ( self , low : float = 3.5 , high : float = 3.5 ) -> dict : return await self . request ( 'get' , 'protection' , params = { 'from' : str ( low ) , 'to' : str ( high ) } )
Get data on when a UV protection window is .
67
10
3,688
def runstring ( self ) : cfile = self . template % self . last self . last += 1 return cfile
Return the run number and the file name .
25
9
3,689
def obsres_from_oblock_id ( self , obsid , configuration = None ) : este = self . ob_table [ obsid ] obsres = obsres_from_dict ( este ) _logger . debug ( "obsres_from_oblock_id id='%s', mode='%s' START" , obsid , obsres . mode ) try : this_drp = self . drps . query_by_name ( obsres . instrument ) except KeyError : raise ValueError ( 'no DRP for instrument {}' . format ( obsres . instrument ) ) # Reserved names if obsres . mode in self . _RESERVED_MODE_NAMES : selected_mode = None # null mode else : selected_mode = this_drp . modes [ obsres . mode ] if selected_mode : obsres = selected_mode . build_ob ( obsres , self ) obsres = selected_mode . tag_ob ( obsres ) if configuration : # override instrument configuration # obsres.configuration = self.search_instrument_configuration( # obsres.instrument, # configuration #) pass else : # Insert Instrument configuration pass # obsres.configuration = this_drp.configuration_selector(obsres) key , date_obs , keyname = this_drp . select_profile ( obsres ) obsres . configuration = self . assembly_instrument ( key , date_obs , keyname ) obsres . profile = obsres . configuration _logger . debug ( 'obsres_from_oblock_id %s END' , obsid ) return obsres
Override instrument configuration if configuration is not None
354
8
3,690
def map_tree ( visitor , tree ) : newn = [ map_tree ( visitor , node ) for node in tree . nodes ] return visitor ( tree , newn )
Apply function to nodes
37
4
3,691
def filter_tree ( condition , tree ) : if condition ( tree ) : for node in tree . nodes : # this works in python > 3.3 # yield from filter_tree(condition, node) for n in filter_tree ( condition , node ) : yield n yield tree
Return parts of the tree that fulfill condition
59
8
3,692
def fill_placeholders ( self , tags ) : def change_p_node_tags ( node , children ) : if isinstance ( node , Placeholder ) : value = ConstExpr ( tags [ node . name ] ) return value else : return node . clone ( children ) return map_tree ( change_p_node_tags , self )
Substitute Placeholder nodes by its value in tags
74
11
3,693
def molecules2symbols ( molecules , add_hydrogen = True ) : symbols = sorted ( list ( set ( ase . symbols . string2symbols ( '' . join ( map ( lambda _x : '' . join ( ase . symbols . string2symbols ( _x ) ) , molecules ) ) ) ) ) , key = lambda _y : ase . data . atomic_numbers [ _y ] ) if add_hydrogen and 'H' not in symbols : symbols . insert ( 0 , 'H' ) return symbols
Take a list of molecules and return just a list of atomic symbols possibly adding hydrogen
118
16
3,694
def construct_reference_system ( symbols , candidates = None , options = None , ) : if hasattr ( options , 'no_hydrogen' ) and options . no_hydrogen : add_hydrogen = False else : add_hydrogen = True references = { } sorted_candidates = [ 'H2' , 'H2O' , 'NH3' , 'N2' , 'CH4' , 'CO' , 'H2S' , 'HCl' , 'O2' ] if candidates is None : candidates = sorted_candidates else : odd_candidates = [ c for c in candidates if c not in sorted_candidates ] candidates = [ c for c in sorted_candidates if c in candidates ] + odd_candidates added_symbols = [ ] # go symbols in adsorbate # to add reference species in procedural manner for symbol in symbols : added_symbols . append ( symbol ) for candidate in candidates : _symbols = ase . symbols . string2symbols ( candidate ) # Add partial adsorbate species # is subset of reference species # and reference species # is subset of full adsorbate species set if set ( added_symbols ) <= set ( list ( references . keys ( ) ) + _symbols ) and set ( list ( references . keys ( ) ) + _symbols ) <= set ( symbols ) and candidate not in references . values ( ) : references [ symbol ] = candidate break else : raise UserWarning ( ( "No candidate satisfied {symbol}. Add more candidates\n" " Symbols {symbols}\n" " _Symbols {_symbols}\n" " References {references}\n" " Candidates {candidates}\n" ) . format ( symbol = symbol , symbols = symbols , _symbols = _symbols , candidates = candidates , references = list ( references . keys ( ) ) , ) ) sorted_references = [ ] references = list ( references . items ( ) ) # put references in order so that each reference # only adds one one additional species in each step # while references: # for i, reference in enumerate(references): # if len(set(ase.symbols.string2symbols(reference[1])) - # set(x[0] for x in sorted_references)) == 1: # sorted_references.append(references.pop(i)) # break return references
Take a list of symbols and construct gas phase references system when possible avoiding O2 . Candidates can be rearranged where earlier candidates get higher preference than later candidates
530
32
3,695
def get_stoichiometry_factors ( adsorbates , references ) : stoichiometry = get_atomic_stoichiometry ( references ) stoichiometry_factors = { } for adsorbate in adsorbates : for symbol in ase . symbols . string2symbols ( adsorbate ) : symbol_index = list ( map ( lambda _x : _x [ 0 ] , references ) ) . index ( symbol ) for ( factor , ( ref_symbol , ref_molecule ) ) in zip ( stoichiometry [ symbol_index ] , references ) : stoichiometry_factors . setdefault ( adsorbate , { } ) [ ref_molecule ] = stoichiometry_factors . setdefault ( adsorbate , { } ) . get ( ref_molecule , 0 ) + factor nonzero_factors = { } for key , value in stoichiometry_factors [ adsorbate ] . items ( ) : if not np . isclose ( value , 0. ) : nonzero_factors [ key ] = value stoichiometry_factors [ adsorbate ] = nonzero_factors return stoichiometry_factors
Take a list of adsorabtes and a corresponding reference system and return a list of dictionaries encoding the stoichiometry factors converting between adsorbates and reference molecules .
259
35
3,696
def get_fields_dict ( self , row ) : return { k : getattr ( self , 'clean_{}' . format ( k ) , lambda x : x ) ( v . strip ( ) if isinstance ( v , str ) else None ) for k , v in zip_longest ( self . get_fields ( ) , row ) }
Returns a dict of field name and cleaned value pairs to initialize the model . Beware it aligns the lists of fields and row values with Nones to allow for adding fields not found in the CSV . Whitespace around the value of the cell is stripped .
75
51
3,697
def process_node ( node ) : value = node [ 'value' ] mname = node [ 'name' ] typeid = node [ 'typeid' ] if typeid == 52 : # StructDataValue obj = { } for el in value [ 'elements' ] : key , val = process_node ( el ) obj [ key ] = val if value [ 'struct_type' ] != 'dict' : # Value is not a dict klass = objimp . import_object ( value [ 'struct_type' ] ) newobj = klass . __new__ ( klass ) if hasattr ( newobj , '__setstate__' ) : newobj . __setstate__ ( obj ) else : newobj . __dict__ = obj obj = newobj elif typeid == 9 : data = value [ 'data' ] dim = value [ 'dimension' ] shape = dim [ 'height' ] , dim [ 'width' ] obj = data elif typeid == 90 : # StructDataValueList obj = [ ] for el in value : sobj = { } for sel in el [ 'elements' ] : key , val = process_node ( sel ) sobj [ key ] = val obj . append ( sobj ) elif typeid == 45 : # Frame obj = dataframe . DataFrame ( frame = os . path . abspath ( value [ 'path' ] ) ) else : obj = value return mname , obj
Process a node in result . json structure
315
8
3,698
def build_result ( data ) : more = { } for key , value in data . items ( ) : if key != 'elements' : newnode = value else : newnode = { } for el in value : nkey , nvalue = process_node ( el ) newnode [ nkey ] = nvalue more [ key ] = newnode return more
Create a dictionary with the contents of result . json
77
10
3,699
def _finalize ( self , all_msg_errors = None ) : if all_msg_errors is None : all_msg_errors = [ ] for key in self . stored ( ) : try : getattr ( self , key ) except ( ValueError , TypeError ) as err : all_msg_errors . append ( err . args [ 0 ] ) # Raises a list of all the missing entries if all_msg_errors : raise ValueError ( all_msg_errors )
Access all the instance descriptors
104
6